1 //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This contains code to emit Builtin calls as LLVM code.
11 //===----------------------------------------------------------------------===//
14 #include "CGObjCRuntime.h"
15 #include "CGOpenCLRuntime.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenFunction.h"
18 #include "CodeGenModule.h"
19 #include "ConstantEmitter.h"
20 #include "PatternInit.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/Attr.h"
24 #include "clang/AST/Decl.h"
25 #include "clang/AST/OSLog.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "llvm/ADT/SmallPtrSet.h"
30 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/IntrinsicsAArch64.h"
36 #include "llvm/IR/IntrinsicsAMDGPU.h"
37 #include "llvm/IR/IntrinsicsARM.h"
38 #include "llvm/IR/IntrinsicsBPF.h"
39 #include "llvm/IR/IntrinsicsHexagon.h"
40 #include "llvm/IR/IntrinsicsNVPTX.h"
41 #include "llvm/IR/IntrinsicsPowerPC.h"
42 #include "llvm/IR/IntrinsicsR600.h"
43 #include "llvm/IR/IntrinsicsS390.h"
44 #include "llvm/IR/IntrinsicsWebAssembly.h"
45 #include "llvm/IR/IntrinsicsX86.h"
46 #include "llvm/IR/MDBuilder.h"
47 #include "llvm/IR/MatrixBuilder.h"
48 #include "llvm/Support/ConvertUTF.h"
49 #include "llvm/Support/ScopedPrinter.h"
50 #include "llvm/Support/X86TargetParser.h"
53 using namespace clang;
54 using namespace CodeGen;
58 int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
59 return std::min(High, std::max(Low, Value));
62 static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
63 Align AlignmentInBytes) {
65 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
66 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
67 // Nothing to initialize.
69 case LangOptions::TrivialAutoVarInitKind::Zero:
70 Byte = CGF.Builder.getInt8(0x00);
72 case LangOptions::TrivialAutoVarInitKind::Pattern: {
73 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
74 Byte = llvm::dyn_cast<llvm::ConstantInt>(
75 initializationPatternFor(CGF.CGM, Int8));
79 if (CGF.CGM.stopAutoInit())
81 CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
84 /// getBuiltinLibFunction - Given a builtin id for a function like
85 /// "__builtin_fabsf", return a Function* for "fabsf".
86 llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
88 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
90 // Get the name, skip over the __builtin_ prefix (if necessary).
94 // If the builtin has been declared explicitly with an assembler label,
95 // use the mangled name. This differs from the plain label on platforms
96 // that prefix labels.
97 if (FD->hasAttr<AsmLabelAttr>())
98 Name = getMangledName(D);
100 Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
102 llvm::FunctionType *Ty =
103 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
105 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
108 /// Emit the conversions required to turn the given value into an
109 /// integer of the given size.
110 static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
111 QualType T, llvm::IntegerType *IntType) {
112 V = CGF.EmitToMemory(V, T);
114 if (V->getType()->isPointerTy())
115 return CGF.Builder.CreatePtrToInt(V, IntType);
117 assert(V->getType() == IntType);
121 static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
122 QualType T, llvm::Type *ResultType) {
123 V = CGF.EmitFromMemory(V, T);
125 if (ResultType->isPointerTy())
126 return CGF.Builder.CreateIntToPtr(V, ResultType);
128 assert(V->getType() == ResultType);
132 /// Utility to insert an atomic instruction based on Intrinsic::ID
133 /// and the expression node.
134 static Value *MakeBinaryAtomicValue(
135 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
136 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
137 QualType T = E->getType();
138 assert(E->getArg(0)->getType()->isPointerType());
139 assert(CGF.getContext().hasSameUnqualifiedType(T,
140 E->getArg(0)->getType()->getPointeeType()));
141 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
143 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
144 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
146 llvm::IntegerType *IntType =
147 llvm::IntegerType::get(CGF.getLLVMContext(),
148 CGF.getContext().getTypeSize(T));
149 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
151 llvm::Value *Args[2];
152 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
153 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
154 llvm::Type *ValueType = Args[1]->getType();
155 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
157 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
158 Kind, Args[0], Args[1], Ordering);
159 return EmitFromInt(CGF, Result, T, ValueType);
162 static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
163 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
164 Value *Address = CGF.EmitScalarExpr(E->getArg(1));
166 // Convert the type of the pointer to a pointer to the stored type.
167 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
168 Value *BC = CGF.Builder.CreateBitCast(
169 Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
170 LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
171 LV.setNontemporal(true);
172 CGF.EmitStoreOfScalar(Val, LV, false);
176 static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
177 Value *Address = CGF.EmitScalarExpr(E->getArg(0));
179 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
180 LV.setNontemporal(true);
181 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
184 static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
185 llvm::AtomicRMWInst::BinOp Kind,
187 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
190 /// Utility to insert an atomic instruction based Intrinsic::ID and
191 /// the expression node, where the return value is the result of the
193 static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
194 llvm::AtomicRMWInst::BinOp Kind,
196 Instruction::BinaryOps Op,
197 bool Invert = false) {
198 QualType T = E->getType();
199 assert(E->getArg(0)->getType()->isPointerType());
200 assert(CGF.getContext().hasSameUnqualifiedType(T,
201 E->getArg(0)->getType()->getPointeeType()));
202 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
204 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
205 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
207 llvm::IntegerType *IntType =
208 llvm::IntegerType::get(CGF.getLLVMContext(),
209 CGF.getContext().getTypeSize(T));
210 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
212 llvm::Value *Args[2];
213 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
214 llvm::Type *ValueType = Args[1]->getType();
215 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
216 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
218 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
219 Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
220 Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
223 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
224 llvm::ConstantInt::getAllOnesValue(IntType));
225 Result = EmitFromInt(CGF, Result, T, ValueType);
226 return RValue::get(Result);
229 /// Utility to insert an atomic cmpxchg instruction.
231 /// @param CGF The current codegen function.
232 /// @param E Builtin call expression to convert to cmpxchg.
233 /// arg0 - address to operate on
234 /// arg1 - value to compare with
236 /// @param ReturnBool Specifies whether to return success flag of
237 /// cmpxchg result or the old value.
239 /// @returns result of cmpxchg, according to ReturnBool
241 /// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
242 /// invoke the function EmitAtomicCmpXchgForMSIntrin.
243 static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
245 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
246 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
247 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
249 llvm::IntegerType *IntType = llvm::IntegerType::get(
250 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
251 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
254 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
255 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
256 llvm::Type *ValueType = Args[1]->getType();
257 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
258 Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
260 Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
261 Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
262 llvm::AtomicOrdering::SequentiallyConsistent);
264 // Extract boolean success flag and zext it to int.
265 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
266 CGF.ConvertType(E->getType()));
268 // Extract old value and emit it using the same type as compare value.
269 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
273 /// This function should be invoked to emit atomic cmpxchg for Microsoft's
274 /// _InterlockedCompareExchange* intrinsics which have the following signature:
275 /// T _InterlockedCompareExchange(T volatile *Destination,
279 /// Whereas the llvm 'cmpxchg' instruction has the following syntax:
280 /// cmpxchg *Destination, Comparand, Exchange.
281 /// So we need to swap Comparand and Exchange when invoking
282 /// CreateAtomicCmpXchg. That is the reason we could not use the above utility
283 /// function MakeAtomicCmpXchgValue since it expects the arguments to be
287 Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
288 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
289 assert(E->getArg(0)->getType()->isPointerType());
290 assert(CGF.getContext().hasSameUnqualifiedType(
291 E->getType(), E->getArg(0)->getType()->getPointeeType()));
292 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
293 E->getArg(1)->getType()));
294 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
295 E->getArg(2)->getType()));
297 auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
298 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
299 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
301 // For Release ordering, the failure ordering should be Monotonic.
302 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
303 AtomicOrdering::Monotonic :
306 auto *Result = CGF.Builder.CreateAtomicCmpXchg(
307 Destination, Comparand, Exchange,
308 SuccessOrdering, FailureOrdering);
309 Result->setVolatile(true);
310 return CGF.Builder.CreateExtractValue(Result, 0);
313 static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
314 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
315 assert(E->getArg(0)->getType()->isPointerType());
317 auto *IntTy = CGF.ConvertType(E->getType());
318 auto *Result = CGF.Builder.CreateAtomicRMW(
320 CGF.EmitScalarExpr(E->getArg(0)),
321 ConstantInt::get(IntTy, 1),
323 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
326 static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
327 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
328 assert(E->getArg(0)->getType()->isPointerType());
330 auto *IntTy = CGF.ConvertType(E->getType());
331 auto *Result = CGF.Builder.CreateAtomicRMW(
333 CGF.EmitScalarExpr(E->getArg(0)),
334 ConstantInt::get(IntTy, 1),
336 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
339 // Build a plain volatile load.
340 static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
341 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
342 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
343 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
345 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
346 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
347 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
348 Load->setVolatile(true);
352 // Build a plain volatile store.
353 static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
354 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
355 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
356 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
357 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
359 llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
360 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
361 llvm::StoreInst *Store =
362 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
363 Store->setVolatile(true);
367 // Emit a simple mangled intrinsic that has 1 argument and a return type
368 // matching the argument type. Depending on mode, this may be a constrained
369 // floating-point intrinsic.
370 static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
371 const CallExpr *E, unsigned IntrinsicID,
372 unsigned ConstrainedIntrinsicID) {
373 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
375 if (CGF.Builder.getIsFPConstrained()) {
376 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
377 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
379 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
380 return CGF.Builder.CreateCall(F, Src0);
384 // Emit an intrinsic that has 2 operands of the same type as its result.
385 // Depending on mode, this may be a constrained floating-point intrinsic.
386 static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
387 const CallExpr *E, unsigned IntrinsicID,
388 unsigned ConstrainedIntrinsicID) {
389 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
390 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
392 if (CGF.Builder.getIsFPConstrained()) {
393 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
394 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
396 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
397 return CGF.Builder.CreateCall(F, { Src0, Src1 });
401 // Emit an intrinsic that has 3 operands of the same type as its result.
402 // Depending on mode, this may be a constrained floating-point intrinsic.
403 static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
404 const CallExpr *E, unsigned IntrinsicID,
405 unsigned ConstrainedIntrinsicID) {
406 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
407 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
408 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
410 if (CGF.Builder.getIsFPConstrained()) {
411 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
412 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
414 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
415 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
419 // Emit an intrinsic where all operands are of the same type as the result.
420 // Depending on mode, this may be a constrained floating-point intrinsic.
421 static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
422 unsigned IntrinsicID,
423 unsigned ConstrainedIntrinsicID,
425 ArrayRef<Value *> Args) {
427 if (CGF.Builder.getIsFPConstrained())
428 F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
430 F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
432 if (CGF.Builder.getIsFPConstrained())
433 return CGF.Builder.CreateConstrainedFPCall(F, Args);
435 return CGF.Builder.CreateCall(F, Args);
438 // Emit a simple mangled intrinsic that has 1 argument and a return type
439 // matching the argument type.
440 static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
442 unsigned IntrinsicID) {
443 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
445 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
446 return CGF.Builder.CreateCall(F, Src0);
449 // Emit an intrinsic that has 2 operands of the same type as its result.
450 static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
452 unsigned IntrinsicID) {
453 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
454 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
456 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
457 return CGF.Builder.CreateCall(F, { Src0, Src1 });
460 // Emit an intrinsic that has 3 operands of the same type as its result.
461 static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
463 unsigned IntrinsicID) {
464 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
465 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
466 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
468 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
469 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
472 // Emit an intrinsic that has 1 float or double operand, and 1 integer.
473 static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
475 unsigned IntrinsicID) {
476 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
477 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
479 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
480 return CGF.Builder.CreateCall(F, {Src0, Src1});
483 // Emit an intrinsic that has overloaded integer result and fp operand.
485 emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
486 unsigned IntrinsicID,
487 unsigned ConstrainedIntrinsicID) {
488 llvm::Type *ResultType = CGF.ConvertType(E->getType());
489 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
491 if (CGF.Builder.getIsFPConstrained()) {
492 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
493 {ResultType, Src0->getType()});
494 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
497 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
498 return CGF.Builder.CreateCall(F, Src0);
502 /// EmitFAbs - Emit a call to @llvm.fabs().
503 static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
504 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
505 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
506 Call->setDoesNotAccessMemory();
510 /// Emit the computation of the sign bit for a floating point value. Returns
511 /// the i1 sign bit value.
512 static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
513 LLVMContext &C = CGF.CGM.getLLVMContext();
515 llvm::Type *Ty = V->getType();
516 int Width = Ty->getPrimitiveSizeInBits();
517 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
518 V = CGF.Builder.CreateBitCast(V, IntTy);
519 if (Ty->isPPC_FP128Ty()) {
520 // We want the sign bit of the higher-order double. The bitcast we just
521 // did works as if the double-double was stored to memory and then
522 // read as an i128. The "store" will put the higher-order double in the
523 // lower address in both little- and big-Endian modes, but the "load"
524 // will treat those bits as a different part of the i128: the low bits in
525 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
526 // we need to shift the high bits down to the low before truncating.
528 if (CGF.getTarget().isBigEndian()) {
529 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
530 V = CGF.Builder.CreateLShr(V, ShiftCst);
532 // We are truncating value in order to extract the higher-order
533 // double, which we will be using to extract the sign from.
534 IntTy = llvm::IntegerType::get(C, Width);
535 V = CGF.Builder.CreateTrunc(V, IntTy);
537 Value *Zero = llvm::Constant::getNullValue(IntTy);
538 return CGF.Builder.CreateICmpSLT(V, Zero);
541 static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
542 const CallExpr *E, llvm::Constant *calleeValue) {
543 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
544 return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
547 /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
548 /// depending on IntrinsicID.
550 /// \arg CGF The current codegen function.
551 /// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
552 /// \arg X The first argument to the llvm.*.with.overflow.*.
553 /// \arg Y The second argument to the llvm.*.with.overflow.*.
554 /// \arg Carry The carry returned by the llvm.*.with.overflow.*.
555 /// \returns The result (i.e. sum/product) returned by the intrinsic.
556 static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
557 const llvm::Intrinsic::ID IntrinsicID,
558 llvm::Value *X, llvm::Value *Y,
559 llvm::Value *&Carry) {
560 // Make sure we have integers of the same width.
561 assert(X->getType() == Y->getType() &&
562 "Arguments must be the same type. (Did you forget to make sure both "
563 "arguments have the same integer width?)");
565 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
566 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
567 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
568 return CGF.Builder.CreateExtractValue(Tmp, 0);
571 static Value *emitRangedBuiltin(CodeGenFunction &CGF,
572 unsigned IntrinsicID,
574 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
575 llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
576 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
577 llvm::Instruction *Call = CGF.Builder.CreateCall(F);
578 Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
583 struct WidthAndSignedness {
589 static WidthAndSignedness
590 getIntegerWidthAndSignedness(const clang::ASTContext &context,
591 const clang::QualType Type) {
592 assert(Type->isIntegerType() && "Given type is not an integer.");
593 unsigned Width = Type->isBooleanType() ? 1
594 : Type->isExtIntType() ? context.getIntWidth(Type)
595 : context.getTypeInfo(Type).Width;
596 bool Signed = Type->isSignedIntegerType();
597 return {Width, Signed};
600 // Given one or more integer types, this function produces an integer type that
601 // encompasses them: any value in one of the given types could be expressed in
602 // the encompassing type.
603 static struct WidthAndSignedness
604 EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
605 assert(Types.size() > 0 && "Empty list of types.");
607 // If any of the given types is signed, we must return a signed type.
609 for (const auto &Type : Types) {
610 Signed |= Type.Signed;
613 // The encompassing type must have a width greater than or equal to the width
614 // of the specified types. Additionally, if the encompassing type is signed,
615 // its width must be strictly greater than the width of any unsigned types
618 for (const auto &Type : Types) {
619 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
620 if (Width < MinWidth) {
625 return {Width, Signed};
628 Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
629 llvm::Type *DestType = Int8PtrTy;
630 if (ArgValue->getType() != DestType)
632 Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
634 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
635 return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
638 /// Checks if using the result of __builtin_object_size(p, @p From) in place of
639 /// __builtin_object_size(p, @p To) is correct
640 static bool areBOSTypesCompatible(int From, int To) {
641 // Note: Our __builtin_object_size implementation currently treats Type=0 and
642 // Type=2 identically. Encoding this implementation detail here may make
643 // improving __builtin_object_size difficult in the future, so it's omitted.
644 return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
648 getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
649 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
653 CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
654 llvm::IntegerType *ResType,
655 llvm::Value *EmittedE,
658 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
659 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
660 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
663 /// Returns a Value corresponding to the size of the given expression.
664 /// This Value may be either of the following:
665 /// - A llvm::Argument (if E is a param with the pass_object_size attribute on
667 /// - A call to the @llvm.objectsize intrinsic
669 /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
670 /// and we wouldn't otherwise try to reference a pass_object_size parameter,
671 /// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
673 CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
674 llvm::IntegerType *ResType,
675 llvm::Value *EmittedE, bool IsDynamic) {
676 // We need to reference an argument if the pointer is a parameter with the
677 // pass_object_size attribute.
678 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
679 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
680 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
681 if (Param != nullptr && PS != nullptr &&
682 areBOSTypesCompatible(PS->getType(), Type)) {
683 auto Iter = SizeArguments.find(Param);
684 assert(Iter != SizeArguments.end());
686 const ImplicitParamDecl *D = Iter->second;
687 auto DIter = LocalDeclMap.find(D);
688 assert(DIter != LocalDeclMap.end());
690 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
691 getContext().getSizeType(), E->getBeginLoc());
695 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
696 // evaluate E for side-effects. In either case, we shouldn't lower to
698 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
699 return getDefaultBuiltinObjectSizeResult(Type, ResType);
701 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
702 assert(Ptr->getType()->isPointerTy() &&
703 "Non-pointer passed to __builtin_object_size?");
706 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
708 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
709 Value *Min = Builder.getInt1((Type & 2) != 0);
710 // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
711 Value *NullIsUnknown = Builder.getTrue();
712 Value *Dynamic = Builder.getInt1(IsDynamic);
713 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
717 /// A struct to generically describe a bit test intrinsic.
719 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
720 enum InterlockingKind : uint8_t {
729 InterlockingKind Interlocking;
732 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
736 BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
738 // Main portable variants.
739 case Builtin::BI_bittest:
740 return {TestOnly, Unlocked, false};
741 case Builtin::BI_bittestandcomplement:
742 return {Complement, Unlocked, false};
743 case Builtin::BI_bittestandreset:
744 return {Reset, Unlocked, false};
745 case Builtin::BI_bittestandset:
746 return {Set, Unlocked, false};
747 case Builtin::BI_interlockedbittestandreset:
748 return {Reset, Sequential, false};
749 case Builtin::BI_interlockedbittestandset:
750 return {Set, Sequential, false};
752 // X86-specific 64-bit variants.
753 case Builtin::BI_bittest64:
754 return {TestOnly, Unlocked, true};
755 case Builtin::BI_bittestandcomplement64:
756 return {Complement, Unlocked, true};
757 case Builtin::BI_bittestandreset64:
758 return {Reset, Unlocked, true};
759 case Builtin::BI_bittestandset64:
760 return {Set, Unlocked, true};
761 case Builtin::BI_interlockedbittestandreset64:
762 return {Reset, Sequential, true};
763 case Builtin::BI_interlockedbittestandset64:
764 return {Set, Sequential, true};
766 // ARM/AArch64-specific ordering variants.
767 case Builtin::BI_interlockedbittestandset_acq:
768 return {Set, Acquire, false};
769 case Builtin::BI_interlockedbittestandset_rel:
770 return {Set, Release, false};
771 case Builtin::BI_interlockedbittestandset_nf:
772 return {Set, NoFence, false};
773 case Builtin::BI_interlockedbittestandreset_acq:
774 return {Reset, Acquire, false};
775 case Builtin::BI_interlockedbittestandreset_rel:
776 return {Reset, Release, false};
777 case Builtin::BI_interlockedbittestandreset_nf:
778 return {Reset, NoFence, false};
780 llvm_unreachable("expected only bittest intrinsics");
783 static char bitActionToX86BTCode(BitTest::ActionKind A) {
785 case BitTest::TestOnly: return '\0';
786 case BitTest::Complement: return 'c';
787 case BitTest::Reset: return 'r';
788 case BitTest::Set: return 's';
790 llvm_unreachable("invalid action");
793 static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
795 const CallExpr *E, Value *BitBase,
797 char Action = bitActionToX86BTCode(BT.Action);
798 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
800 // Build the assembly.
802 raw_svector_ostream AsmOS(Asm);
803 if (BT.Interlocking != BitTest::Unlocked)
808 AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";
810 // Build the constraints. FIXME: We should support immediates when possible.
811 std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
812 llvm::IntegerType *IntType = llvm::IntegerType::get(
813 CGF.getLLVMContext(),
814 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
815 llvm::Type *IntPtrType = IntType->getPointerTo();
816 llvm::FunctionType *FTy =
817 llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
819 llvm::InlineAsm *IA =
820 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
821 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
824 static llvm::AtomicOrdering
825 getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
827 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
828 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
829 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
830 case BitTest::Release: return llvm::AtomicOrdering::Release;
831 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
833 llvm_unreachable("invalid interlocking");
836 /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
837 /// bits and a bit position and read and optionally modify the bit at that
838 /// position. The position index can be arbitrarily large, i.e. it can be larger
839 /// than 31 or 63, so we need an indexed load in the general case.
840 static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
843 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
844 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
846 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
848 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
849 // indexing operation internally. Use them if possible.
850 if (CGF.getTarget().getTriple().isX86())
851 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
853 // Otherwise, use generic code to load one byte and test the bit. Use all but
854 // the bottom three bits as the array index, and the bottom three bits to form
856 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
857 Value *ByteIndex = CGF.Builder.CreateAShr(
858 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
859 Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
860 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
861 ByteIndex, "bittest.byteaddr"),
864 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
865 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
867 // The updating instructions will need a mask.
868 Value *Mask = nullptr;
869 if (BT.Action != BitTest::TestOnly) {
870 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
874 // Check the action and ordering of the interlocked intrinsics.
875 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
877 Value *OldByte = nullptr;
878 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
879 // Emit a combined atomicrmw load/store operation for the interlocked
881 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
882 if (BT.Action == BitTest::Reset) {
883 Mask = CGF.Builder.CreateNot(Mask);
884 RMWOp = llvm::AtomicRMWInst::And;
886 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
889 // Emit a plain load for the non-interlocked intrinsics.
890 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
891 Value *NewByte = nullptr;
893 case BitTest::TestOnly:
894 // Don't store anything.
896 case BitTest::Complement:
897 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
900 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
903 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
907 CGF.Builder.CreateStore(NewByte, ByteAddr);
910 // However we loaded the old byte, either by plain load or atomicrmw, shift
911 // the bit into the low position and mask it to 0 or 1.
912 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
913 return CGF.Builder.CreateAnd(
914 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
918 enum class MSVCSetJmpKind {
925 /// MSVC handles setjmp a bit differently on different platforms. On every
926 /// architecture except 32-bit x86, the frame address is passed. On x86, extra
927 /// parameters can be passed as variadic arguments, but we always pass none.
928 static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
930 llvm::Value *Arg1 = nullptr;
931 llvm::Type *Arg1Ty = nullptr;
933 bool IsVarArg = false;
934 if (SJKind == MSVCSetJmpKind::_setjmp3) {
936 Arg1Ty = CGF.Int32Ty;
937 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
940 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
941 Arg1Ty = CGF.Int8PtrTy;
942 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
943 Arg1 = CGF.Builder.CreateCall(
944 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
946 Arg1 = CGF.Builder.CreateCall(
947 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
948 llvm::ConstantInt::get(CGF.Int32Ty, 0));
951 // Mark the call site and declaration with ReturnsTwice.
952 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
953 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
954 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
955 llvm::Attribute::ReturnsTwice);
956 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
957 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
958 ReturnsTwiceAttr, /*Local=*/true);
960 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
961 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
962 llvm::Value *Args[] = {Buf, Arg1};
963 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
964 CB->setAttributes(ReturnsTwiceAttr);
965 return RValue::get(CB);
968 // Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
969 // we handle them here.
970 enum class CodeGenFunction::MSVCIntrin {
974 _InterlockedDecrement,
975 _InterlockedExchange,
976 _InterlockedExchangeAdd,
977 _InterlockedExchangeSub,
978 _InterlockedIncrement,
981 _InterlockedExchangeAdd_acq,
982 _InterlockedExchangeAdd_rel,
983 _InterlockedExchangeAdd_nf,
984 _InterlockedExchange_acq,
985 _InterlockedExchange_rel,
986 _InterlockedExchange_nf,
987 _InterlockedCompareExchange_acq,
988 _InterlockedCompareExchange_rel,
989 _InterlockedCompareExchange_nf,
999 _InterlockedIncrement_acq,
1000 _InterlockedIncrement_rel,
1001 _InterlockedIncrement_nf,
1002 _InterlockedDecrement_acq,
1003 _InterlockedDecrement_rel,
1004 _InterlockedDecrement_nf,
1008 Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1009 const CallExpr *E) {
1010 switch (BuiltinID) {
1011 case MSVCIntrin::_BitScanForward:
1012 case MSVCIntrin::_BitScanReverse: {
1013 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1015 llvm::Type *ArgType = ArgValue->getType();
1016 llvm::Type *IndexType =
1017 EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
1018 llvm::Type *ResultType = ConvertType(E->getType());
1020 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1021 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1022 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1024 BasicBlock *Begin = Builder.GetInsertBlock();
1025 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1026 Builder.SetInsertPoint(End);
1027 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1029 Builder.SetInsertPoint(Begin);
1030 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1031 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1032 Builder.CreateCondBr(IsZero, End, NotZero);
1033 Result->addIncoming(ResZero, Begin);
1035 Builder.SetInsertPoint(NotZero);
1036 Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
1038 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1039 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1040 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1041 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1042 Builder.CreateStore(ZeroCount, IndexAddress, false);
1044 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1045 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1047 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1048 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1049 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1050 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1051 Builder.CreateStore(Index, IndexAddress, false);
1053 Builder.CreateBr(End);
1054 Result->addIncoming(ResOne, NotZero);
1056 Builder.SetInsertPoint(End);
1059 case MSVCIntrin::_InterlockedAnd:
1060 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1061 case MSVCIntrin::_InterlockedExchange:
1062 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1063 case MSVCIntrin::_InterlockedExchangeAdd:
1064 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1065 case MSVCIntrin::_InterlockedExchangeSub:
1066 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1067 case MSVCIntrin::_InterlockedOr:
1068 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1069 case MSVCIntrin::_InterlockedXor:
1070 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1071 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1072 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1073 AtomicOrdering::Acquire);
1074 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1075 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1076 AtomicOrdering::Release);
1077 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1078 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1079 AtomicOrdering::Monotonic);
1080 case MSVCIntrin::_InterlockedExchange_acq:
1081 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1082 AtomicOrdering::Acquire);
1083 case MSVCIntrin::_InterlockedExchange_rel:
1084 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1085 AtomicOrdering::Release);
1086 case MSVCIntrin::_InterlockedExchange_nf:
1087 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1088 AtomicOrdering::Monotonic);
1089 case MSVCIntrin::_InterlockedCompareExchange_acq:
1090 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1091 case MSVCIntrin::_InterlockedCompareExchange_rel:
1092 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1093 case MSVCIntrin::_InterlockedCompareExchange_nf:
1094 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1095 case MSVCIntrin::_InterlockedOr_acq:
1096 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1097 AtomicOrdering::Acquire);
1098 case MSVCIntrin::_InterlockedOr_rel:
1099 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1100 AtomicOrdering::Release);
1101 case MSVCIntrin::_InterlockedOr_nf:
1102 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1103 AtomicOrdering::Monotonic);
1104 case MSVCIntrin::_InterlockedXor_acq:
1105 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1106 AtomicOrdering::Acquire);
1107 case MSVCIntrin::_InterlockedXor_rel:
1108 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1109 AtomicOrdering::Release);
1110 case MSVCIntrin::_InterlockedXor_nf:
1111 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1112 AtomicOrdering::Monotonic);
1113 case MSVCIntrin::_InterlockedAnd_acq:
1114 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1115 AtomicOrdering::Acquire);
1116 case MSVCIntrin::_InterlockedAnd_rel:
1117 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1118 AtomicOrdering::Release);
1119 case MSVCIntrin::_InterlockedAnd_nf:
1120 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1121 AtomicOrdering::Monotonic);
1122 case MSVCIntrin::_InterlockedIncrement_acq:
1123 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1124 case MSVCIntrin::_InterlockedIncrement_rel:
1125 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1126 case MSVCIntrin::_InterlockedIncrement_nf:
1127 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1128 case MSVCIntrin::_InterlockedDecrement_acq:
1129 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1130 case MSVCIntrin::_InterlockedDecrement_rel:
1131 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1132 case MSVCIntrin::_InterlockedDecrement_nf:
1133 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1135 case MSVCIntrin::_InterlockedDecrement:
1136 return EmitAtomicDecrementValue(*this, E);
1137 case MSVCIntrin::_InterlockedIncrement:
1138 return EmitAtomicIncrementValue(*this, E);
1140 case MSVCIntrin::__fastfail: {
1141 // Request immediate process termination from the kernel. The instruction
1142 // sequences to do this are documented on MSDN:
1143 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1144 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1145 StringRef Asm, Constraints;
1148 ErrorUnsupported(E, "__fastfail call for this architecture");
1150 case llvm::Triple::x86:
1151 case llvm::Triple::x86_64:
1153 Constraints = "{cx}";
1155 case llvm::Triple::thumb:
1157 Constraints = "{r0}";
1159 case llvm::Triple::aarch64:
1160 Asm = "brk #0xF003";
1161 Constraints = "{w0}";
1163 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1164 llvm::InlineAsm *IA =
1165 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1166 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1167 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1168 llvm::Attribute::NoReturn);
1169 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1170 CI->setAttributes(NoReturnAttr);
1174 llvm_unreachable("Incorrect MSVC intrinsic!");
1178 // ARC cleanup for __builtin_os_log_format
1179 struct CallObjCArcUse final : EHScopeStack::Cleanup {
1180 CallObjCArcUse(llvm::Value *object) : object(object) {}
1181 llvm::Value *object;
1183 void Emit(CodeGenFunction &CGF, Flags flags) override {
1184 CGF.EmitARCIntrinsicUse(object);
1189 Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1190 BuiltinCheckKind Kind) {
1191 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1192 && "Unsupported builtin check kind");
1194 Value *ArgValue = EmitScalarExpr(E);
1195 if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
1198 SanitizerScope SanScope(this);
1199 Value *Cond = Builder.CreateICmpNE(
1200 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1201 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1202 SanitizerHandler::InvalidBuiltin,
1203 {EmitCheckSourceLocation(E->getExprLoc()),
1204 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1209 /// Get the argument type for arguments to os_log_helper.
1210 static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1211 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1212 return C.getCanonicalType(UnsignedTy);
1215 llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1216 const analyze_os_log::OSLogBufferLayout &Layout,
1217 CharUnits BufferAlignment) {
1218 ASTContext &Ctx = getContext();
1220 llvm::SmallString<64> Name;
1222 raw_svector_ostream OS(Name);
1223 OS << "__os_log_helper";
1224 OS << "_" << BufferAlignment.getQuantity();
1225 OS << "_" << int(Layout.getSummaryByte());
1226 OS << "_" << int(Layout.getNumArgsByte());
1227 for (const auto &Item : Layout.Items)
1228 OS << "_" << int(Item.getSizeByte()) << "_"
1229 << int(Item.getDescriptorByte());
1232 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1235 llvm::SmallVector<QualType, 4> ArgTys;
1236 FunctionArgList Args;
1237 Args.push_back(ImplicitParamDecl::Create(
1238 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1239 ImplicitParamDecl::Other));
1240 ArgTys.emplace_back(Ctx.VoidPtrTy);
1242 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1243 char Size = Layout.Items[I].getSizeByte();
1247 QualType ArgTy = getOSLogArgType(Ctx, Size);
1248 Args.push_back(ImplicitParamDecl::Create(
1249 Ctx, nullptr, SourceLocation(),
1250 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1251 ImplicitParamDecl::Other));
1252 ArgTys.emplace_back(ArgTy);
1255 QualType ReturnTy = Ctx.VoidTy;
1256 QualType FuncionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
1258 // The helper function has linkonce_odr linkage to enable the linker to merge
1259 // identical functions. To ensure the merging always happens, 'noinline' is
1260 // attached to the function when compiling with -Oz.
1261 const CGFunctionInfo &FI =
1262 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1263 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1264 llvm::Function *Fn = llvm::Function::Create(
1265 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1266 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1267 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
1268 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1269 Fn->setDoesNotThrow();
1271 // Attach 'noinline' at -Oz.
1272 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1273 Fn->addFnAttr(llvm::Attribute::NoInline);
1275 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1276 IdentifierInfo *II = &Ctx.Idents.get(Name);
1277 FunctionDecl *FD = FunctionDecl::Create(
1278 Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
1279 FuncionTy, nullptr, SC_PrivateExtern, false, false);
1280 // Avoid generating debug location info for the function.
1283 StartFunction(FD, ReturnTy, Fn, FI, Args);
1285 // Create a scope with an artificial location for the body of this function.
1286 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1289 Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
1291 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1292 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1293 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1294 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1297 for (const auto &Item : Layout.Items) {
1298 Builder.CreateStore(
1299 Builder.getInt8(Item.getDescriptorByte()),
1300 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1301 Builder.CreateStore(
1302 Builder.getInt8(Item.getSizeByte()),
1303 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1305 CharUnits Size = Item.size();
1306 if (!Size.getQuantity())
1309 Address Arg = GetAddrOfLocalVar(Args[I]);
1310 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1311 Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
1313 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1323 RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1324 assert(E.getNumArgs() >= 2 &&
1325 "__builtin_os_log_format takes at least 2 arguments");
1326 ASTContext &Ctx = getContext();
1327 analyze_os_log::OSLogBufferLayout Layout;
1328 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1329 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1330 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1332 // Ignore argument 1, the format string. It is not currently used.
1334 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1336 for (const auto &Item : Layout.Items) {
1337 int Size = Item.getSizeByte();
1341 llvm::Value *ArgVal;
1343 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1345 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1346 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1347 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1348 } else if (const Expr *TheExpr = Item.getExpr()) {
1349 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
1351 // If a temporary object that requires destruction after the full
1352 // expression is passed, push a lifetime-extended cleanup to extend its
1353 // lifetime to the end of the enclosing block scope.
1354 auto LifetimeExtendObject = [&](const Expr *E) {
1355 E = E->IgnoreParenCasts();
1356 // Extend lifetimes of objects returned by function calls and message
1359 // FIXME: We should do this in other cases in which temporaries are
1360 // created including arguments of non-ARC types (e.g., C++
1362 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
1367 if (TheExpr->getType()->isObjCRetainableType() &&
1368 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
1369 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1370 "Only scalar can be a ObjC retainable type");
1371 if (!isa<Constant>(ArgVal)) {
1372 CleanupKind Cleanup = getARCCleanupKind();
1373 QualType Ty = TheExpr->getType();
1374 Address Alloca = Address::invalid();
1375 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
1376 ArgVal = EmitARCRetain(Ty, ArgVal);
1377 Builder.CreateStore(ArgVal, Addr);
1378 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
1379 CodeGenFunction::destroyARCStrongPrecise,
1380 Cleanup & EHCleanup);
1382 // Push a clang.arc.use call to ensure ARC optimizer knows that the
1383 // argument has to be alive.
1384 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
1385 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
1389 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
1392 unsigned ArgValSize =
1393 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
1394 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
1396 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
1397 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
1398 // If ArgVal has type x86_fp80, zero-extend ArgVal.
1399 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
1400 Args.add(RValue::get(ArgVal), ArgTy);
1403 const CGFunctionInfo &FI =
1404 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
1405 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
1406 Layout, BufAddr.getAlignment());
1407 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
1408 return RValue::get(BufAddr.getPointer());
1411 /// Determine if a binop is a checked mixed-sign multiply we can specialize.
1412 static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
1413 WidthAndSignedness Op1Info,
1414 WidthAndSignedness Op2Info,
1415 WidthAndSignedness ResultInfo) {
1416 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1417 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
1418 Op1Info.Signed != Op2Info.Signed;
1421 /// Emit a checked mixed-sign multiply. This is a cheaper specialization of
1422 /// the generic checked-binop irgen.
1424 EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
1425 WidthAndSignedness Op1Info, const clang::Expr *Op2,
1426 WidthAndSignedness Op2Info,
1427 const clang::Expr *ResultArg, QualType ResultQTy,
1428 WidthAndSignedness ResultInfo) {
1429 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1430 Op2Info, ResultInfo) &&
1431 "Not a mixed-sign multipliction we can specialize");
1433 // Emit the signed and unsigned operands.
1434 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
1435 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
1436 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
1437 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
1438 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
1439 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
1441 // One of the operands may be smaller than the other. If so, [s|z]ext it.
1442 if (SignedOpWidth < UnsignedOpWidth)
1443 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
1444 if (UnsignedOpWidth < SignedOpWidth)
1445 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
1447 llvm::Type *OpTy = Signed->getType();
1448 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
1449 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1450 llvm::Type *ResTy = ResultPtr.getElementType();
1451 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
1453 // Take the absolute value of the signed operand.
1454 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
1455 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
1456 llvm::Value *AbsSigned =
1457 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
1459 // Perform a checked unsigned multiplication.
1460 llvm::Value *UnsignedOverflow;
1461 llvm::Value *UnsignedResult =
1462 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
1463 Unsigned, UnsignedOverflow);
1465 llvm::Value *Overflow, *Result;
1466 if (ResultInfo.Signed) {
1467 // Signed overflow occurs if the result is greater than INT_MAX or lesser
1468 // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
1470 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
1471 llvm::Value *MaxResult =
1472 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
1473 CGF.Builder.CreateZExt(IsNegative, OpTy));
1474 llvm::Value *SignedOverflow =
1475 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
1476 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
1478 // Prepare the signed result (possibly by negating it).
1479 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
1480 llvm::Value *SignedResult =
1481 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
1482 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
1484 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
1485 llvm::Value *Underflow = CGF.Builder.CreateAnd(
1486 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
1487 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
1488 if (ResultInfo.Width < OpWidth) {
1490 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
1491 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
1492 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
1493 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
1496 // Negate the product if it would be negative in infinite precision.
1497 Result = CGF.Builder.CreateSelect(
1498 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
1500 Result = CGF.Builder.CreateTrunc(Result, ResTy);
1502 assert(Overflow && Result && "Missing overflow or result");
1505 ResultArg->getType()->getPointeeType().isVolatileQualified();
1506 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1508 return RValue::get(Overflow);
1511 static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
1512 Value *&RecordPtr, CharUnits Align,
1513 llvm::FunctionCallee Func, int Lvl) {
1514 ASTContext &Context = CGF.getContext();
1515 RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
1516 std::string Pad = std::string(Lvl * 4, ' ');
1519 CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
1520 Value *Res = CGF.Builder.CreateCall(Func, {GString});
1522 static llvm::DenseMap<QualType, const char *> Types;
1523 if (Types.empty()) {
1524 Types[Context.CharTy] = "%c";
1525 Types[Context.BoolTy] = "%d";
1526 Types[Context.SignedCharTy] = "%hhd";
1527 Types[Context.UnsignedCharTy] = "%hhu";
1528 Types[Context.IntTy] = "%d";
1529 Types[Context.UnsignedIntTy] = "%u";
1530 Types[Context.LongTy] = "%ld";
1531 Types[Context.UnsignedLongTy] = "%lu";
1532 Types[Context.LongLongTy] = "%lld";
1533 Types[Context.UnsignedLongLongTy] = "%llu";
1534 Types[Context.ShortTy] = "%hd";
1535 Types[Context.UnsignedShortTy] = "%hu";
1536 Types[Context.VoidPtrTy] = "%p";
1537 Types[Context.FloatTy] = "%f";
1538 Types[Context.DoubleTy] = "%f";
1539 Types[Context.LongDoubleTy] = "%Lf";
1540 Types[Context.getPointerType(Context.CharTy)] = "%s";
1541 Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
1544 for (const auto *FD : RD->fields()) {
1545 Value *FieldPtr = RecordPtr;
1547 FieldPtr = CGF.Builder.CreatePointerCast(
1548 FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
1550 FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
1551 FD->getFieldIndex());
1553 GString = CGF.Builder.CreateGlobalStringPtr(
1555 .concat(FD->getType().getAsString())
1556 .concat(llvm::Twine(' '))
1557 .concat(FD->getNameAsString())
1560 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
1561 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1563 QualType CanonicalType =
1564 FD->getType().getUnqualifiedType().getCanonicalType();
1566 // We check whether we are in a recursive type
1567 if (CanonicalType->isRecordType()) {
1568 TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
1569 Res = CGF.Builder.CreateAdd(TmpRes, Res);
1573 // We try to determine the best format to print the current field
1574 llvm::Twine Format = Types.find(CanonicalType) == Types.end()
1575 ? Types[Context.VoidPtrTy]
1576 : Types[CanonicalType];
1578 Address FieldAddress = Address(FieldPtr, Align);
1579 FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
1581 // FIXME Need to handle bitfield here
1582 GString = CGF.Builder.CreateGlobalStringPtr(
1583 Format.concat(llvm::Twine('\n')).str());
1584 TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
1585 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1588 GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
1589 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
1590 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1595 TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
1596 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
1597 if (const auto *Arr = Ctx.getAsArrayType(Ty))
1598 Ty = Ctx.getBaseElementType(Arr);
1600 const auto *Record = Ty->getAsCXXRecordDecl();
1604 // We've already checked this type, or are in the process of checking it.
1605 if (!Seen.insert(Record).second)
1608 assert(Record->hasDefinition() &&
1609 "Incomplete types should already be diagnosed");
1611 if (Record->isDynamicClass())
1614 for (FieldDecl *F : Record->fields()) {
1615 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
1621 /// Determine if the specified type requires laundering by checking if it is a
1622 /// dynamic class type or contains a subobject which is a dynamic class type.
1623 static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
1624 if (!CGM.getCodeGenOpts().StrictVTablePointers)
1626 llvm::SmallPtrSet<const Decl *, 16> Seen;
1627 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
1630 RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
1631 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
1632 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
1634 // The builtin's shift arg may have a different type than the source arg and
1635 // result, but the LLVM intrinsic uses the same type for all values.
1636 llvm::Type *Ty = Src->getType();
1637 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
1639 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
1640 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1641 Function *F = CGM.getIntrinsic(IID, Ty);
1642 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
1645 RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
1647 ReturnValueSlot ReturnValue) {
1648 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
1649 // See if we can constant fold this builtin. If so, don't emit it at all.
1650 Expr::EvalResult Result;
1651 if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
1652 !Result.hasSideEffects()) {
1653 if (Result.Val.isInt())
1654 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
1655 Result.Val.getInt()));
1656 if (Result.Val.isFloat())
1657 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
1658 Result.Val.getFloat()));
1661 // There are LLVM math intrinsics/instructions corresponding to math library
1662 // functions except the LLVM op will never set errno while the math library
1663 // might. Also, math builtins have the same semantics as their math library
1664 // twins. Thus, we can transform math library and builtin calls to their
1665 // LLVM counterparts if the call is marked 'const' (known to never set errno).
1666 if (FD->hasAttr<ConstAttr>()) {
1667 switch (BuiltinID) {
1668 case Builtin::BIceil:
1669 case Builtin::BIceilf:
1670 case Builtin::BIceill:
1671 case Builtin::BI__builtin_ceil:
1672 case Builtin::BI__builtin_ceilf:
1673 case Builtin::BI__builtin_ceilf16:
1674 case Builtin::BI__builtin_ceill:
1675 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1677 Intrinsic::experimental_constrained_ceil));
1679 case Builtin::BIcopysign:
1680 case Builtin::BIcopysignf:
1681 case Builtin::BIcopysignl:
1682 case Builtin::BI__builtin_copysign:
1683 case Builtin::BI__builtin_copysignf:
1684 case Builtin::BI__builtin_copysignf16:
1685 case Builtin::BI__builtin_copysignl:
1686 case Builtin::BI__builtin_copysignf128:
1687 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
1689 case Builtin::BIcos:
1690 case Builtin::BIcosf:
1691 case Builtin::BIcosl:
1692 case Builtin::BI__builtin_cos:
1693 case Builtin::BI__builtin_cosf:
1694 case Builtin::BI__builtin_cosf16:
1695 case Builtin::BI__builtin_cosl:
1696 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1698 Intrinsic::experimental_constrained_cos));
1700 case Builtin::BIexp:
1701 case Builtin::BIexpf:
1702 case Builtin::BIexpl:
1703 case Builtin::BI__builtin_exp:
1704 case Builtin::BI__builtin_expf:
1705 case Builtin::BI__builtin_expf16:
1706 case Builtin::BI__builtin_expl:
1707 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1709 Intrinsic::experimental_constrained_exp));
1711 case Builtin::BIexp2:
1712 case Builtin::BIexp2f:
1713 case Builtin::BIexp2l:
1714 case Builtin::BI__builtin_exp2:
1715 case Builtin::BI__builtin_exp2f:
1716 case Builtin::BI__builtin_exp2f16:
1717 case Builtin::BI__builtin_exp2l:
1718 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1720 Intrinsic::experimental_constrained_exp2));
1722 case Builtin::BIfabs:
1723 case Builtin::BIfabsf:
1724 case Builtin::BIfabsl:
1725 case Builtin::BI__builtin_fabs:
1726 case Builtin::BI__builtin_fabsf:
1727 case Builtin::BI__builtin_fabsf16:
1728 case Builtin::BI__builtin_fabsl:
1729 case Builtin::BI__builtin_fabsf128:
1730 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
1732 case Builtin::BIfloor:
1733 case Builtin::BIfloorf:
1734 case Builtin::BIfloorl:
1735 case Builtin::BI__builtin_floor:
1736 case Builtin::BI__builtin_floorf:
1737 case Builtin::BI__builtin_floorf16:
1738 case Builtin::BI__builtin_floorl:
1739 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1741 Intrinsic::experimental_constrained_floor));
1743 case Builtin::BIfma:
1744 case Builtin::BIfmaf:
1745 case Builtin::BIfmal:
1746 case Builtin::BI__builtin_fma:
1747 case Builtin::BI__builtin_fmaf:
1748 case Builtin::BI__builtin_fmaf16:
1749 case Builtin::BI__builtin_fmal:
1750 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
1752 Intrinsic::experimental_constrained_fma));
1754 case Builtin::BIfmax:
1755 case Builtin::BIfmaxf:
1756 case Builtin::BIfmaxl:
1757 case Builtin::BI__builtin_fmax:
1758 case Builtin::BI__builtin_fmaxf:
1759 case Builtin::BI__builtin_fmaxf16:
1760 case Builtin::BI__builtin_fmaxl:
1761 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1763 Intrinsic::experimental_constrained_maxnum));
1765 case Builtin::BIfmin:
1766 case Builtin::BIfminf:
1767 case Builtin::BIfminl:
1768 case Builtin::BI__builtin_fmin:
1769 case Builtin::BI__builtin_fminf:
1770 case Builtin::BI__builtin_fminf16:
1771 case Builtin::BI__builtin_fminl:
1772 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1774 Intrinsic::experimental_constrained_minnum));
1776 // fmod() is a special-case. It maps to the frem instruction rather than an
1778 case Builtin::BIfmod:
1779 case Builtin::BIfmodf:
1780 case Builtin::BIfmodl:
1781 case Builtin::BI__builtin_fmod:
1782 case Builtin::BI__builtin_fmodf:
1783 case Builtin::BI__builtin_fmodf16:
1784 case Builtin::BI__builtin_fmodl: {
1785 Value *Arg1 = EmitScalarExpr(E->getArg(0));
1786 Value *Arg2 = EmitScalarExpr(E->getArg(1));
1787 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
1790 case Builtin::BIlog:
1791 case Builtin::BIlogf:
1792 case Builtin::BIlogl:
1793 case Builtin::BI__builtin_log:
1794 case Builtin::BI__builtin_logf:
1795 case Builtin::BI__builtin_logf16:
1796 case Builtin::BI__builtin_logl:
1797 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1799 Intrinsic::experimental_constrained_log));
1801 case Builtin::BIlog10:
1802 case Builtin::BIlog10f:
1803 case Builtin::BIlog10l:
1804 case Builtin::BI__builtin_log10:
1805 case Builtin::BI__builtin_log10f:
1806 case Builtin::BI__builtin_log10f16:
1807 case Builtin::BI__builtin_log10l:
1808 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1810 Intrinsic::experimental_constrained_log10));
1812 case Builtin::BIlog2:
1813 case Builtin::BIlog2f:
1814 case Builtin::BIlog2l:
1815 case Builtin::BI__builtin_log2:
1816 case Builtin::BI__builtin_log2f:
1817 case Builtin::BI__builtin_log2f16:
1818 case Builtin::BI__builtin_log2l:
1819 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1821 Intrinsic::experimental_constrained_log2));
1823 case Builtin::BInearbyint:
1824 case Builtin::BInearbyintf:
1825 case Builtin::BInearbyintl:
1826 case Builtin::BI__builtin_nearbyint:
1827 case Builtin::BI__builtin_nearbyintf:
1828 case Builtin::BI__builtin_nearbyintl:
1829 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1830 Intrinsic::nearbyint,
1831 Intrinsic::experimental_constrained_nearbyint));
1833 case Builtin::BIpow:
1834 case Builtin::BIpowf:
1835 case Builtin::BIpowl:
1836 case Builtin::BI__builtin_pow:
1837 case Builtin::BI__builtin_powf:
1838 case Builtin::BI__builtin_powf16:
1839 case Builtin::BI__builtin_powl:
1840 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1842 Intrinsic::experimental_constrained_pow));
1844 case Builtin::BIrint:
1845 case Builtin::BIrintf:
1846 case Builtin::BIrintl:
1847 case Builtin::BI__builtin_rint:
1848 case Builtin::BI__builtin_rintf:
1849 case Builtin::BI__builtin_rintf16:
1850 case Builtin::BI__builtin_rintl:
1851 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1853 Intrinsic::experimental_constrained_rint));
1855 case Builtin::BIround:
1856 case Builtin::BIroundf:
1857 case Builtin::BIroundl:
1858 case Builtin::BI__builtin_round:
1859 case Builtin::BI__builtin_roundf:
1860 case Builtin::BI__builtin_roundf16:
1861 case Builtin::BI__builtin_roundl:
1862 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1864 Intrinsic::experimental_constrained_round));
1866 case Builtin::BIsin:
1867 case Builtin::BIsinf:
1868 case Builtin::BIsinl:
1869 case Builtin::BI__builtin_sin:
1870 case Builtin::BI__builtin_sinf:
1871 case Builtin::BI__builtin_sinf16:
1872 case Builtin::BI__builtin_sinl:
1873 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1875 Intrinsic::experimental_constrained_sin));
1877 case Builtin::BIsqrt:
1878 case Builtin::BIsqrtf:
1879 case Builtin::BIsqrtl:
1880 case Builtin::BI__builtin_sqrt:
1881 case Builtin::BI__builtin_sqrtf:
1882 case Builtin::BI__builtin_sqrtf16:
1883 case Builtin::BI__builtin_sqrtl:
1884 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1886 Intrinsic::experimental_constrained_sqrt));
1888 case Builtin::BItrunc:
1889 case Builtin::BItruncf:
1890 case Builtin::BItruncl:
1891 case Builtin::BI__builtin_trunc:
1892 case Builtin::BI__builtin_truncf:
1893 case Builtin::BI__builtin_truncf16:
1894 case Builtin::BI__builtin_truncl:
1895 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1897 Intrinsic::experimental_constrained_trunc));
1899 case Builtin::BIlround:
1900 case Builtin::BIlroundf:
1901 case Builtin::BIlroundl:
1902 case Builtin::BI__builtin_lround:
1903 case Builtin::BI__builtin_lroundf:
1904 case Builtin::BI__builtin_lroundl:
1905 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1906 *this, E, Intrinsic::lround,
1907 Intrinsic::experimental_constrained_lround));
1909 case Builtin::BIllround:
1910 case Builtin::BIllroundf:
1911 case Builtin::BIllroundl:
1912 case Builtin::BI__builtin_llround:
1913 case Builtin::BI__builtin_llroundf:
1914 case Builtin::BI__builtin_llroundl:
1915 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1916 *this, E, Intrinsic::llround,
1917 Intrinsic::experimental_constrained_llround));
1919 case Builtin::BIlrint:
1920 case Builtin::BIlrintf:
1921 case Builtin::BIlrintl:
1922 case Builtin::BI__builtin_lrint:
1923 case Builtin::BI__builtin_lrintf:
1924 case Builtin::BI__builtin_lrintl:
1925 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1926 *this, E, Intrinsic::lrint,
1927 Intrinsic::experimental_constrained_lrint));
1929 case Builtin::BIllrint:
1930 case Builtin::BIllrintf:
1931 case Builtin::BIllrintl:
1932 case Builtin::BI__builtin_llrint:
1933 case Builtin::BI__builtin_llrintf:
1934 case Builtin::BI__builtin_llrintl:
1935 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1936 *this, E, Intrinsic::llrint,
1937 Intrinsic::experimental_constrained_llrint));
1944 switch (BuiltinID) {
1946 case Builtin::BI__builtin___CFStringMakeConstantString:
1947 case Builtin::BI__builtin___NSStringMakeConstantString:
1948 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
1949 case Builtin::BI__builtin_stdarg_start:
1950 case Builtin::BI__builtin_va_start:
1951 case Builtin::BI__va_start:
1952 case Builtin::BI__builtin_va_end:
1954 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
1955 ? EmitScalarExpr(E->getArg(0))
1956 : EmitVAListRef(E->getArg(0)).getPointer(),
1957 BuiltinID != Builtin::BI__builtin_va_end));
1958 case Builtin::BI__builtin_va_copy: {
1959 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
1960 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
1962 llvm::Type *Type = Int8PtrTy;
1964 DstPtr = Builder.CreateBitCast(DstPtr, Type);
1965 SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
1966 return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
1969 case Builtin::BI__builtin_abs:
1970 case Builtin::BI__builtin_labs:
1971 case Builtin::BI__builtin_llabs: {
1973 // The negation has 'nsw' because abs of INT_MIN is undefined.
1974 Value *ArgValue = EmitScalarExpr(E->getArg(0));
1975 Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
1976 Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
1977 Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
1978 Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
1979 return RValue::get(Result);
1981 case Builtin::BI__builtin_conj:
1982 case Builtin::BI__builtin_conjf:
1983 case Builtin::BI__builtin_conjl:
1984 case Builtin::BIconj:
1985 case Builtin::BIconjf:
1986 case Builtin::BIconjl: {
1987 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
1988 Value *Real = ComplexVal.first;
1989 Value *Imag = ComplexVal.second;
1990 Imag = Builder.CreateFNeg(Imag, "neg");
1991 return RValue::getComplex(std::make_pair(Real, Imag));
1993 case Builtin::BI__builtin_creal:
1994 case Builtin::BI__builtin_crealf:
1995 case Builtin::BI__builtin_creall:
1996 case Builtin::BIcreal:
1997 case Builtin::BIcrealf:
1998 case Builtin::BIcreall: {
1999 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2000 return RValue::get(ComplexVal.first);
2003 case Builtin::BI__builtin_dump_struct: {
2004 llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
2005 llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
2006 LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
2008 Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
2009 CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
2011 const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
2012 QualType Arg0Type = Arg0->getType()->getPointeeType();
2014 Value *RecordPtr = EmitScalarExpr(Arg0);
2015 Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
2016 {LLVMFuncType, Func}, 0);
2017 return RValue::get(Res);
2020 case Builtin::BI__builtin_preserve_access_index: {
2021 // Only enabled preserved access index region when debuginfo
2022 // is available as debuginfo is needed to preserve user-level
2024 if (!getDebugInfo()) {
2025 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2026 return RValue::get(EmitScalarExpr(E->getArg(0)));
2029 // Nested builtin_preserve_access_index() not supported
2030 if (IsInPreservedAIRegion) {
2031 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2032 return RValue::get(EmitScalarExpr(E->getArg(0)));
2035 IsInPreservedAIRegion = true;
2036 Value *Res = EmitScalarExpr(E->getArg(0));
2037 IsInPreservedAIRegion = false;
2038 return RValue::get(Res);
2041 case Builtin::BI__builtin_cimag:
2042 case Builtin::BI__builtin_cimagf:
2043 case Builtin::BI__builtin_cimagl:
2044 case Builtin::BIcimag:
2045 case Builtin::BIcimagf:
2046 case Builtin::BIcimagl: {
2047 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2048 return RValue::get(ComplexVal.second);
2051 case Builtin::BI__builtin_clrsb:
2052 case Builtin::BI__builtin_clrsbl:
2053 case Builtin::BI__builtin_clrsbll: {
2054 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 or
2055 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2057 llvm::Type *ArgType = ArgValue->getType();
2058 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2060 llvm::Type *ResultType = ConvertType(E->getType());
2061 Value *Zero = llvm::Constant::getNullValue(ArgType);
2062 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2063 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2064 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2065 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2066 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2067 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2069 return RValue::get(Result);
2071 case Builtin::BI__builtin_ctzs:
2072 case Builtin::BI__builtin_ctz:
2073 case Builtin::BI__builtin_ctzl:
2074 case Builtin::BI__builtin_ctzll: {
2075 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2077 llvm::Type *ArgType = ArgValue->getType();
2078 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2080 llvm::Type *ResultType = ConvertType(E->getType());
2081 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2082 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2083 if (Result->getType() != ResultType)
2084 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2086 return RValue::get(Result);
2088 case Builtin::BI__builtin_clzs:
2089 case Builtin::BI__builtin_clz:
2090 case Builtin::BI__builtin_clzl:
2091 case Builtin::BI__builtin_clzll: {
2092 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2094 llvm::Type *ArgType = ArgValue->getType();
2095 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2097 llvm::Type *ResultType = ConvertType(E->getType());
2098 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2099 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2100 if (Result->getType() != ResultType)
2101 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2103 return RValue::get(Result);
2105 case Builtin::BI__builtin_ffs:
2106 case Builtin::BI__builtin_ffsl:
2107 case Builtin::BI__builtin_ffsll: {
2108 // ffs(x) -> x ? cttz(x) + 1 : 0
2109 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2111 llvm::Type *ArgType = ArgValue->getType();
2112 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2114 llvm::Type *ResultType = ConvertType(E->getType());
2116 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2117 llvm::ConstantInt::get(ArgType, 1));
2118 Value *Zero = llvm::Constant::getNullValue(ArgType);
2119 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2120 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2121 if (Result->getType() != ResultType)
2122 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2124 return RValue::get(Result);
2126 case Builtin::BI__builtin_parity:
2127 case Builtin::BI__builtin_parityl:
2128 case Builtin::BI__builtin_parityll: {
2129 // parity(x) -> ctpop(x) & 1
2130 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2132 llvm::Type *ArgType = ArgValue->getType();
2133 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2135 llvm::Type *ResultType = ConvertType(E->getType());
2136 Value *Tmp = Builder.CreateCall(F, ArgValue);
2137 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2138 if (Result->getType() != ResultType)
2139 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2141 return RValue::get(Result);
2143 case Builtin::BI__lzcnt16:
2144 case Builtin::BI__lzcnt:
2145 case Builtin::BI__lzcnt64: {
2146 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2148 llvm::Type *ArgType = ArgValue->getType();
2149 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2151 llvm::Type *ResultType = ConvertType(E->getType());
2152 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2153 if (Result->getType() != ResultType)
2154 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2156 return RValue::get(Result);
2158 case Builtin::BI__popcnt16:
2159 case Builtin::BI__popcnt:
2160 case Builtin::BI__popcnt64:
2161 case Builtin::BI__builtin_popcount:
2162 case Builtin::BI__builtin_popcountl:
2163 case Builtin::BI__builtin_popcountll: {
2164 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2166 llvm::Type *ArgType = ArgValue->getType();
2167 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2169 llvm::Type *ResultType = ConvertType(E->getType());
2170 Value *Result = Builder.CreateCall(F, ArgValue);
2171 if (Result->getType() != ResultType)
2172 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2174 return RValue::get(Result);
2176 case Builtin::BI__builtin_unpredictable: {
2177 // Always return the argument of __builtin_unpredictable. LLVM does not
2178 // handle this builtin. Metadata for this builtin should be added directly
2179 // to instructions such as branches or switches that use it.
2180 return RValue::get(EmitScalarExpr(E->getArg(0)));
2182 case Builtin::BI__builtin_expect: {
2183 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2184 llvm::Type *ArgType = ArgValue->getType();
2186 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2187 // Don't generate llvm.expect on -O0 as the backend won't use it for
2189 // Note, we still IRGen ExpectedValue because it could have side-effects.
2190 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2191 return RValue::get(ArgValue);
2193 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
2195 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2196 return RValue::get(Result);
2198 case Builtin::BI__builtin_expect_with_probability: {
2199 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2200 llvm::Type *ArgType = ArgValue->getType();
2202 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2203 llvm::APFloat Probability(0.0);
2204 const Expr *ProbArg = E->getArg(2);
2205 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
2206 assert(EvalSucceed && "probability should be able to evaluate as float");
2208 bool LoseInfo = false;
2209 Probability.convert(llvm::APFloat::IEEEdouble(),
2210 llvm::RoundingMode::Dynamic, &LoseInfo);
2211 llvm::Type *Ty = ConvertType(ProbArg->getType());
2212 Constant *Confidence = ConstantFP::get(Ty, Probability);
2213 // Don't generate llvm.expect.with.probability on -O0 as the backend
2214 // won't use it for anything.
2215 // Note, we still IRGen ExpectedValue because it could have side-effects.
2216 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2217 return RValue::get(ArgValue);
2219 Function *FnExpect =
2220 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2221 Value *Result = Builder.CreateCall(
2222 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2223 return RValue::get(Result);
2225 case Builtin::BI__builtin_assume_aligned: {
2226 const Expr *Ptr = E->getArg(0);
2227 Value *PtrValue = EmitScalarExpr(Ptr);
2228 Value *OffsetValue =
2229 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2231 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2232 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
2233 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2234 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2235 llvm::Value::MaximumAlignment);
2237 emitAlignmentAssumption(PtrValue, Ptr,
2238 /*The expr loc is sufficient.*/ SourceLocation(),
2239 AlignmentCI, OffsetValue);
2240 return RValue::get(PtrValue);
2242 case Builtin::BI__assume:
2243 case Builtin::BI__builtin_assume: {
2244 if (E->getArg(0)->HasSideEffects(getContext()))
2245 return RValue::get(nullptr);
2247 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2248 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2249 return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
2251 case Builtin::BI__builtin_bswap16:
2252 case Builtin::BI__builtin_bswap32:
2253 case Builtin::BI__builtin_bswap64: {
2254 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
2256 case Builtin::BI__builtin_bitreverse8:
2257 case Builtin::BI__builtin_bitreverse16:
2258 case Builtin::BI__builtin_bitreverse32:
2259 case Builtin::BI__builtin_bitreverse64: {
2260 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
2262 case Builtin::BI__builtin_rotateleft8:
2263 case Builtin::BI__builtin_rotateleft16:
2264 case Builtin::BI__builtin_rotateleft32:
2265 case Builtin::BI__builtin_rotateleft64:
2266 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2267 case Builtin::BI_rotl16:
2268 case Builtin::BI_rotl:
2269 case Builtin::BI_lrotl:
2270 case Builtin::BI_rotl64:
2271 return emitRotate(E, false);
2273 case Builtin::BI__builtin_rotateright8:
2274 case Builtin::BI__builtin_rotateright16:
2275 case Builtin::BI__builtin_rotateright32:
2276 case Builtin::BI__builtin_rotateright64:
2277 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2278 case Builtin::BI_rotr16:
2279 case Builtin::BI_rotr:
2280 case Builtin::BI_lrotr:
2281 case Builtin::BI_rotr64:
2282 return emitRotate(E, true);
2284 case Builtin::BI__builtin_constant_p: {
2285 llvm::Type *ResultType = ConvertType(E->getType());
2287 const Expr *Arg = E->getArg(0);
2288 QualType ArgType = Arg->getType();
2289 // FIXME: The allowance for Obj-C pointers and block pointers is historical
2290 // and likely a mistake.
2291 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2292 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
2293 // Per the GCC documentation, only numeric constants are recognized after
2295 return RValue::get(ConstantInt::get(ResultType, 0));
2297 if (Arg->HasSideEffects(getContext()))
2298 // The argument is unevaluated, so be conservative if it might have
2300 return RValue::get(ConstantInt::get(ResultType, 0));
2302 Value *ArgValue = EmitScalarExpr(Arg);
2303 if (ArgType->isObjCObjectPointerType()) {
2304 // Convert Objective-C objects to id because we cannot distinguish between
2305 // LLVM types for Obj-C classes as they are opaque.
2306 ArgType = CGM.getContext().getObjCIdType();
2307 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
2310 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
2311 Value *Result = Builder.CreateCall(F, ArgValue);
2312 if (Result->getType() != ResultType)
2313 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
2314 return RValue::get(Result);
2316 case Builtin::BI__builtin_dynamic_object_size:
2317 case Builtin::BI__builtin_object_size: {
2319 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
2320 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2322 // We pass this builtin onto the optimizer so that it can figure out the
2323 // object size in more complex cases.
2324 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2325 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
2326 /*EmittedE=*/nullptr, IsDynamic));
2328 case Builtin::BI__builtin_prefetch: {
2329 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
2330 // FIXME: Technically these constants should of type 'int', yes?
2331 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2332 llvm::ConstantInt::get(Int32Ty, 0);
2333 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2334 llvm::ConstantInt::get(Int32Ty, 3);
2335 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
2336 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
2337 return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
2339 case Builtin::BI__builtin_readcyclecounter: {
2340 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
2341 return RValue::get(Builder.CreateCall(F));
2343 case Builtin::BI__builtin___clear_cache: {
2344 Value *Begin = EmitScalarExpr(E->getArg(0));
2345 Value *End = EmitScalarExpr(E->getArg(1));
2346 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
2347 return RValue::get(Builder.CreateCall(F, {Begin, End}));
2349 case Builtin::BI__builtin_trap:
2350 return RValue::get(EmitTrapCall(Intrinsic::trap));
2351 case Builtin::BI__debugbreak:
2352 return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
2353 case Builtin::BI__builtin_unreachable: {
2354 EmitUnreachable(E->getExprLoc());
2356 // We do need to preserve an insertion point.
2357 EmitBlock(createBasicBlock("unreachable.cont"));
2359 return RValue::get(nullptr);
2362 case Builtin::BI__builtin_powi:
2363 case Builtin::BI__builtin_powif:
2364 case Builtin::BI__builtin_powil:
2365 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2366 *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));
2368 case Builtin::BI__builtin_isgreater:
2369 case Builtin::BI__builtin_isgreaterequal:
2370 case Builtin::BI__builtin_isless:
2371 case Builtin::BI__builtin_islessequal:
2372 case Builtin::BI__builtin_islessgreater:
2373 case Builtin::BI__builtin_isunordered: {
2374 // Ordered comparisons: we know the arguments to these are matching scalar
2375 // floating point values.
2376 Value *LHS = EmitScalarExpr(E->getArg(0));
2377 Value *RHS = EmitScalarExpr(E->getArg(1));
2379 switch (BuiltinID) {
2380 default: llvm_unreachable("Unknown ordered comparison");
2381 case Builtin::BI__builtin_isgreater:
2382 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
2384 case Builtin::BI__builtin_isgreaterequal:
2385 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
2387 case Builtin::BI__builtin_isless:
2388 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
2390 case Builtin::BI__builtin_islessequal:
2391 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
2393 case Builtin::BI__builtin_islessgreater:
2394 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
2396 case Builtin::BI__builtin_isunordered:
2397 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
2400 // ZExt bool to int type.
2401 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
2403 case Builtin::BI__builtin_isnan: {
2404 Value *V = EmitScalarExpr(E->getArg(0));
2405 V = Builder.CreateFCmpUNO(V, V, "cmp");
2406 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2409 case Builtin::BI__builtin_matrix_transpose: {
2410 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2411 Value *MatValue = EmitScalarExpr(E->getArg(0));
2412 MatrixBuilder<CGBuilderTy> MB(Builder);
2413 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
2414 MatrixTy->getNumColumns());
2415 return RValue::get(Result);
2418 case Builtin::BI__builtin_matrix_column_major_load: {
2419 MatrixBuilder<CGBuilderTy> MB(Builder);
2420 // Emit everything that isn't dependent on the first parameter type
2421 Value *Stride = EmitScalarExpr(E->getArg(3));
2422 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
2423 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
2424 assert(PtrTy && "arg0 must be of pointer type");
2425 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
2427 Address Src = EmitPointerWithAlignment(E->getArg(0));
2428 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
2429 E->getArg(0)->getExprLoc(), FD, 0);
2430 Value *Result = MB.CreateColumnMajorLoad(
2431 Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
2432 IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
2434 return RValue::get(Result);
2437 case Builtin::BI__builtin_matrix_column_major_store: {
2438 MatrixBuilder<CGBuilderTy> MB(Builder);
2439 Value *Matrix = EmitScalarExpr(E->getArg(0));
2440 Address Dst = EmitPointerWithAlignment(E->getArg(1));
2441 Value *Stride = EmitScalarExpr(E->getArg(2));
2443 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2444 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
2445 assert(PtrTy && "arg1 must be of pointer type");
2446 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
2448 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
2449 E->getArg(1)->getExprLoc(), FD, 0);
2450 Value *Result = MB.CreateColumnMajorStore(
2451 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
2452 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
2453 return RValue::get(Result);
2456 case Builtin::BIfinite:
2457 case Builtin::BI__finite:
2458 case Builtin::BIfinitef:
2459 case Builtin::BI__finitef:
2460 case Builtin::BIfinitel:
2461 case Builtin::BI__finitel:
2462 case Builtin::BI__builtin_isinf:
2463 case Builtin::BI__builtin_isfinite: {
2464 // isinf(x) --> fabs(x) == infinity
2465 // isfinite(x) --> fabs(x) != infinity
2466 // x != NaN via the ordered compare in either case.
2467 Value *V = EmitScalarExpr(E->getArg(0));
2468 Value *Fabs = EmitFAbs(*this, V);
2469 Constant *Infinity = ConstantFP::getInfinity(V->getType());
2470 CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
2472 : CmpInst::FCMP_ONE;
2473 Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
2474 return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
2477 case Builtin::BI__builtin_isinf_sign: {
2478 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
2479 Value *Arg = EmitScalarExpr(E->getArg(0));
2480 Value *AbsArg = EmitFAbs(*this, Arg);
2481 Value *IsInf = Builder.CreateFCmpOEQ(
2482 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
2483 Value *IsNeg = EmitSignBit(*this, Arg);
2485 llvm::Type *IntTy = ConvertType(E->getType());
2486 Value *Zero = Constant::getNullValue(IntTy);
2487 Value *One = ConstantInt::get(IntTy, 1);
2488 Value *NegativeOne = ConstantInt::get(IntTy, -1);
2489 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
2490 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
2491 return RValue::get(Result);
2494 case Builtin::BI__builtin_isnormal: {
2495 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
2496 Value *V = EmitScalarExpr(E->getArg(0));
2497 Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
2499 Value *Abs = EmitFAbs(*this, V);
2500 Value *IsLessThanInf =
2501 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
2502 APFloat Smallest = APFloat::getSmallestNormalized(
2503 getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
2505 Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
2507 V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
2508 V = Builder.CreateAnd(V, IsNormal, "and");
2509 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2512 case Builtin::BI__builtin_flt_rounds: {
2513 Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
2515 llvm::Type *ResultType = ConvertType(E->getType());
2516 Value *Result = Builder.CreateCall(F);
2517 if (Result->getType() != ResultType)
2518 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2520 return RValue::get(Result);
2523 case Builtin::BI__builtin_fpclassify: {
2524 Value *V = EmitScalarExpr(E->getArg(5));
2525 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
2528 BasicBlock *Begin = Builder.GetInsertBlock();
2529 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
2530 Builder.SetInsertPoint(End);
2532 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
2533 "fpclassify_result");
2535 // if (V==0) return FP_ZERO
2536 Builder.SetInsertPoint(Begin);
2537 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
2539 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
2540 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
2541 Builder.CreateCondBr(IsZero, End, NotZero);
2542 Result->addIncoming(ZeroLiteral, Begin);
2544 // if (V != V) return FP_NAN
2545 Builder.SetInsertPoint(NotZero);
2546 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
2547 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
2548 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
2549 Builder.CreateCondBr(IsNan, End, NotNan);
2550 Result->addIncoming(NanLiteral, NotZero);
2552 // if (fabs(V) == infinity) return FP_INFINITY
2553 Builder.SetInsertPoint(NotNan);
2554 Value *VAbs = EmitFAbs(*this, V);
2556 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
2558 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
2559 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
2560 Builder.CreateCondBr(IsInf, End, NotInf);
2561 Result->addIncoming(InfLiteral, NotNan);
2563 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
2564 Builder.SetInsertPoint(NotInf);
2565 APFloat Smallest = APFloat::getSmallestNormalized(
2566 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
2568 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
2570 Value *NormalResult =
2571 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
2572 EmitScalarExpr(E->getArg(3)));
2573 Builder.CreateBr(End);
2574 Result->addIncoming(NormalResult, NotInf);
2577 Builder.SetInsertPoint(End);
2578 return RValue::get(Result);
2581 case Builtin::BIalloca:
2582 case Builtin::BI_alloca:
2583 case Builtin::BI__builtin_alloca: {
2584 Value *Size = EmitScalarExpr(E->getArg(0));
2585 const TargetInfo &TI = getContext().getTargetInfo();
2586 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
2587 const Align SuitableAlignmentInBytes =
2589 .toCharUnitsFromBits(TI.getSuitableAlign())
2591 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
2592 AI->setAlignment(SuitableAlignmentInBytes);
2593 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
2594 return RValue::get(AI);
2597 case Builtin::BI__builtin_alloca_with_align: {
2598 Value *Size = EmitScalarExpr(E->getArg(0));
2599 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
2600 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
2601 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
2602 const Align AlignmentInBytes =
2603 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
2604 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
2605 AI->setAlignment(AlignmentInBytes);
2606 initializeAlloca(*this, AI, Size, AlignmentInBytes);
2607 return RValue::get(AI);
2610 case Builtin::BIbzero:
2611 case Builtin::BI__builtin_bzero: {
2612 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2613 Value *SizeVal = EmitScalarExpr(E->getArg(1));
2614 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2615 E->getArg(0)->getExprLoc(), FD, 0);
2616 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
2617 return RValue::get(nullptr);
2619 case Builtin::BImemcpy:
2620 case Builtin::BI__builtin_memcpy:
2621 case Builtin::BImempcpy:
2622 case Builtin::BI__builtin_mempcpy: {
2623 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2624 Address Src = EmitPointerWithAlignment(E->getArg(1));
2625 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2626 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2627 E->getArg(0)->getExprLoc(), FD, 0);
2628 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2629 E->getArg(1)->getExprLoc(), FD, 1);
2630 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
2631 if (BuiltinID == Builtin::BImempcpy ||
2632 BuiltinID == Builtin::BI__builtin_mempcpy)
2633 return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal));
2635 return RValue::get(Dest.getPointer());
2638 case Builtin::BI__builtin_memcpy_inline: {
2639 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2640 Address Src = EmitPointerWithAlignment(E->getArg(1));
2642 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
2643 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2644 E->getArg(0)->getExprLoc(), FD, 0);
2645 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2646 E->getArg(1)->getExprLoc(), FD, 1);
2647 Builder.CreateMemCpyInline(Dest, Src, Size);
2648 return RValue::get(nullptr);
2651 case Builtin::BI__builtin_char_memchr:
2652 BuiltinID = Builtin::BI__builtin_memchr;
2655 case Builtin::BI__builtin___memcpy_chk: {
2656 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
2657 Expr::EvalResult SizeResult, DstSizeResult;
2658 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2659 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2661 llvm::APSInt Size = SizeResult.Val.getInt();
2662 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2663 if (Size.ugt(DstSize))
2665 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2666 Address Src = EmitPointerWithAlignment(E->getArg(1));
2667 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2668 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
2669 return RValue::get(Dest.getPointer());
2672 case Builtin::BI__builtin_objc_memmove_collectable: {
2673 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
2674 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
2675 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2676 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
2677 DestAddr, SrcAddr, SizeVal);
2678 return RValue::get(DestAddr.getPointer());
2681 case Builtin::BI__builtin___memmove_chk: {
2682 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
2683 Expr::EvalResult SizeResult, DstSizeResult;
2684 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2685 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2687 llvm::APSInt Size = SizeResult.Val.getInt();
2688 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2689 if (Size.ugt(DstSize))
2691 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2692 Address Src = EmitPointerWithAlignment(E->getArg(1));
2693 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2694 Builder.CreateMemMove(Dest, Src, SizeVal, false);
2695 return RValue::get(Dest.getPointer());
2698 case Builtin::BImemmove:
2699 case Builtin::BI__builtin_memmove: {
2700 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2701 Address Src = EmitPointerWithAlignment(E->getArg(1));
2702 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2703 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2704 E->getArg(0)->getExprLoc(), FD, 0);
2705 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2706 E->getArg(1)->getExprLoc(), FD, 1);
2707 Builder.CreateMemMove(Dest, Src, SizeVal, false);
2708 return RValue::get(Dest.getPointer());
2710 case Builtin::BImemset:
2711 case Builtin::BI__builtin_memset: {
2712 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2713 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
2714 Builder.getInt8Ty());
2715 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2716 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2717 E->getArg(0)->getExprLoc(), FD, 0);
2718 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
2719 return RValue::get(Dest.getPointer());
2721 case Builtin::BI__builtin___memset_chk: {
2722 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
2723 Expr::EvalResult SizeResult, DstSizeResult;
2724 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2725 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2727 llvm::APSInt Size = SizeResult.Val.getInt();
2728 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2729 if (Size.ugt(DstSize))
2731 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2732 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
2733 Builder.getInt8Ty());
2734 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2735 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
2736 return RValue::get(Dest.getPointer());
2738 case Builtin::BI__builtin_wmemcmp: {
2739 // The MSVC runtime library does not provide a definition of wmemcmp, so we
2740 // need an inline implementation.
2741 if (!getTarget().getTriple().isOSMSVCRT())
2744 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
2746 Value *Dst = EmitScalarExpr(E->getArg(0));
2747 Value *Src = EmitScalarExpr(E->getArg(1));
2748 Value *Size = EmitScalarExpr(E->getArg(2));
2750 BasicBlock *Entry = Builder.GetInsertBlock();
2751 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
2752 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
2753 BasicBlock *Next = createBasicBlock("wmemcmp.next");
2754 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
2755 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
2756 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
2759 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
2760 DstPhi->addIncoming(Dst, Entry);
2761 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
2762 SrcPhi->addIncoming(Src, Entry);
2763 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
2764 SizePhi->addIncoming(Size, Entry);
2765 CharUnits WCharAlign =
2766 getContext().getTypeAlignInChars(getContext().WCharTy);
2767 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
2768 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
2769 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
2770 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
2773 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
2774 Builder.CreateCondBr(DstLtSrc, Exit, Next);
2777 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
2778 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
2779 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
2780 Value *NextSizeEq0 =
2781 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
2782 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
2783 DstPhi->addIncoming(NextDst, Next);
2784 SrcPhi->addIncoming(NextSrc, Next);
2785 SizePhi->addIncoming(NextSize, Next);
2788 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
2789 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
2790 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
2791 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
2792 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
2793 return RValue::get(Ret);
2795 case Builtin::BI__builtin_dwarf_cfa: {
2796 // The offset in bytes from the first argument to the CFA.
2798 // Why on earth is this in the frontend? Is there any reason at
2799 // all that the backend can't reasonably determine this while
2800 // lowering llvm.eh.dwarf.cfa()?
2802 // TODO: If there's a satisfactory reason, add a target hook for
2803 // this instead of hard-coding 0, which is correct for most targets.
2806 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
2807 return RValue::get(Builder.CreateCall(F,
2808 llvm::ConstantInt::get(Int32Ty, Offset)));
2810 case Builtin::BI__builtin_return_address: {
2811 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
2812 getContext().UnsignedIntTy);
2813 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
2814 return RValue::get(Builder.CreateCall(F, Depth));
2816 case Builtin::BI_ReturnAddress: {
2817 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
2818 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
2820 case Builtin::BI__builtin_frame_address: {
2821 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
2822 getContext().UnsignedIntTy);
2823 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
2824 return RValue::get(Builder.CreateCall(F, Depth));
2826 case Builtin::BI__builtin_extract_return_addr: {
2827 Value *Address = EmitScalarExpr(E->getArg(0));
2828 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
2829 return RValue::get(Result);
2831 case Builtin::BI__builtin_frob_return_addr: {
2832 Value *Address = EmitScalarExpr(E->getArg(0));
2833 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
2834 return RValue::get(Result);
2836 case Builtin::BI__builtin_dwarf_sp_column: {
2837 llvm::IntegerType *Ty
2838 = cast<llvm::IntegerType>(ConvertType(E->getType()));
2839 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
2841 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
2842 return RValue::get(llvm::UndefValue::get(Ty));
2844 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
2846 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
2847 Value *Address = EmitScalarExpr(E->getArg(0));
2848 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
2849 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
2850 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
2852 case Builtin::BI__builtin_eh_return: {
2853 Value *Int = EmitScalarExpr(E->getArg(0));
2854 Value *Ptr = EmitScalarExpr(E->getArg(1));
2856 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
2857 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
2858 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
2860 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
2861 : Intrinsic::eh_return_i64);
2862 Builder.CreateCall(F, {Int, Ptr});
2863 Builder.CreateUnreachable();
2865 // We do need to preserve an insertion point.
2866 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
2868 return RValue::get(nullptr);
2870 case Builtin::BI__builtin_unwind_init: {
2871 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
2872 return RValue::get(Builder.CreateCall(F));
2874 case Builtin::BI__builtin_extend_pointer: {
2875 // Extends a pointer to the size of an _Unwind_Word, which is
2876 // uint64_t on all platforms. Generally this gets poked into a
2877 // register and eventually used as an address, so if the
2878 // addressing registers are wider than pointers and the platform
2879 // doesn't implicitly ignore high-order bits when doing
2880 // addressing, we need to make sure we zext / sext based on
2881 // the platform's expectations.
2883 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
2885 // Cast the pointer to intptr_t.
2886 Value *Ptr = EmitScalarExpr(E->getArg(0));
2887 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
2889 // If that's 64 bits, we're done.
2890 if (IntPtrTy->getBitWidth() == 64)
2891 return RValue::get(Result);
2893 // Otherwise, ask the codegen data what to do.
2894 if (getTargetHooks().extendPointerWithSExt())
2895 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
2897 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
2899 case Builtin::BI__builtin_setjmp: {
2900 // Buffer is a void**.
2901 Address Buf = EmitPointerWithAlignment(E->getArg(0));
2903 // Store the frame pointer to the setjmp buffer.
2904 Value *FrameAddr = Builder.CreateCall(
2905 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
2906 ConstantInt::get(Int32Ty, 0));
2907 Builder.CreateStore(FrameAddr, Buf);
2909 // Store the stack pointer to the setjmp buffer.
2911 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
2912 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
2913 Builder.CreateStore(StackAddr, StackSaveSlot);
2915 // Call LLVM's EH setjmp, which is lightweight.
2916 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
2917 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2918 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
2920 case Builtin::BI__builtin_longjmp: {
2921 Value *Buf = EmitScalarExpr(E->getArg(0));
2922 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2924 // Call LLVM's EH longjmp, which is lightweight.
2925 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
2927 // longjmp doesn't return; mark this as unreachable.
2928 Builder.CreateUnreachable();
2930 // We do need to preserve an insertion point.
2931 EmitBlock(createBasicBlock("longjmp.cont"));
2933 return RValue::get(nullptr);
2935 case Builtin::BI__builtin_launder: {
2936 const Expr *Arg = E->getArg(0);
2937 QualType ArgTy = Arg->getType()->getPointeeType();
2938 Value *Ptr = EmitScalarExpr(Arg);
2939 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
2940 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
2942 return RValue::get(Ptr);
2944 case Builtin::BI__sync_fetch_and_add:
2945 case Builtin::BI__sync_fetch_and_sub:
2946 case Builtin::BI__sync_fetch_and_or:
2947 case Builtin::BI__sync_fetch_and_and:
2948 case Builtin::BI__sync_fetch_and_xor:
2949 case Builtin::BI__sync_fetch_and_nand:
2950 case Builtin::BI__sync_add_and_fetch:
2951 case Builtin::BI__sync_sub_and_fetch:
2952 case Builtin::BI__sync_and_and_fetch:
2953 case Builtin::BI__sync_or_and_fetch:
2954 case Builtin::BI__sync_xor_and_fetch:
2955 case Builtin::BI__sync_nand_and_fetch:
2956 case Builtin::BI__sync_val_compare_and_swap:
2957 case Builtin::BI__sync_bool_compare_and_swap:
2958 case Builtin::BI__sync_lock_test_and_set:
2959 case Builtin::BI__sync_lock_release:
2960 case Builtin::BI__sync_swap:
2961 llvm_unreachable("Shouldn't make it through sema");
2962 case Builtin::BI__sync_fetch_and_add_1:
2963 case Builtin::BI__sync_fetch_and_add_2:
2964 case Builtin::BI__sync_fetch_and_add_4:
2965 case Builtin::BI__sync_fetch_and_add_8:
2966 case Builtin::BI__sync_fetch_and_add_16:
2967 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
2968 case Builtin::BI__sync_fetch_and_sub_1:
2969 case Builtin::BI__sync_fetch_and_sub_2:
2970 case Builtin::BI__sync_fetch_and_sub_4:
2971 case Builtin::BI__sync_fetch_and_sub_8:
2972 case Builtin::BI__sync_fetch_and_sub_16:
2973 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
2974 case Builtin::BI__sync_fetch_and_or_1:
2975 case Builtin::BI__sync_fetch_and_or_2:
2976 case Builtin::BI__sync_fetch_and_or_4:
2977 case Builtin::BI__sync_fetch_and_or_8:
2978 case Builtin::BI__sync_fetch_and_or_16:
2979 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
2980 case Builtin::BI__sync_fetch_and_and_1:
2981 case Builtin::BI__sync_fetch_and_and_2:
2982 case Builtin::BI__sync_fetch_and_and_4:
2983 case Builtin::BI__sync_fetch_and_and_8:
2984 case Builtin::BI__sync_fetch_and_and_16:
2985 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
2986 case Builtin::BI__sync_fetch_and_xor_1:
2987 case Builtin::BI__sync_fetch_and_xor_2:
2988 case Builtin::BI__sync_fetch_and_xor_4:
2989 case Builtin::BI__sync_fetch_and_xor_8:
2990 case Builtin::BI__sync_fetch_and_xor_16:
2991 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
2992 case Builtin::BI__sync_fetch_and_nand_1:
2993 case Builtin::BI__sync_fetch_and_nand_2:
2994 case Builtin::BI__sync_fetch_and_nand_4:
2995 case Builtin::BI__sync_fetch_and_nand_8:
2996 case Builtin::BI__sync_fetch_and_nand_16:
2997 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
2999 // Clang extensions: not overloaded yet.
3000 case Builtin::BI__sync_fetch_and_min:
3001 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
3002 case Builtin::BI__sync_fetch_and_max:
3003 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
3004 case Builtin::BI__sync_fetch_and_umin:
3005 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
3006 case Builtin::BI__sync_fetch_and_umax:
3007 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
3009 case Builtin::BI__sync_add_and_fetch_1:
3010 case Builtin::BI__sync_add_and_fetch_2:
3011 case Builtin::BI__sync_add_and_fetch_4:
3012 case Builtin::BI__sync_add_and_fetch_8:
3013 case Builtin::BI__sync_add_and_fetch_16:
3014 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
3015 llvm::Instruction::Add);
3016 case Builtin::BI__sync_sub_and_fetch_1:
3017 case Builtin::BI__sync_sub_and_fetch_2:
3018 case Builtin::BI__sync_sub_and_fetch_4:
3019 case Builtin::BI__sync_sub_and_fetch_8:
3020 case Builtin::BI__sync_sub_and_fetch_16:
3021 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
3022 llvm::Instruction::Sub);
3023 case Builtin::BI__sync_and_and_fetch_1:
3024 case Builtin::BI__sync_and_and_fetch_2:
3025 case Builtin::BI__sync_and_and_fetch_4:
3026 case Builtin::BI__sync_and_and_fetch_8:
3027 case Builtin::BI__sync_and_and_fetch_16:
3028 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
3029 llvm::Instruction::And);
3030 case Builtin::BI__sync_or_and_fetch_1:
3031 case Builtin::BI__sync_or_and_fetch_2:
3032 case Builtin::BI__sync_or_and_fetch_4:
3033 case Builtin::BI__sync_or_and_fetch_8:
3034 case Builtin::BI__sync_or_and_fetch_16:
3035 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
3036 llvm::Instruction::Or);
3037 case Builtin::BI__sync_xor_and_fetch_1:
3038 case Builtin::BI__sync_xor_and_fetch_2:
3039 case Builtin::BI__sync_xor_and_fetch_4:
3040 case Builtin::BI__sync_xor_and_fetch_8:
3041 case Builtin::BI__sync_xor_and_fetch_16:
3042 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
3043 llvm::Instruction::Xor);
3044 case Builtin::BI__sync_nand_and_fetch_1:
3045 case Builtin::BI__sync_nand_and_fetch_2:
3046 case Builtin::BI__sync_nand_and_fetch_4:
3047 case Builtin::BI__sync_nand_and_fetch_8:
3048 case Builtin::BI__sync_nand_and_fetch_16:
3049 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
3050 llvm::Instruction::And, true);
3052 case Builtin::BI__sync_val_compare_and_swap_1:
3053 case Builtin::BI__sync_val_compare_and_swap_2:
3054 case Builtin::BI__sync_val_compare_and_swap_4:
3055 case Builtin::BI__sync_val_compare_and_swap_8:
3056 case Builtin::BI__sync_val_compare_and_swap_16:
3057 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
3059 case Builtin::BI__sync_bool_compare_and_swap_1:
3060 case Builtin::BI__sync_bool_compare_and_swap_2:
3061 case Builtin::BI__sync_bool_compare_and_swap_4:
3062 case Builtin::BI__sync_bool_compare_and_swap_8:
3063 case Builtin::BI__sync_bool_compare_and_swap_16:
3064 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
3066 case Builtin::BI__sync_swap_1:
3067 case Builtin::BI__sync_swap_2:
3068 case Builtin::BI__sync_swap_4:
3069 case Builtin::BI__sync_swap_8:
3070 case Builtin::BI__sync_swap_16:
3071 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3073 case Builtin::BI__sync_lock_test_and_set_1:
3074 case Builtin::BI__sync_lock_test_and_set_2:
3075 case Builtin::BI__sync_lock_test_and_set_4:
3076 case Builtin::BI__sync_lock_test_and_set_8:
3077 case Builtin::BI__sync_lock_test_and_set_16:
3078 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3080 case Builtin::BI__sync_lock_release_1:
3081 case Builtin::BI__sync_lock_release_2:
3082 case Builtin::BI__sync_lock_release_4:
3083 case Builtin::BI__sync_lock_release_8:
3084 case Builtin::BI__sync_lock_release_16: {
3085 Value *Ptr = EmitScalarExpr(E->getArg(0));
3086 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
3087 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
3088 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
3089 StoreSize.getQuantity() * 8);
3090 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
3091 llvm::StoreInst *Store =
3092 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
3094 Store->setAtomic(llvm::AtomicOrdering::Release);
3095 return RValue::get(nullptr);
3098 case Builtin::BI__sync_synchronize: {
3099 // We assume this is supposed to correspond to a C++0x-style
3100 // sequentially-consistent fence (i.e. this is only usable for
3101 // synchronization, not device I/O or anything like that). This intrinsic
3102 // is really badly designed in the sense that in theory, there isn't
3103 // any way to safely use it... but in practice, it mostly works
3104 // to use it with non-atomic loads and stores to get acquire/release
3106 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
3107 return RValue::get(nullptr);
3110 case Builtin::BI__builtin_nontemporal_load:
3111 return RValue::get(EmitNontemporalLoad(*this, E));
3112 case Builtin::BI__builtin_nontemporal_store:
3113 return RValue::get(EmitNontemporalStore(*this, E));
3114 case Builtin::BI__c11_atomic_is_lock_free:
3115 case Builtin::BI__atomic_is_lock_free: {
3116 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
3117 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
3118 // _Atomic(T) is always properly-aligned.
3119 const char *LibCallName = "__atomic_is_lock_free";
3121 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
3122 getContext().getSizeType());
3123 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
3124 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
3125 getContext().VoidPtrTy);
3127 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
3128 getContext().VoidPtrTy);
3129 const CGFunctionInfo &FuncInfo =
3130 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3131 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3132 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3133 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3134 ReturnValueSlot(), Args);
3137 case Builtin::BI__atomic_test_and_set: {
3138 // Look at the argument type to determine whether this is a volatile
3139 // operation. The parameter type is always volatile.
3140 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3142 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3144 Value *Ptr = EmitScalarExpr(E->getArg(0));
3145 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
3146 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3147 Value *NewVal = Builder.getInt8(1);
3148 Value *Order = EmitScalarExpr(E->getArg(1));
3149 if (isa<llvm::ConstantInt>(Order)) {
3150 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3151 AtomicRMWInst *Result = nullptr;
3153 case 0: // memory_order_relaxed
3154 default: // invalid order
3155 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3156 llvm::AtomicOrdering::Monotonic);
3158 case 1: // memory_order_consume
3159 case 2: // memory_order_acquire
3160 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3161 llvm::AtomicOrdering::Acquire);
3163 case 3: // memory_order_release
3164 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3165 llvm::AtomicOrdering::Release);
3167 case 4: // memory_order_acq_rel
3169 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3170 llvm::AtomicOrdering::AcquireRelease);
3172 case 5: // memory_order_seq_cst
3173 Result = Builder.CreateAtomicRMW(
3174 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3175 llvm::AtomicOrdering::SequentiallyConsistent);
3178 Result->setVolatile(Volatile);
3179 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3182 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3184 llvm::BasicBlock *BBs[5] = {
3185 createBasicBlock("monotonic", CurFn),
3186 createBasicBlock("acquire", CurFn),
3187 createBasicBlock("release", CurFn),
3188 createBasicBlock("acqrel", CurFn),
3189 createBasicBlock("seqcst", CurFn)
3191 llvm::AtomicOrdering Orders[5] = {
3192 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
3193 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
3194 llvm::AtomicOrdering::SequentiallyConsistent};
3196 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3197 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3199 Builder.SetInsertPoint(ContBB);
3200 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
3202 for (unsigned i = 0; i < 5; ++i) {
3203 Builder.SetInsertPoint(BBs[i]);
3204 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
3205 Ptr, NewVal, Orders[i]);
3206 RMW->setVolatile(Volatile);
3207 Result->addIncoming(RMW, BBs[i]);
3208 Builder.CreateBr(ContBB);
3211 SI->addCase(Builder.getInt32(0), BBs[0]);
3212 SI->addCase(Builder.getInt32(1), BBs[1]);
3213 SI->addCase(Builder.getInt32(2), BBs[1]);
3214 SI->addCase(Builder.getInt32(3), BBs[2]);
3215 SI->addCase(Builder.getInt32(4), BBs[3]);
3216 SI->addCase(Builder.getInt32(5), BBs[4]);
3218 Builder.SetInsertPoint(ContBB);
3219 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3222 case Builtin::BI__atomic_clear: {
3223 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3225 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3227 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
3228 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
3229 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3230 Value *NewVal = Builder.getInt8(0);
3231 Value *Order = EmitScalarExpr(E->getArg(1));
3232 if (isa<llvm::ConstantInt>(Order)) {
3233 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3234 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3236 case 0: // memory_order_relaxed
3237 default: // invalid order
3238 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
3240 case 3: // memory_order_release
3241 Store->setOrdering(llvm::AtomicOrdering::Release);
3243 case 5: // memory_order_seq_cst
3244 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
3247 return RValue::get(nullptr);
3250 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3252 llvm::BasicBlock *BBs[3] = {
3253 createBasicBlock("monotonic", CurFn),
3254 createBasicBlock("release", CurFn),
3255 createBasicBlock("seqcst", CurFn)
3257 llvm::AtomicOrdering Orders[3] = {
3258 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
3259 llvm::AtomicOrdering::SequentiallyConsistent};
3261 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3262 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3264 for (unsigned i = 0; i < 3; ++i) {
3265 Builder.SetInsertPoint(BBs[i]);
3266 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3267 Store->setOrdering(Orders[i]);
3268 Builder.CreateBr(ContBB);
3271 SI->addCase(Builder.getInt32(0), BBs[0]);
3272 SI->addCase(Builder.getInt32(3), BBs[1]);
3273 SI->addCase(Builder.getInt32(5), BBs[2]);
3275 Builder.SetInsertPoint(ContBB);
3276 return RValue::get(nullptr);
3279 case Builtin::BI__atomic_thread_fence:
3280 case Builtin::BI__atomic_signal_fence:
3281 case Builtin::BI__c11_atomic_thread_fence:
3282 case Builtin::BI__c11_atomic_signal_fence: {
3283 llvm::SyncScope::ID SSID;
3284 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
3285 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
3286 SSID = llvm::SyncScope::SingleThread;
3288 SSID = llvm::SyncScope::System;
3289 Value *Order = EmitScalarExpr(E->getArg(0));
3290 if (isa<llvm::ConstantInt>(Order)) {
3291 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3293 case 0: // memory_order_relaxed
3294 default: // invalid order
3296 case 1: // memory_order_consume
3297 case 2: // memory_order_acquire
3298 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3300 case 3: // memory_order_release
3301 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3303 case 4: // memory_order_acq_rel
3304 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3306 case 5: // memory_order_seq_cst
3307 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3310 return RValue::get(nullptr);
3313 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
3314 AcquireBB = createBasicBlock("acquire", CurFn);
3315 ReleaseBB = createBasicBlock("release", CurFn);
3316 AcqRelBB = createBasicBlock("acqrel", CurFn);
3317 SeqCstBB = createBasicBlock("seqcst", CurFn);
3318 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3320 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3321 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
3323 Builder.SetInsertPoint(AcquireBB);
3324 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3325 Builder.CreateBr(ContBB);
3326 SI->addCase(Builder.getInt32(1), AcquireBB);
3327 SI->addCase(Builder.getInt32(2), AcquireBB);
3329 Builder.SetInsertPoint(ReleaseBB);
3330 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3331 Builder.CreateBr(ContBB);
3332 SI->addCase(Builder.getInt32(3), ReleaseBB);
3334 Builder.SetInsertPoint(AcqRelBB);
3335 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3336 Builder.CreateBr(ContBB);
3337 SI->addCase(Builder.getInt32(4), AcqRelBB);
3339 Builder.SetInsertPoint(SeqCstBB);
3340 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3341 Builder.CreateBr(ContBB);
3342 SI->addCase(Builder.getInt32(5), SeqCstBB);
3344 Builder.SetInsertPoint(ContBB);
3345 return RValue::get(nullptr);
3348 case Builtin::BI__builtin_signbit:
3349 case Builtin::BI__builtin_signbitf:
3350 case Builtin::BI__builtin_signbitl: {
3352 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
3353 ConvertType(E->getType())));
3355 case Builtin::BI__warn_memset_zero_len:
3356 return RValue::getIgnored();
3357 case Builtin::BI__annotation: {
3358 // Re-encode each wide string to UTF8 and make an MDString.
3359 SmallVector<Metadata *, 1> Strings;
3360 for (const Expr *Arg : E->arguments()) {
3361 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
3362 assert(Str->getCharByteWidth() == 2);
3363 StringRef WideBytes = Str->getBytes();
3364 std::string StrUtf8;
3365 if (!convertUTF16ToUTF8String(
3366 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
3367 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
3370 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
3373 // Build and MDTuple of MDStrings and emit the intrinsic call.
3375 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
3376 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
3377 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
3378 return RValue::getIgnored();
3380 case Builtin::BI__builtin_annotation: {
3381 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
3382 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
3385 // Get the annotation string, go through casts. Sema requires this to be a
3386 // non-wide string literal, potentially casted, so the cast<> is safe.
3387 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
3388 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
3389 return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
3391 case Builtin::BI__builtin_addcb:
3392 case Builtin::BI__builtin_addcs:
3393 case Builtin::BI__builtin_addc:
3394 case Builtin::BI__builtin_addcl:
3395 case Builtin::BI__builtin_addcll:
3396 case Builtin::BI__builtin_subcb:
3397 case Builtin::BI__builtin_subcs:
3398 case Builtin::BI__builtin_subc:
3399 case Builtin::BI__builtin_subcl:
3400 case Builtin::BI__builtin_subcll: {
3402 // We translate all of these builtins from expressions of the form:
3403 // int x = ..., y = ..., carryin = ..., carryout, result;
3404 // result = __builtin_addc(x, y, carryin, &carryout);
3406 // to LLVM IR of the form:
3408 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
3409 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
3410 // %carry1 = extractvalue {i32, i1} %tmp1, 1
3411 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
3413 // %result = extractvalue {i32, i1} %tmp2, 0
3414 // %carry2 = extractvalue {i32, i1} %tmp2, 1
3415 // %tmp3 = or i1 %carry1, %carry2
3416 // %tmp4 = zext i1 %tmp3 to i32
3417 // store i32 %tmp4, i32* %carryout
3419 // Scalarize our inputs.
3420 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3421 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3422 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
3423 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
3425 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
3426 llvm::Intrinsic::ID IntrinsicId;
3427 switch (BuiltinID) {
3428 default: llvm_unreachable("Unknown multiprecision builtin id.");
3429 case Builtin::BI__builtin_addcb:
3430 case Builtin::BI__builtin_addcs:
3431 case Builtin::BI__builtin_addc:
3432 case Builtin::BI__builtin_addcl:
3433 case Builtin::BI__builtin_addcll:
3434 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3436 case Builtin::BI__builtin_subcb:
3437 case Builtin::BI__builtin_subcs:
3438 case Builtin::BI__builtin_subc:
3439 case Builtin::BI__builtin_subcl:
3440 case Builtin::BI__builtin_subcll:
3441 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3445 // Construct our resulting LLVM IR expression.
3446 llvm::Value *Carry1;
3447 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
3449 llvm::Value *Carry2;
3450 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
3451 Sum1, Carryin, Carry2);
3452 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
3454 Builder.CreateStore(CarryOut, CarryOutPtr);
3455 return RValue::get(Sum2);
3458 case Builtin::BI__builtin_add_overflow:
3459 case Builtin::BI__builtin_sub_overflow:
3460 case Builtin::BI__builtin_mul_overflow: {
3461 const clang::Expr *LeftArg = E->getArg(0);
3462 const clang::Expr *RightArg = E->getArg(1);
3463 const clang::Expr *ResultArg = E->getArg(2);
3465 clang::QualType ResultQTy =
3466 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
3468 WidthAndSignedness LeftInfo =
3469 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
3470 WidthAndSignedness RightInfo =
3471 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
3472 WidthAndSignedness ResultInfo =
3473 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
3475 // Handle mixed-sign multiplication as a special case, because adding
3476 // runtime or backend support for our generic irgen would be too expensive.
3477 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
3478 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
3479 RightInfo, ResultArg, ResultQTy,
3482 WidthAndSignedness EncompassingInfo =
3483 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
3485 llvm::Type *EncompassingLLVMTy =
3486 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
3488 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
3490 llvm::Intrinsic::ID IntrinsicId;
3491 switch (BuiltinID) {
3493 llvm_unreachable("Unknown overflow builtin id.");
3494 case Builtin::BI__builtin_add_overflow:
3495 IntrinsicId = EncompassingInfo.Signed
3496 ? llvm::Intrinsic::sadd_with_overflow
3497 : llvm::Intrinsic::uadd_with_overflow;
3499 case Builtin::BI__builtin_sub_overflow:
3500 IntrinsicId = EncompassingInfo.Signed
3501 ? llvm::Intrinsic::ssub_with_overflow
3502 : llvm::Intrinsic::usub_with_overflow;
3504 case Builtin::BI__builtin_mul_overflow:
3505 IntrinsicId = EncompassingInfo.Signed
3506 ? llvm::Intrinsic::smul_with_overflow
3507 : llvm::Intrinsic::umul_with_overflow;
3511 llvm::Value *Left = EmitScalarExpr(LeftArg);
3512 llvm::Value *Right = EmitScalarExpr(RightArg);
3513 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
3515 // Extend each operand to the encompassing type.
3516 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
3517 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
3519 // Perform the operation on the extended values.
3520 llvm::Value *Overflow, *Result;
3521 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
3523 if (EncompassingInfo.Width > ResultInfo.Width) {
3524 // The encompassing type is wider than the result type, so we need to
3526 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
3528 // To see if the truncation caused an overflow, we will extend
3529 // the result and then compare it to the original result.
3530 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
3531 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
3532 llvm::Value *TruncationOverflow =
3533 Builder.CreateICmpNE(Result, ResultTruncExt);
3535 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
3536 Result = ResultTrunc;
3539 // Finally, store the result using the pointer.
3541 ResultArg->getType()->getPointeeType().isVolatileQualified();
3542 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
3544 return RValue::get(Overflow);
3547 case Builtin::BI__builtin_uadd_overflow:
3548 case Builtin::BI__builtin_uaddl_overflow:
3549 case Builtin::BI__builtin_uaddll_overflow:
3550 case Builtin::BI__builtin_usub_overflow:
3551 case Builtin::BI__builtin_usubl_overflow:
3552 case Builtin::BI__builtin_usubll_overflow:
3553 case Builtin::BI__builtin_umul_overflow:
3554 case Builtin::BI__builtin_umull_overflow:
3555 case Builtin::BI__builtin_umulll_overflow:
3556 case Builtin::BI__builtin_sadd_overflow:
3557 case Builtin::BI__builtin_saddl_overflow:
3558 case Builtin::BI__builtin_saddll_overflow:
3559 case Builtin::BI__builtin_ssub_overflow:
3560 case Builtin::BI__builtin_ssubl_overflow:
3561 case Builtin::BI__builtin_ssubll_overflow:
3562 case Builtin::BI__builtin_smul_overflow:
3563 case Builtin::BI__builtin_smull_overflow:
3564 case Builtin::BI__builtin_smulll_overflow: {
3566 // We translate all of these builtins directly to the relevant llvm IR node.
3568 // Scalarize our inputs.
3569 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3570 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3571 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
3573 // Decide which of the overflow intrinsics we are lowering to:
3574 llvm::Intrinsic::ID IntrinsicId;
3575 switch (BuiltinID) {
3576 default: llvm_unreachable("Unknown overflow builtin id.");
3577 case Builtin::BI__builtin_uadd_overflow:
3578 case Builtin::BI__builtin_uaddl_overflow:
3579 case Builtin::BI__builtin_uaddll_overflow:
3580 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3582 case Builtin::BI__builtin_usub_overflow:
3583 case Builtin::BI__builtin_usubl_overflow:
3584 case Builtin::BI__builtin_usubll_overflow:
3585 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3587 case Builtin::BI__builtin_umul_overflow:
3588 case Builtin::BI__builtin_umull_overflow:
3589 case Builtin::BI__builtin_umulll_overflow:
3590 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
3592 case Builtin::BI__builtin_sadd_overflow:
3593 case Builtin::BI__builtin_saddl_overflow:
3594 case Builtin::BI__builtin_saddll_overflow:
3595 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
3597 case Builtin::BI__builtin_ssub_overflow:
3598 case Builtin::BI__builtin_ssubl_overflow:
3599 case Builtin::BI__builtin_ssubll_overflow:
3600 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
3602 case Builtin::BI__builtin_smul_overflow:
3603 case Builtin::BI__builtin_smull_overflow:
3604 case Builtin::BI__builtin_smulll_overflow:
3605 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
3611 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
3612 Builder.CreateStore(Sum, SumOutPtr);
3614 return RValue::get(Carry);
3616 case Builtin::BI__builtin_addressof:
3617 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
3618 case Builtin::BI__builtin_operator_new:
3619 return EmitBuiltinNewDeleteCall(
3620 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
3621 case Builtin::BI__builtin_operator_delete:
3622 return EmitBuiltinNewDeleteCall(
3623 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
3625 case Builtin::BI__builtin_is_aligned:
3626 return EmitBuiltinIsAligned(E);
3627 case Builtin::BI__builtin_align_up:
3628 return EmitBuiltinAlignTo(E, true);
3629 case Builtin::BI__builtin_align_down:
3630 return EmitBuiltinAlignTo(E, false);
3632 case Builtin::BI__noop:
3633 // __noop always evaluates to an integer literal zero.
3634 return RValue::get(ConstantInt::get(IntTy, 0));
3635 case Builtin::BI__builtin_call_with_static_chain: {
3636 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
3637 const Expr *Chain = E->getArg(1);
3638 return EmitCall(Call->getCallee()->getType(),
3639 EmitCallee(Call->getCallee()), Call, ReturnValue,
3640 EmitScalarExpr(Chain));
3642 case Builtin::BI_InterlockedExchange8:
3643 case Builtin::BI_InterlockedExchange16:
3644 case Builtin::BI_InterlockedExchange:
3645 case Builtin::BI_InterlockedExchangePointer:
3647 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
3648 case Builtin::BI_InterlockedCompareExchangePointer:
3649 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
3651 llvm::IntegerType *IntType =
3652 IntegerType::get(getLLVMContext(),
3653 getContext().getTypeSize(E->getType()));
3654 llvm::Type *IntPtrType = IntType->getPointerTo();
3656 llvm::Value *Destination =
3657 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
3659 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
3660 RTy = Exchange->getType();
3661 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
3663 llvm::Value *Comparand =
3664 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
3667 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
3668 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
3670 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
3671 Ordering, Ordering);
3672 Result->setVolatile(true);
3674 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
3678 case Builtin::BI_InterlockedCompareExchange8:
3679 case Builtin::BI_InterlockedCompareExchange16:
3680 case Builtin::BI_InterlockedCompareExchange:
3681 case Builtin::BI_InterlockedCompareExchange64:
3682 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
3683 case Builtin::BI_InterlockedIncrement16:
3684 case Builtin::BI_InterlockedIncrement:
3686 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
3687 case Builtin::BI_InterlockedDecrement16:
3688 case Builtin::BI_InterlockedDecrement:
3690 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
3691 case Builtin::BI_InterlockedAnd8:
3692 case Builtin::BI_InterlockedAnd16:
3693 case Builtin::BI_InterlockedAnd:
3694 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
3695 case Builtin::BI_InterlockedExchangeAdd8:
3696 case Builtin::BI_InterlockedExchangeAdd16:
3697 case Builtin::BI_InterlockedExchangeAdd:
3699 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
3700 case Builtin::BI_InterlockedExchangeSub8:
3701 case Builtin::BI_InterlockedExchangeSub16:
3702 case Builtin::BI_InterlockedExchangeSub:
3704 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
3705 case Builtin::BI_InterlockedOr8:
3706 case Builtin::BI_InterlockedOr16:
3707 case Builtin::BI_InterlockedOr:
3708 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
3709 case Builtin::BI_InterlockedXor8:
3710 case Builtin::BI_InterlockedXor16:
3711 case Builtin::BI_InterlockedXor:
3712 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
3714 case Builtin::BI_bittest64:
3715 case Builtin::BI_bittest:
3716 case Builtin::BI_bittestandcomplement64:
3717 case Builtin::BI_bittestandcomplement:
3718 case Builtin::BI_bittestandreset64:
3719 case Builtin::BI_bittestandreset:
3720 case Builtin::BI_bittestandset64:
3721 case Builtin::BI_bittestandset:
3722 case Builtin::BI_interlockedbittestandreset:
3723 case Builtin::BI_interlockedbittestandreset64:
3724 case Builtin::BI_interlockedbittestandset64:
3725 case Builtin::BI_interlockedbittestandset:
3726 case Builtin::BI_interlockedbittestandset_acq:
3727 case Builtin::BI_interlockedbittestandset_rel:
3728 case Builtin::BI_interlockedbittestandset_nf:
3729 case Builtin::BI_interlockedbittestandreset_acq:
3730 case Builtin::BI_interlockedbittestandreset_rel:
3731 case Builtin::BI_interlockedbittestandreset_nf:
3732 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
3734 // These builtins exist to emit regular volatile loads and stores not
3735 // affected by the -fms-volatile setting.
3736 case Builtin::BI__iso_volatile_load8:
3737 case Builtin::BI__iso_volatile_load16:
3738 case Builtin::BI__iso_volatile_load32:
3739 case Builtin::BI__iso_volatile_load64:
3740 return RValue::get(EmitISOVolatileLoad(*this, E));
3741 case Builtin::BI__iso_volatile_store8:
3742 case Builtin::BI__iso_volatile_store16:
3743 case Builtin::BI__iso_volatile_store32:
3744 case Builtin::BI__iso_volatile_store64:
3745 return RValue::get(EmitISOVolatileStore(*this, E));
3747 case Builtin::BI__exception_code:
3748 case Builtin::BI_exception_code:
3749 return RValue::get(EmitSEHExceptionCode());
3750 case Builtin::BI__exception_info:
3751 case Builtin::BI_exception_info:
3752 return RValue::get(EmitSEHExceptionInfo());
3753 case Builtin::BI__abnormal_termination:
3754 case Builtin::BI_abnormal_termination:
3755 return RValue::get(EmitSEHAbnormalTermination());
3756 case Builtin::BI_setjmpex:
3757 if (getTarget().getTriple().isOSMSVCRT())
3758 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3760 case Builtin::BI_setjmp:
3761 if (getTarget().getTriple().isOSMSVCRT()) {
3762 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
3763 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
3764 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
3765 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3766 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
3770 case Builtin::BI__GetExceptionInfo: {
3771 if (llvm::GlobalVariable *GV =
3772 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
3773 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
3777 case Builtin::BI__fastfail:
3778 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
3780 case Builtin::BI__builtin_coro_size: {
3781 auto & Context = getContext();
3782 auto SizeTy = Context.getSizeType();
3783 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3784 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
3785 return RValue::get(Builder.CreateCall(F));
3788 case Builtin::BI__builtin_coro_id:
3789 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
3790 case Builtin::BI__builtin_coro_promise:
3791 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
3792 case Builtin::BI__builtin_coro_resume:
3793 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
3794 case Builtin::BI__builtin_coro_frame:
3795 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
3796 case Builtin::BI__builtin_coro_noop:
3797 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
3798 case Builtin::BI__builtin_coro_free:
3799 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
3800 case Builtin::BI__builtin_coro_destroy:
3801 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
3802 case Builtin::BI__builtin_coro_done:
3803 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
3804 case Builtin::BI__builtin_coro_alloc:
3805 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
3806 case Builtin::BI__builtin_coro_begin:
3807 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
3808 case Builtin::BI__builtin_coro_end:
3809 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
3810 case Builtin::BI__builtin_coro_suspend:
3811 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
3812 case Builtin::BI__builtin_coro_param:
3813 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
3815 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
3816 case Builtin::BIread_pipe:
3817 case Builtin::BIwrite_pipe: {
3818 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3819 *Arg1 = EmitScalarExpr(E->getArg(1));
3820 CGOpenCLRuntime OpenCLRT(CGM);
3821 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3822 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3824 // Type of the generic packet parameter.
3825 unsigned GenericAS =
3826 getContext().getTargetAddressSpace(LangAS::opencl_generic);
3827 llvm::Type *I8PTy = llvm::PointerType::get(
3828 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
3830 // Testing which overloaded version we should generate the call for.
3831 if (2U == E->getNumArgs()) {
3832 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
3834 // Creating a generic function type to be able to call with any builtin or
3835 // user defined type.
3836 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
3837 llvm::FunctionType *FTy = llvm::FunctionType::get(
3838 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3839 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
3841 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3842 {Arg0, BCast, PacketSize, PacketAlign}));
3844 assert(4 == E->getNumArgs() &&
3845 "Illegal number of parameters to pipe function");
3846 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
3849 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
3851 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
3852 *Arg3 = EmitScalarExpr(E->getArg(3));
3853 llvm::FunctionType *FTy = llvm::FunctionType::get(
3854 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3855 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
3856 // We know the third argument is an integer type, but we may need to cast
3858 if (Arg2->getType() != Int32Ty)
3859 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
3860 return RValue::get(Builder.CreateCall(
3861 CGM.CreateRuntimeFunction(FTy, Name),
3862 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
3865 // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
3867 case Builtin::BIreserve_read_pipe:
3868 case Builtin::BIreserve_write_pipe:
3869 case Builtin::BIwork_group_reserve_read_pipe:
3870 case Builtin::BIwork_group_reserve_write_pipe:
3871 case Builtin::BIsub_group_reserve_read_pipe:
3872 case Builtin::BIsub_group_reserve_write_pipe: {
3873 // Composing the mangled name for the function.
3875 if (BuiltinID == Builtin::BIreserve_read_pipe)
3876 Name = "__reserve_read_pipe";
3877 else if (BuiltinID == Builtin::BIreserve_write_pipe)
3878 Name = "__reserve_write_pipe";
3879 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
3880 Name = "__work_group_reserve_read_pipe";
3881 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
3882 Name = "__work_group_reserve_write_pipe";
3883 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
3884 Name = "__sub_group_reserve_read_pipe";
3886 Name = "__sub_group_reserve_write_pipe";
3888 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3889 *Arg1 = EmitScalarExpr(E->getArg(1));
3890 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
3891 CGOpenCLRuntime OpenCLRT(CGM);
3892 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3893 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3895 // Building the generic function prototype.
3896 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
3897 llvm::FunctionType *FTy = llvm::FunctionType::get(
3898 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3899 // We know the second argument is an integer type, but we may need to cast
3901 if (Arg1->getType() != Int32Ty)
3902 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
3904 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3905 {Arg0, Arg1, PacketSize, PacketAlign}));
3907 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
3909 case Builtin::BIcommit_read_pipe:
3910 case Builtin::BIcommit_write_pipe:
3911 case Builtin::BIwork_group_commit_read_pipe:
3912 case Builtin::BIwork_group_commit_write_pipe:
3913 case Builtin::BIsub_group_commit_read_pipe:
3914 case Builtin::BIsub_group_commit_write_pipe: {
3916 if (BuiltinID == Builtin::BIcommit_read_pipe)
3917 Name = "__commit_read_pipe";
3918 else if (BuiltinID == Builtin::BIcommit_write_pipe)
3919 Name = "__commit_write_pipe";
3920 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
3921 Name = "__work_group_commit_read_pipe";
3922 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
3923 Name = "__work_group_commit_write_pipe";
3924 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
3925 Name = "__sub_group_commit_read_pipe";
3927 Name = "__sub_group_commit_write_pipe";
3929 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3930 *Arg1 = EmitScalarExpr(E->getArg(1));
3931 CGOpenCLRuntime OpenCLRT(CGM);
3932 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3933 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3935 // Building the generic function prototype.
3936 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
3937 llvm::FunctionType *FTy =
3938 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
3939 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3942 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3943 {Arg0, Arg1, PacketSize, PacketAlign}));
3945 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
3946 case Builtin::BIget_pipe_num_packets:
3947 case Builtin::BIget_pipe_max_packets: {
3948 const char *BaseName;
3949 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
3950 if (BuiltinID == Builtin::BIget_pipe_num_packets)
3951 BaseName = "__get_pipe_num_packets";
3953 BaseName = "__get_pipe_max_packets";
3954 std::string Name = std::string(BaseName) +
3955 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
3957 // Building the generic function prototype.
3958 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3959 CGOpenCLRuntime OpenCLRT(CGM);
3960 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3961 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3962 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
3963 llvm::FunctionType *FTy = llvm::FunctionType::get(
3964 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3966 return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3967 {Arg0, PacketSize, PacketAlign}));
3970 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
3971 case Builtin::BIto_global:
3972 case Builtin::BIto_local:
3973 case Builtin::BIto_private: {
3974 auto Arg0 = EmitScalarExpr(E->getArg(0));
3975 auto NewArgT = llvm::PointerType::get(Int8Ty,
3976 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
3977 auto NewRetT = llvm::PointerType::get(Int8Ty,
3978 CGM.getContext().getTargetAddressSpace(
3979 E->getType()->getPointeeType().getAddressSpace()));
3980 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
3981 llvm::Value *NewArg;
3982 if (Arg0->getType()->getPointerAddressSpace() !=
3983 NewArgT->getPointerAddressSpace())
3984 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
3986 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
3987 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
3989 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
3990 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
3991 ConvertType(E->getType())));
3994 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
3995 // It contains four different overload formats specified in Table 6.13.17.1.
3996 case Builtin::BIenqueue_kernel: {
3997 StringRef Name; // Generated function call name
3998 unsigned NumArgs = E->getNumArgs();
4000 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
4001 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4002 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4004 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
4005 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
4006 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
4007 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
4008 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
4011 // The most basic form of the call with parameters:
4012 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
4013 Name = "__enqueue_kernel_basic";
4014 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4016 llvm::FunctionType *FTy = llvm::FunctionType::get(
4017 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4020 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4021 llvm::Value *Kernel =
4022 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4023 llvm::Value *Block =
4024 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4027 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
4028 llvm::AttributeList ByValAttrSet =
4029 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4032 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4033 {Queue, Flags, Range, Kernel, Block});
4034 RTCall->setAttributes(ByValAttrSet);
4035 return RValue::get(RTCall);
4037 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
4039 // Create a temporary array to hold the sizes of local pointer arguments
4040 // for the block. \p First is the position of the first size argument.
4041 auto CreateArrayForSizeVar = [=](unsigned First)
4042 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4043 llvm::APInt ArraySize(32, NumArgs - First);
4044 QualType SizeArrayTy = getContext().getConstantArrayType(
4045 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
4046 /*IndexTypeQuals=*/0);
4047 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4048 llvm::Value *TmpPtr = Tmp.getPointer();
4049 llvm::Value *TmpSize = EmitLifetimeStart(
4050 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4051 llvm::Value *ElemPtr;
4052 // Each of the following arguments specifies the size of the corresponding
4053 // argument passed to the enqueued block.
4054 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4055 for (unsigned I = First; I < NumArgs; ++I) {
4056 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4057 auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
4061 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4062 Builder.CreateAlignedStore(
4063 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
4065 return std::tie(ElemPtr, TmpSize, TmpPtr);
4068 // Could have events and/or varargs.
4069 if (E->getArg(3)->getType()->isBlockPointerType()) {
4070 // No events passed, but has variadic arguments.
4071 Name = "__enqueue_kernel_varargs";
4073 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4074 llvm::Value *Kernel =
4075 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4076 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4077 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4078 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
4080 // Create a vector of the arguments, as well as a constant value to
4081 // express to the runtime the number of variadic arguments.
4082 llvm::Value *const Args[] = {Queue, Flags,
4084 Block, ConstantInt::get(IntTy, NumArgs - 4),
4086 llvm::Type *const ArgTys[] = {
4087 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
4088 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
4090 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
4091 auto Call = RValue::get(
4092 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
4094 EmitLifetimeEnd(TmpSize, TmpPtr);
4097 // Any calls now have event arguments passed.
4099 llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
4100 llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
4101 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4103 llvm::Value *NumEvents =
4104 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
4106 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
4107 // to be a null pointer constant (including `0` literal), we can take it
4108 // into account and emit null pointer directly.
4109 llvm::Value *EventWaitList = nullptr;
4110 if (E->getArg(4)->isNullPointerConstant(
4111 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4112 EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
4114 EventWaitList = E->getArg(4)->getType()->isArrayType()
4115 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
4116 : EmitScalarExpr(E->getArg(4));
4117 // Convert to generic address space.
4118 EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
4120 llvm::Value *EventRet = nullptr;
4121 if (E->getArg(5)->isNullPointerConstant(
4122 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4123 EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
4126 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
4130 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4131 llvm::Value *Kernel =
4132 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4133 llvm::Value *Block =
4134 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4136 std::vector<llvm::Type *> ArgTys = {
4137 QueueTy, Int32Ty, RangeTy, Int32Ty,
4138 EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
4140 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
4141 NumEvents, EventWaitList, EventRet,
4145 // Has events but no variadics.
4146 Name = "__enqueue_kernel_basic_events";
4147 llvm::FunctionType *FTy = llvm::FunctionType::get(
4148 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4150 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4151 llvm::ArrayRef<llvm::Value *>(Args)));
4153 // Has event info and variadics
4154 // Pass the number of variadics to the runtime function too.
4155 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
4156 ArgTys.push_back(Int32Ty);
4157 Name = "__enqueue_kernel_events_varargs";
4159 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4160 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
4161 Args.push_back(ElemPtr);
4162 ArgTys.push_back(ElemPtr->getType());
4164 llvm::FunctionType *FTy = llvm::FunctionType::get(
4165 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4167 RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4168 llvm::ArrayRef<llvm::Value *>(Args)));
4170 EmitLifetimeEnd(TmpSize, TmpPtr);
4175 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
4177 case Builtin::BIget_kernel_work_group_size: {
4178 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4179 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4181 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4182 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4183 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4184 return RValue::get(Builder.CreateCall(
4185 CGM.CreateRuntimeFunction(
4186 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4188 "__get_kernel_work_group_size_impl"),
4191 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
4192 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4193 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4195 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4196 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4197 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4198 return RValue::get(Builder.CreateCall(
4199 CGM.CreateRuntimeFunction(
4200 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4202 "__get_kernel_preferred_work_group_size_multiple_impl"),
4205 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
4206 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
4207 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4208 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4209 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
4210 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
4212 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
4213 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4214 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4216 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
4217 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
4218 : "__get_kernel_sub_group_count_for_ndrange_impl";
4219 return RValue::get(Builder.CreateCall(
4220 CGM.CreateRuntimeFunction(
4221 llvm::FunctionType::get(
4222 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
4225 {NDRange, Kernel, Block}));
4228 case Builtin::BI__builtin_store_half:
4229 case Builtin::BI__builtin_store_halff: {
4230 Value *Val = EmitScalarExpr(E->getArg(0));
4231 Address Address = EmitPointerWithAlignment(E->getArg(1));
4232 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
4233 return RValue::get(Builder.CreateStore(HalfVal, Address));
4235 case Builtin::BI__builtin_load_half: {
4236 Address Address = EmitPointerWithAlignment(E->getArg(0));
4237 Value *HalfVal = Builder.CreateLoad(Address);
4238 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
4240 case Builtin::BI__builtin_load_halff: {
4241 Address Address = EmitPointerWithAlignment(E->getArg(0));
4242 Value *HalfVal = Builder.CreateLoad(Address);
4243 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
4245 case Builtin::BIprintf:
4246 if (getTarget().getTriple().isNVPTX())
4247 return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
4248 if (getTarget().getTriple().getArch() == Triple::amdgcn &&
4250 return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
4252 case Builtin::BI__builtin_canonicalize:
4253 case Builtin::BI__builtin_canonicalizef:
4254 case Builtin::BI__builtin_canonicalizef16:
4255 case Builtin::BI__builtin_canonicalizel:
4256 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
4258 case Builtin::BI__builtin_thread_pointer: {
4259 if (!getContext().getTargetInfo().isTLSSupported())
4260 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
4261 // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
4264 case Builtin::BI__builtin_os_log_format:
4265 return emitBuiltinOSLogFormat(*E);
4267 case Builtin::BI__xray_customevent: {
4268 if (!ShouldXRayInstrumentFunction())
4269 return RValue::getIgnored();
4271 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4272 XRayInstrKind::Custom))
4273 return RValue::getIgnored();
4275 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4276 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
4277 return RValue::getIgnored();
4279 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
4280 auto FTy = F->getFunctionType();
4281 auto Arg0 = E->getArg(0);
4282 auto Arg0Val = EmitScalarExpr(Arg0);
4283 auto Arg0Ty = Arg0->getType();
4284 auto PTy0 = FTy->getParamType(0);
4285 if (PTy0 != Arg0Val->getType()) {
4286 if (Arg0Ty->isArrayType())
4287 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
4289 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
4291 auto Arg1 = EmitScalarExpr(E->getArg(1));
4292 auto PTy1 = FTy->getParamType(1);
4293 if (PTy1 != Arg1->getType())
4294 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
4295 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
4298 case Builtin::BI__xray_typedevent: {
4299 // TODO: There should be a way to always emit events even if the current
4300 // function is not instrumented. Losing events in a stream can cripple
4302 if (!ShouldXRayInstrumentFunction())
4303 return RValue::getIgnored();
4305 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4306 XRayInstrKind::Typed))
4307 return RValue::getIgnored();
4309 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4310 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
4311 return RValue::getIgnored();
4313 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
4314 auto FTy = F->getFunctionType();
4315 auto Arg0 = EmitScalarExpr(E->getArg(0));
4316 auto PTy0 = FTy->getParamType(0);
4317 if (PTy0 != Arg0->getType())
4318 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
4319 auto Arg1 = E->getArg(1);
4320 auto Arg1Val = EmitScalarExpr(Arg1);
4321 auto Arg1Ty = Arg1->getType();
4322 auto PTy1 = FTy->getParamType(1);
4323 if (PTy1 != Arg1Val->getType()) {
4324 if (Arg1Ty->isArrayType())
4325 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
4327 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
4329 auto Arg2 = EmitScalarExpr(E->getArg(2));
4330 auto PTy2 = FTy->getParamType(2);
4331 if (PTy2 != Arg2->getType())
4332 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
4333 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
4336 case Builtin::BI__builtin_ms_va_start:
4337 case Builtin::BI__builtin_ms_va_end:
4339 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
4340 BuiltinID == Builtin::BI__builtin_ms_va_start));
4342 case Builtin::BI__builtin_ms_va_copy: {
4343 // Lower this manually. We can't reliably determine whether or not any
4344 // given va_copy() is for a Win64 va_list from the calling convention
4345 // alone, because it's legal to do this from a System V ABI function.
4346 // With opaque pointer types, we won't have enough information in LLVM
4347 // IR to determine this from the argument types, either. Best to do it
4348 // now, while we have enough information.
4349 Address DestAddr = EmitMSVAListRef(E->getArg(0));
4350 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
4352 llvm::Type *BPP = Int8PtrPtrTy;
4354 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
4355 DestAddr.getAlignment());
4356 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
4357 SrcAddr.getAlignment());
4359 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
4360 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
4364 // If this is an alias for a lib function (e.g. __builtin_sin), emit
4365 // the call using the normal call path, but using the unmangled
4366 // version of the function name.
4367 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
4368 return emitLibraryCall(*this, FD, E,
4369 CGM.getBuiltinLibFunction(FD, BuiltinID));
4371 // If this is a predefined lib function (e.g. malloc), emit the call
4372 // using exactly the normal call path.
4373 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
4374 return emitLibraryCall(*this, FD, E,
4375 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
4377 // Check that a call to a target specific builtin has the correct target
4379 // This is down here to avoid non-target specific builtins, however, if
4380 // generic builtins start to require generic target features then we
4381 // can move this up to the beginning of the function.
4382 checkTargetFeatures(E, FD);
4384 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
4385 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
4387 // See if we have a target specific intrinsic.
4388 const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
4389 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
4391 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
4392 if (!Prefix.empty()) {
4393 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
4394 // NOTE we don't need to perform a compatibility flag check here since the
4395 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
4396 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
4397 if (IntrinsicID == Intrinsic::not_intrinsic)
4398 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
4401 if (IntrinsicID != Intrinsic::not_intrinsic) {
4402 SmallVector<Value*, 16> Args;
4404 // Find out if any arguments are required to be integer constant
4406 unsigned ICEArguments = 0;
4407 ASTContext::GetBuiltinTypeError Error;
4408 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
4409 assert(Error == ASTContext::GE_None && "Should not codegen an error");
4411 Function *F = CGM.getIntrinsic(IntrinsicID);
4412 llvm::FunctionType *FTy = F->getFunctionType();
4414 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
4416 // If this is a normal argument, just emit it as a scalar.
4417 if ((ICEArguments & (1 << i)) == 0) {
4418 ArgValue = EmitScalarExpr(E->getArg(i));
4420 // If this is required to be a constant, constant fold it so that we
4421 // know that the generated intrinsic gets a ConstantInt.
4422 llvm::APSInt Result;
4423 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
4424 assert(IsConst && "Constant arg isn't actually constant?");
4426 ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
4429 // If the intrinsic arg type is different from the builtin arg type
4430 // we need to do a bit cast.
4431 llvm::Type *PTy = FTy->getParamType(i);
4432 if (PTy != ArgValue->getType()) {
4433 // XXX - vector of pointers?
4434 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
4435 if (PtrTy->getAddressSpace() !=
4436 ArgValue->getType()->getPointerAddressSpace()) {
4437 ArgValue = Builder.CreateAddrSpaceCast(
4439 ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
4443 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
4444 "Must be able to losslessly bit cast to param");
4445 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
4448 Args.push_back(ArgValue);
4451 Value *V = Builder.CreateCall(F, Args);
4452 QualType BuiltinRetType = E->getType();
4454 llvm::Type *RetTy = VoidTy;
4455 if (!BuiltinRetType->isVoidType())
4456 RetTy = ConvertType(BuiltinRetType);
4458 if (RetTy != V->getType()) {
4459 // XXX - vector of pointers?
4460 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
4461 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
4462 V = Builder.CreateAddrSpaceCast(
4463 V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
4467 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
4468 "Must be able to losslessly bit cast result type");
4469 V = Builder.CreateBitCast(V, RetTy);
4472 return RValue::get(V);
4475 // Some target-specific builtins can have aggregate return values, e.g.
4476 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
4477 // ReturnValue to be non-null, so that the target-specific emission code can
4478 // always just emit into it.
4479 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
4480 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
4481 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
4482 ReturnValue = ReturnValueSlot(DestPtr, false);
4485 // Now see if we can emit a target-specific builtin.
4486 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
4489 return RValue::get(V);
4491 return RValue::getAggregate(ReturnValue.getValue(),
4492 ReturnValue.isVolatile());
4494 llvm_unreachable("No current target builtin returns complex");
4496 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
4499 ErrorUnsupported(E, "builtin function");
4501 // Unknown builtin, for now just dump it out and return undef.
4502 return GetUndefRValue(E->getType());
4505 static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
4506 unsigned BuiltinID, const CallExpr *E,
4507 ReturnValueSlot ReturnValue,
4508 llvm::Triple::ArchType Arch) {
4510 case llvm::Triple::arm:
4511 case llvm::Triple::armeb:
4512 case llvm::Triple::thumb:
4513 case llvm::Triple::thumbeb:
4514 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
4515 case llvm::Triple::aarch64:
4516 case llvm::Triple::aarch64_32:
4517 case llvm::Triple::aarch64_be:
4518 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
4519 case llvm::Triple::bpfeb:
4520 case llvm::Triple::bpfel:
4521 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
4522 case llvm::Triple::x86:
4523 case llvm::Triple::x86_64:
4524 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
4525 case llvm::Triple::ppc:
4526 case llvm::Triple::ppc64:
4527 case llvm::Triple::ppc64le:
4528 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
4529 case llvm::Triple::r600:
4530 case llvm::Triple::amdgcn:
4531 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
4532 case llvm::Triple::systemz:
4533 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
4534 case llvm::Triple::nvptx:
4535 case llvm::Triple::nvptx64:
4536 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
4537 case llvm::Triple::wasm32:
4538 case llvm::Triple::wasm64:
4539 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
4540 case llvm::Triple::hexagon:
4541 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
4547 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
4549 ReturnValueSlot ReturnValue) {
4550 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
4551 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
4552 return EmitTargetArchBuiltinExpr(
4553 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
4554 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
4557 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
4558 getTarget().getTriple().getArch());
4561 static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
4562 NeonTypeFlags TypeFlags,
4563 bool HasLegalHalfType = true,
4565 bool AllowBFloatArgsAndRet = true) {
4566 int IsQuad = TypeFlags.isQuad();
4567 switch (TypeFlags.getEltType()) {
4568 case NeonTypeFlags::Int8:
4569 case NeonTypeFlags::Poly8:
4570 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
4571 case NeonTypeFlags::Int16:
4572 case NeonTypeFlags::Poly16:
4573 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4574 case NeonTypeFlags::BFloat16:
4575 if (AllowBFloatArgsAndRet)
4576 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
4578 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4579 case NeonTypeFlags::Float16:
4580 if (HasLegalHalfType)
4581 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
4583 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4584 case NeonTypeFlags::Int32:
4585 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
4586 case NeonTypeFlags::Int64:
4587 case NeonTypeFlags::Poly64:
4588 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
4589 case NeonTypeFlags::Poly128:
4590 // FIXME: i128 and f128 doesn't get fully support in Clang and llvm.
4591 // There is a lot of i128 and f128 API missing.
4592 // so we use v16i8 to represent poly128 and get pattern matched.
4593 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
4594 case NeonTypeFlags::Float32:
4595 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
4596 case NeonTypeFlags::Float64:
4597 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
4599 llvm_unreachable("Unknown vector element type!");
4602 static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
4603 NeonTypeFlags IntTypeFlags) {
4604 int IsQuad = IntTypeFlags.isQuad();
4605 switch (IntTypeFlags.getEltType()) {
4606 case NeonTypeFlags::Int16:
4607 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
4608 case NeonTypeFlags::Int32:
4609 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
4610 case NeonTypeFlags::Int64:
4611 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
4613 llvm_unreachable("Type can't be converted to floating-point!");
4617 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
4618 const ElementCount &Count) {
4619 Value *SV = llvm::ConstantVector::getSplat(Count, C);
4620 return Builder.CreateShuffleVector(V, V, SV, "lane");
4623 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
4624 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
4625 return EmitNeonSplat(V, C, EC);
4628 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
4630 unsigned shift, bool rightshift) {
4632 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
4633 ai != ae; ++ai, ++j) {
4634 if (F->isConstrainedFPIntrinsic())
4635 if (ai->getType()->isMetadataTy())
4637 if (shift > 0 && shift == j)
4638 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
4640 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
4643 if (F->isConstrainedFPIntrinsic())
4644 return Builder.CreateConstrainedFPCall(F, Ops, name);
4646 return Builder.CreateCall(F, Ops, name);
4649 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
4651 int SV = cast<ConstantInt>(V)->getSExtValue();
4652 return ConstantInt::get(Ty, neg ? -SV : SV);
4655 // Right-shift a vector by a constant.
4656 Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
4657 llvm::Type *Ty, bool usgn,
4659 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
4661 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
4662 int EltSize = VTy->getScalarSizeInBits();
4664 Vec = Builder.CreateBitCast(Vec, Ty);
4666 // lshr/ashr are undefined when the shift amount is equal to the vector
4668 if (ShiftAmt == EltSize) {
4670 // Right-shifting an unsigned value by its size yields 0.
4671 return llvm::ConstantAggregateZero::get(VTy);
4673 // Right-shifting a signed value by its size is equivalent
4674 // to a shift of size-1.
4676 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
4680 Shift = EmitNeonShiftVector(Shift, Ty, false);
4682 return Builder.CreateLShr(Vec, Shift, name);
4684 return Builder.CreateAShr(Vec, Shift, name);
4688 AddRetType = (1 << 0),
4689 Add1ArgType = (1 << 1),
4690 Add2ArgTypes = (1 << 2),
4692 VectorizeRetType = (1 << 3),
4693 VectorizeArgTypes = (1 << 4),
4695 InventFloatType = (1 << 5),
4696 UnsignedAlts = (1 << 6),
4698 Use64BitVectors = (1 << 7),
4699 Use128BitVectors = (1 << 8),
4701 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
4702 VectorRet = AddRetType | VectorizeRetType,
4703 VectorRetGetArgs01 =
4704 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
4706 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
4710 struct ARMVectorIntrinsicInfo {
4711 const char *NameHint;
4713 unsigned LLVMIntrinsic;
4714 unsigned AltLLVMIntrinsic;
4715 uint64_t TypeModifier;
4717 bool operator<(unsigned RHSBuiltinID) const {
4718 return BuiltinID < RHSBuiltinID;
4720 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
4721 return BuiltinID < TE.BuiltinID;
4724 } // end anonymous namespace
4726 #define NEONMAP0(NameBase) \
4727 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
4729 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
4730 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
4731 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
4733 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
4734 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
4735 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
4738 static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
4739 NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
4740 NEONMAP0(splat_lane_v),
4741 NEONMAP0(splat_laneq_v),
4742 NEONMAP0(splatq_lane_v),
4743 NEONMAP0(splatq_laneq_v),
4744 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
4745 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
4746 NEONMAP1(vabs_v, arm_neon_vabs, 0),
4747 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
4749 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
4750 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
4751 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
4752 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
4753 NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
4754 NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
4755 NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
4756 NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
4757 NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
4758 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
4759 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
4760 NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
4761 NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
4762 NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
4763 NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
4764 NEONMAP1(vcage_v, arm_neon_vacge, 0),
4765 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
4766 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
4767 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
4768 NEONMAP1(vcale_v, arm_neon_vacge, 0),
4769 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
4770 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
4771 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
4780 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
4781 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
4784 NEONMAP1(vclz_v, ctlz, Add1ArgType),
4785 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
4786 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
4787 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
4788 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
4789 NEONMAP0(vcvt_f16_v),
4790 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
4791 NEONMAP0(vcvt_f32_v),
4792 NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4793 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4794 NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
4795 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
4796 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
4797 NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
4798 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
4799 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
4800 NEONMAP0(vcvt_s16_v),
4801 NEONMAP0(vcvt_s32_v),
4802 NEONMAP0(vcvt_s64_v),
4803 NEONMAP0(vcvt_u16_v),
4804 NEONMAP0(vcvt_u32_v),
4805 NEONMAP0(vcvt_u64_v),
4806 NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
4807 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
4808 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
4809 NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
4810 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
4811 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
4812 NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
4813 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
4814 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
4815 NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
4816 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
4817 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
4818 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
4819 NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
4820 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
4821 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
4822 NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
4823 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
4824 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
4825 NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
4826 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
4827 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
4828 NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
4829 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
4830 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
4831 NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
4832 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
4833 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
4834 NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
4835 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
4836 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
4837 NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
4838 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
4839 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
4840 NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
4841 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
4842 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
4843 NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
4844 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
4845 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
4846 NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
4847 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
4848 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
4849 NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
4850 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
4851 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
4852 NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
4853 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
4854 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
4855 NEONMAP0(vcvtq_f16_v),
4856 NEONMAP0(vcvtq_f32_v),
4857 NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4858 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4859 NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
4860 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
4861 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
4862 NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
4863 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
4864 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
4865 NEONMAP0(vcvtq_s16_v),
4866 NEONMAP0(vcvtq_s32_v),
4867 NEONMAP0(vcvtq_s64_v),
4868 NEONMAP0(vcvtq_u16_v),
4869 NEONMAP0(vcvtq_u32_v),
4870 NEONMAP0(vcvtq_u64_v),
4871 NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
4872 NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
4877 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
4878 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
4879 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
4880 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
4881 NEONMAP0(vld1_dup_v),
4882 NEONMAP1(vld1_v, arm_neon_vld1, 0),
4883 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
4884 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
4885 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
4886 NEONMAP0(vld1q_dup_v),
4887 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
4888 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
4889 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
4890 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
4891 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
4892 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
4893 NEONMAP1(vld2_v, arm_neon_vld2, 0),
4894 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
4895 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
4896 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
4897 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
4898 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
4899 NEONMAP1(vld3_v, arm_neon_vld3, 0),
4900 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
4901 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
4902 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
4903 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
4904 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
4905 NEONMAP1(vld4_v, arm_neon_vld4, 0),
4906 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
4907 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
4908 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
4909 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
4910 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
4911 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
4912 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
4913 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
4914 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
4915 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
4916 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
4917 NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
4920 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
4922 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
4923 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
4924 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
4925 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
4926 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
4927 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
4928 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
4929 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
4930 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
4931 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
4932 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
4933 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
4934 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
4935 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
4936 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
4937 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
4938 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
4939 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
4940 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
4941 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
4942 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
4943 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
4944 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
4945 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
4946 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
4947 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
4948 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
4949 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
4950 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
4951 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
4952 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
4953 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
4954 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
4955 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
4956 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
4957 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
4958 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
4959 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
4960 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
4961 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
4962 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
4963 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
4964 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
4965 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
4968 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
4969 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
4970 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
4971 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
4972 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
4973 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
4974 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
4975 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
4976 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
4977 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
4978 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
4979 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
4980 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
4981 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
4982 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
4983 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
4984 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
4985 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
4986 NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
4987 NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
4988 NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
4989 NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
4990 NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
4991 NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
4993 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
4994 NEONMAP0(vshll_n_v),
4995 NEONMAP0(vshlq_n_v),
4996 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
4998 NEONMAP0(vshrn_n_v),
4999 NEONMAP0(vshrq_n_v),
5000 NEONMAP1(vst1_v, arm_neon_vst1, 0),
5001 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
5002 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
5003 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
5004 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
5005 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
5006 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
5007 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
5008 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
5009 NEONMAP1(vst2_v, arm_neon_vst2, 0),
5010 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
5011 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
5012 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
5013 NEONMAP1(vst3_v, arm_neon_vst3, 0),
5014 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
5015 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
5016 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
5017 NEONMAP1(vst4_v, arm_neon_vst4, 0),
5018 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
5019 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
5025 NEONMAP1(vusdot_v, arm_neon_usdot, 0),
5026 NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
5027 NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
5034 static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
5035 NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
5036 NEONMAP0(splat_lane_v),
5037 NEONMAP0(splat_laneq_v),
5038 NEONMAP0(splatq_lane_v),
5039 NEONMAP0(splatq_laneq_v),
5040 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
5041 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
5043 NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
5044 NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
5045 NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
5046 NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
5047 NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
5048 NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
5049 NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
5050 NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
5051 NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
5052 NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5053 NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5054 NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5055 NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5056 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
5057 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
5058 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
5059 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
5060 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
5061 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
5062 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
5063 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
5072 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
5073 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
5076 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5077 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5078 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5079 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5080 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
5081 NEONMAP0(vcvt_f16_v),
5082 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
5083 NEONMAP0(vcvt_f32_v),
5084 NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5085 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5086 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5087 NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5088 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5089 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5090 NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5091 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5092 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5093 NEONMAP0(vcvtq_f16_v),
5094 NEONMAP0(vcvtq_f32_v),
5095 NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
5096 NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5097 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5098 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5099 NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5100 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5101 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5102 NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5103 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5104 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5105 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
5106 NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5107 NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5112 NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
5113 NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
5114 NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
5115 NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
5116 NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
5117 NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
5118 NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
5119 NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
5120 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5121 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5122 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5123 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5124 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
5125 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
5126 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
5127 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
5128 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
5129 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
5130 NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
5133 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
5134 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
5135 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
5136 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5137 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5138 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
5139 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
5140 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
5141 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5142 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5143 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
5144 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
5145 NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
5146 NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5147 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
5148 NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
5149 NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5150 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
5151 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
5152 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
5153 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
5154 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
5155 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
5156 NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5157 NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5158 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
5159 NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5160 NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5161 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
5162 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5163 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5164 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5165 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5166 NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts),
5167 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5168 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
5169 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
5170 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5171 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5172 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
5173 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5174 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5175 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
5176 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
5177 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5178 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5181 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5182 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5183 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5184 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5185 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5186 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5187 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
5188 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
5189 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
5190 NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
5191 NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
5192 NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
5193 NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
5194 NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
5195 NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
5197 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5198 NEONMAP0(vshll_n_v),
5199 NEONMAP0(vshlq_n_v),
5200 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5202 NEONMAP0(vshrn_n_v),
5203 NEONMAP0(vshrq_n_v),
5204 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
5205 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
5206 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
5207 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
5208 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
5209 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
5213 NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
5214 NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
5215 NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
5218 static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
5219 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
5220 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
5221 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
5222 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5223 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5224 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5225 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5226 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5227 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5228 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5229 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5230 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
5231 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5232 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
5233 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5234 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5235 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5236 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5237 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5238 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5239 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5240 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5241 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5242 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5243 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5244 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5245 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5246 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5247 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5248 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5249 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5250 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5251 NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
5252 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5253 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5254 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5255 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5256 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5257 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5258 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5259 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5260 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5261 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5262 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5263 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5264 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5265 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5266 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5267 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5268 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
5269 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5270 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5271 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5272 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5273 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5274 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5275 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5276 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5277 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5278 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5279 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5280 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5281 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5282 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5283 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5284 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5285 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5286 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5287 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5288 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5289 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
5290 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
5291 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
5292 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5293 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5294 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5295 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5296 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5297 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5298 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5299 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5300 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5301 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5302 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5303 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
5304 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5305 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
5306 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5307 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5308 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
5309 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
5310 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5311 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5312 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
5313 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
5314 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
5315 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
5316 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
5317 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
5318 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
5319 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
5320 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5321 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5322 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5323 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5324 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
5325 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5326 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5327 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5328 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
5329 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5330 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
5331 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
5332 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
5333 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5334 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5335 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
5336 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
5337 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5338 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5339 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
5340 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
5341 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
5342 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
5343 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5344 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5345 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5346 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5347 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
5348 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5349 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5350 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5351 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5352 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5353 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5354 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
5355 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
5356 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5357 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5358 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5359 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5360 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
5361 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
5362 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
5363 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
5364 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5365 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5366 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
5367 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
5368 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
5369 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5370 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5371 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5372 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5373 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
5374 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5375 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5376 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5377 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5378 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
5379 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
5380 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5381 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5382 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
5383 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
5384 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
5385 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
5386 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
5387 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
5388 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
5389 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
5390 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
5391 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
5392 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
5393 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
5394 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
5395 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
5396 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
5397 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
5398 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
5399 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
5400 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
5401 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
5402 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5403 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
5404 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5405 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
5406 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
5407 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
5408 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5409 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
5410 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5411 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
5412 // FP16 scalar intrinisics go here.
5413 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
5414 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5415 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5416 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5417 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5418 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5419 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5420 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5421 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5422 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5423 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5424 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5425 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5426 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5427 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5428 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5429 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5430 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5431 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5432 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5433 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5434 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5435 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5436 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5437 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5438 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
5439 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
5440 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
5441 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
5442 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
5449 #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5451 #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
5455 #define SVEMAP2(NameBase, TypeModifier) \
5456 { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
5457 static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
5458 #define GET_SVE_LLVM_INTRINSIC_MAP
5459 #include "clang/Basic/arm_sve_builtin_cg.inc"
5460 #undef GET_SVE_LLVM_INTRINSIC_MAP
5466 static bool NEONSIMDIntrinsicsProvenSorted = false;
5468 static bool AArch64SIMDIntrinsicsProvenSorted = false;
5469 static bool AArch64SISDIntrinsicsProvenSorted = false;
5470 static bool AArch64SVEIntrinsicsProvenSorted = false;
5472 static const ARMVectorIntrinsicInfo *
5473 findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
5474 unsigned BuiltinID, bool &MapProvenSorted) {
5477 if (!MapProvenSorted) {
5478 assert(llvm::is_sorted(IntrinsicMap));
5479 MapProvenSorted = true;
5483 const ARMVectorIntrinsicInfo *Builtin =
5484 llvm::lower_bound(IntrinsicMap, BuiltinID);
5486 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
5492 Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
5494 llvm::Type *ArgType,
5495 const CallExpr *E) {
5497 if (Modifier & Use64BitVectors)
5499 else if (Modifier & Use128BitVectors)
5503 SmallVector<llvm::Type *, 3> Tys;
5504 if (Modifier & AddRetType) {
5505 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
5506 if (Modifier & VectorizeRetType)
5507 Ty = llvm::FixedVectorType::get(
5508 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
5514 if (Modifier & VectorizeArgTypes) {
5515 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
5516 ArgType = llvm::FixedVectorType::get(ArgType, Elts);
5519 if (Modifier & (Add1ArgType | Add2ArgTypes))
5520 Tys.push_back(ArgType);
5522 if (Modifier & Add2ArgTypes)
5523 Tys.push_back(ArgType);
5525 if (Modifier & InventFloatType)
5526 Tys.push_back(FloatTy);
5528 return CGM.getIntrinsic(IntrinsicID, Tys);
5531 static Value *EmitCommonNeonSISDBuiltinExpr(
5532 CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
5533 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
5534 unsigned BuiltinID = SISDInfo.BuiltinID;
5535 unsigned int Int = SISDInfo.LLVMIntrinsic;
5536 unsigned Modifier = SISDInfo.TypeModifier;
5537 const char *s = SISDInfo.NameHint;
5539 switch (BuiltinID) {
5540 case NEON::BI__builtin_neon_vcled_s64:
5541 case NEON::BI__builtin_neon_vcled_u64:
5542 case NEON::BI__builtin_neon_vcles_f32:
5543 case NEON::BI__builtin_neon_vcled_f64:
5544 case NEON::BI__builtin_neon_vcltd_s64:
5545 case NEON::BI__builtin_neon_vcltd_u64:
5546 case NEON::BI__builtin_neon_vclts_f32:
5547 case NEON::BI__builtin_neon_vcltd_f64:
5548 case NEON::BI__builtin_neon_vcales_f32:
5549 case NEON::BI__builtin_neon_vcaled_f64:
5550 case NEON::BI__builtin_neon_vcalts_f32:
5551 case NEON::BI__builtin_neon_vcaltd_f64:
5552 // Only one direction of comparisons actually exist, cmle is actually a cmge
5553 // with swapped operands. The table gives us the right intrinsic but we
5554 // still need to do the swap.
5555 std::swap(Ops[0], Ops[1]);
5559 assert(Int && "Generic code assumes a valid intrinsic");
5561 // Determine the type(s) of this overloaded AArch64 intrinsic.
5562 const Expr *Arg = E->getArg(0);
5563 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
5564 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
5567 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
5568 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5569 ai != ae; ++ai, ++j) {
5570 llvm::Type *ArgTy = ai->getType();
5571 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
5572 ArgTy->getPrimitiveSizeInBits())
5575 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
5576 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
5577 // it before inserting.
5578 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
5579 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
5581 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
5584 Value *Result = CGF.EmitNeonCall(F, Ops, s);
5585 llvm::Type *ResultType = CGF.ConvertType(E->getType());
5586 if (ResultType->getPrimitiveSizeInBits() <
5587 Result->getType()->getPrimitiveSizeInBits())
5588 return CGF.Builder.CreateExtractElement(Result, C0);
5590 return CGF.Builder.CreateBitCast(Result, ResultType, s);
5593 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
5594 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
5595 const char *NameHint, unsigned Modifier, const CallExpr *E,
5596 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
5597 llvm::Triple::ArchType Arch) {
5598 // Get the last argument, which specifies the vector type.
5599 llvm::APSInt NeonTypeConst;
5600 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
5601 if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
5604 // Determine the type of this overloaded NEON intrinsic.
5605 NeonTypeFlags Type(NeonTypeConst.getZExtValue());
5606 bool Usgn = Type.isUnsigned();
5607 bool Quad = Type.isQuad();
5608 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
5609 const bool AllowBFloatArgsAndRet =
5610 getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
5612 llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType, false,
5613 AllowBFloatArgsAndRet);
5614 llvm::Type *Ty = VTy;
5618 auto getAlignmentValue32 = [&](Address addr) -> Value* {
5619 return Builder.getInt32(addr.getAlignment().getQuantity());
5622 unsigned Int = LLVMIntrinsic;
5623 if ((Modifier & UnsignedAlts) && !Usgn)
5624 Int = AltLLVMIntrinsic;
5626 switch (BuiltinID) {
5628 case NEON::BI__builtin_neon_splat_lane_v:
5629 case NEON::BI__builtin_neon_splat_laneq_v:
5630 case NEON::BI__builtin_neon_splatq_lane_v:
5631 case NEON::BI__builtin_neon_splatq_laneq_v: {
5632 auto NumElements = VTy->getElementCount();
5633 if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
5634 NumElements = NumElements * 2;
5635 if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
5636 NumElements = NumElements / 2;
5638 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
5639 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
5641 case NEON::BI__builtin_neon_vpadd_v:
5642 case NEON::BI__builtin_neon_vpaddq_v:
5643 // We don't allow fp/int overloading of intrinsics.
5644 if (VTy->getElementType()->isFloatingPointTy() &&
5645 Int == Intrinsic::aarch64_neon_addp)
5646 Int = Intrinsic::aarch64_neon_faddp;
5648 case NEON::BI__builtin_neon_vabs_v:
5649 case NEON::BI__builtin_neon_vabsq_v:
5650 if (VTy->getElementType()->isFloatingPointTy())
5651 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
5652 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
5653 case NEON::BI__builtin_neon_vaddhn_v: {
5654 llvm::VectorType *SrcTy =
5655 llvm::VectorType::getExtendedElementVectorType(VTy);
5657 // %sum = add <4 x i32> %lhs, %rhs
5658 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5659 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
5660 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
5662 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
5663 Constant *ShiftAmt =
5664 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
5665 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
5667 // %res = trunc <4 x i32> %high to <4 x i16>
5668 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
5670 case NEON::BI__builtin_neon_vcale_v:
5671 case NEON::BI__builtin_neon_vcaleq_v:
5672 case NEON::BI__builtin_neon_vcalt_v:
5673 case NEON::BI__builtin_neon_vcaltq_v:
5674 std::swap(Ops[0], Ops[1]);
5676 case NEON::BI__builtin_neon_vcage_v:
5677 case NEON::BI__builtin_neon_vcageq_v:
5678 case NEON::BI__builtin_neon_vcagt_v:
5679 case NEON::BI__builtin_neon_vcagtq_v: {
5681 switch (VTy->getScalarSizeInBits()) {
5682 default: llvm_unreachable("unexpected type");
5693 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
5694 llvm::Type *Tys[] = { VTy, VecFlt };
5695 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5696 return EmitNeonCall(F, Ops, NameHint);
5698 case NEON::BI__builtin_neon_vceqz_v:
5699 case NEON::BI__builtin_neon_vceqzq_v:
5700 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
5701 ICmpInst::ICMP_EQ, "vceqz");
5702 case NEON::BI__builtin_neon_vcgez_v:
5703 case NEON::BI__builtin_neon_vcgezq_v:
5704 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
5705 ICmpInst::ICMP_SGE, "vcgez");
5706 case NEON::BI__builtin_neon_vclez_v:
5707 case NEON::BI__builtin_neon_vclezq_v:
5708 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
5709 ICmpInst::ICMP_SLE, "vclez");
5710 case NEON::BI__builtin_neon_vcgtz_v:
5711 case NEON::BI__builtin_neon_vcgtzq_v:
5712 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
5713 ICmpInst::ICMP_SGT, "vcgtz");
5714 case NEON::BI__builtin_neon_vcltz_v:
5715 case NEON::BI__builtin_neon_vcltzq_v:
5716 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
5717 ICmpInst::ICMP_SLT, "vcltz");
5718 case NEON::BI__builtin_neon_vclz_v:
5719 case NEON::BI__builtin_neon_vclzq_v:
5720 // We generate target-independent intrinsic, which needs a second argument
5721 // for whether or not clz of zero is undefined; on ARM it isn't.
5722 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
5724 case NEON::BI__builtin_neon_vcvt_f32_v:
5725 case NEON::BI__builtin_neon_vcvtq_f32_v:
5726 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5727 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
5729 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5730 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5731 case NEON::BI__builtin_neon_vcvt_f16_v:
5732 case NEON::BI__builtin_neon_vcvtq_f16_v:
5733 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5734 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
5736 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5737 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5738 case NEON::BI__builtin_neon_vcvt_n_f16_v:
5739 case NEON::BI__builtin_neon_vcvt_n_f32_v:
5740 case NEON::BI__builtin_neon_vcvt_n_f64_v:
5741 case NEON::BI__builtin_neon_vcvtq_n_f16_v:
5742 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
5743 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
5744 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
5745 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
5746 Function *F = CGM.getIntrinsic(Int, Tys);
5747 return EmitNeonCall(F, Ops, "vcvt_n");
5749 case NEON::BI__builtin_neon_vcvt_n_s16_v:
5750 case NEON::BI__builtin_neon_vcvt_n_s32_v:
5751 case NEON::BI__builtin_neon_vcvt_n_u16_v:
5752 case NEON::BI__builtin_neon_vcvt_n_u32_v:
5753 case NEON::BI__builtin_neon_vcvt_n_s64_v:
5754 case NEON::BI__builtin_neon_vcvt_n_u64_v:
5755 case NEON::BI__builtin_neon_vcvtq_n_s16_v:
5756 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
5757 case NEON::BI__builtin_neon_vcvtq_n_u16_v:
5758 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
5759 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
5760 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
5761 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5762 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5763 return EmitNeonCall(F, Ops, "vcvt_n");
5765 case NEON::BI__builtin_neon_vcvt_s32_v:
5766 case NEON::BI__builtin_neon_vcvt_u32_v:
5767 case NEON::BI__builtin_neon_vcvt_s64_v:
5768 case NEON::BI__builtin_neon_vcvt_u64_v:
5769 case NEON::BI__builtin_neon_vcvt_s16_v:
5770 case NEON::BI__builtin_neon_vcvt_u16_v:
5771 case NEON::BI__builtin_neon_vcvtq_s32_v:
5772 case NEON::BI__builtin_neon_vcvtq_u32_v:
5773 case NEON::BI__builtin_neon_vcvtq_s64_v:
5774 case NEON::BI__builtin_neon_vcvtq_u64_v:
5775 case NEON::BI__builtin_neon_vcvtq_s16_v:
5776 case NEON::BI__builtin_neon_vcvtq_u16_v: {
5777 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
5778 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
5779 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
5781 case NEON::BI__builtin_neon_vcvta_s16_v:
5782 case NEON::BI__builtin_neon_vcvta_s32_v:
5783 case NEON::BI__builtin_neon_vcvta_s64_v:
5784 case NEON::BI__builtin_neon_vcvta_u16_v:
5785 case NEON::BI__builtin_neon_vcvta_u32_v:
5786 case NEON::BI__builtin_neon_vcvta_u64_v:
5787 case NEON::BI__builtin_neon_vcvtaq_s16_v:
5788 case NEON::BI__builtin_neon_vcvtaq_s32_v:
5789 case NEON::BI__builtin_neon_vcvtaq_s64_v:
5790 case NEON::BI__builtin_neon_vcvtaq_u16_v:
5791 case NEON::BI__builtin_neon_vcvtaq_u32_v:
5792 case NEON::BI__builtin_neon_vcvtaq_u64_v:
5793 case NEON::BI__builtin_neon_vcvtn_s16_v:
5794 case NEON::BI__builtin_neon_vcvtn_s32_v:
5795 case NEON::BI__builtin_neon_vcvtn_s64_v:
5796 case NEON::BI__builtin_neon_vcvtn_u16_v:
5797 case NEON::BI__builtin_neon_vcvtn_u32_v:
5798 case NEON::BI__builtin_neon_vcvtn_u64_v:
5799 case NEON::BI__builtin_neon_vcvtnq_s16_v:
5800 case NEON::BI__builtin_neon_vcvtnq_s32_v:
5801 case NEON::BI__builtin_neon_vcvtnq_s64_v:
5802 case NEON::BI__builtin_neon_vcvtnq_u16_v:
5803 case NEON::BI__builtin_neon_vcvtnq_u32_v:
5804 case NEON::BI__builtin_neon_vcvtnq_u64_v:
5805 case NEON::BI__builtin_neon_vcvtp_s16_v:
5806 case NEON::BI__builtin_neon_vcvtp_s32_v:
5807 case NEON::BI__builtin_neon_vcvtp_s64_v:
5808 case NEON::BI__builtin_neon_vcvtp_u16_v:
5809 case NEON::BI__builtin_neon_vcvtp_u32_v:
5810 case NEON::BI__builtin_neon_vcvtp_u64_v:
5811 case NEON::BI__builtin_neon_vcvtpq_s16_v:
5812 case NEON::BI__builtin_neon_vcvtpq_s32_v:
5813 case NEON::BI__builtin_neon_vcvtpq_s64_v:
5814 case NEON::BI__builtin_neon_vcvtpq_u16_v:
5815 case NEON::BI__builtin_neon_vcvtpq_u32_v:
5816 case NEON::BI__builtin_neon_vcvtpq_u64_v:
5817 case NEON::BI__builtin_neon_vcvtm_s16_v:
5818 case NEON::BI__builtin_neon_vcvtm_s32_v:
5819 case NEON::BI__builtin_neon_vcvtm_s64_v:
5820 case NEON::BI__builtin_neon_vcvtm_u16_v:
5821 case NEON::BI__builtin_neon_vcvtm_u32_v:
5822 case NEON::BI__builtin_neon_vcvtm_u64_v:
5823 case NEON::BI__builtin_neon_vcvtmq_s16_v:
5824 case NEON::BI__builtin_neon_vcvtmq_s32_v:
5825 case NEON::BI__builtin_neon_vcvtmq_s64_v:
5826 case NEON::BI__builtin_neon_vcvtmq_u16_v:
5827 case NEON::BI__builtin_neon_vcvtmq_u32_v:
5828 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
5829 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5830 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5832 case NEON::BI__builtin_neon_vcvtx_f32_v: {
5833 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
5834 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5837 case NEON::BI__builtin_neon_vext_v:
5838 case NEON::BI__builtin_neon_vextq_v: {
5839 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
5840 SmallVector<int, 16> Indices;
5841 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5842 Indices.push_back(i+CV);
5844 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5845 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5846 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
5848 case NEON::BI__builtin_neon_vfma_v:
5849 case NEON::BI__builtin_neon_vfmaq_v: {
5850 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5851 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5852 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5854 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
5855 return emitCallMaybeConstrainedFPBuiltin(
5856 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
5857 {Ops[1], Ops[2], Ops[0]});
5859 case NEON::BI__builtin_neon_vld1_v:
5860 case NEON::BI__builtin_neon_vld1q_v: {
5861 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5862 Ops.push_back(getAlignmentValue32(PtrOp0));
5863 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
5865 case NEON::BI__builtin_neon_vld1_x2_v:
5866 case NEON::BI__builtin_neon_vld1q_x2_v:
5867 case NEON::BI__builtin_neon_vld1_x3_v:
5868 case NEON::BI__builtin_neon_vld1q_x3_v:
5869 case NEON::BI__builtin_neon_vld1_x4_v:
5870 case NEON::BI__builtin_neon_vld1q_x4_v: {
5871 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
5872 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5873 llvm::Type *Tys[2] = { VTy, PTy };
5874 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5875 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
5876 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5877 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5878 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5880 case NEON::BI__builtin_neon_vld2_v:
5881 case NEON::BI__builtin_neon_vld2q_v:
5882 case NEON::BI__builtin_neon_vld3_v:
5883 case NEON::BI__builtin_neon_vld3q_v:
5884 case NEON::BI__builtin_neon_vld4_v:
5885 case NEON::BI__builtin_neon_vld4q_v:
5886 case NEON::BI__builtin_neon_vld2_dup_v:
5887 case NEON::BI__builtin_neon_vld2q_dup_v:
5888 case NEON::BI__builtin_neon_vld3_dup_v:
5889 case NEON::BI__builtin_neon_vld3q_dup_v:
5890 case NEON::BI__builtin_neon_vld4_dup_v:
5891 case NEON::BI__builtin_neon_vld4q_dup_v: {
5892 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5893 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5894 Value *Align = getAlignmentValue32(PtrOp1);
5895 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
5896 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5897 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5898 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5900 case NEON::BI__builtin_neon_vld1_dup_v:
5901 case NEON::BI__builtin_neon_vld1q_dup_v: {
5902 Value *V = UndefValue::get(Ty);
5903 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
5904 PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
5905 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
5906 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
5907 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
5908 return EmitNeonSplat(Ops[0], CI);
5910 case NEON::BI__builtin_neon_vld2_lane_v:
5911 case NEON::BI__builtin_neon_vld2q_lane_v:
5912 case NEON::BI__builtin_neon_vld3_lane_v:
5913 case NEON::BI__builtin_neon_vld3q_lane_v:
5914 case NEON::BI__builtin_neon_vld4_lane_v:
5915 case NEON::BI__builtin_neon_vld4q_lane_v: {
5916 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5917 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5918 for (unsigned I = 2; I < Ops.size() - 1; ++I)
5919 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
5920 Ops.push_back(getAlignmentValue32(PtrOp1));
5921 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
5922 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5923 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5924 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5926 case NEON::BI__builtin_neon_vmovl_v: {
5927 llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
5928 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
5930 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
5931 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
5933 case NEON::BI__builtin_neon_vmovn_v: {
5934 llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
5935 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
5936 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
5938 case NEON::BI__builtin_neon_vmull_v:
5939 // FIXME: the integer vmull operations could be emitted in terms of pure
5940 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
5941 // hoisting the exts outside loops. Until global ISel comes along that can
5942 // see through such movement this leads to bad CodeGen. So we need an
5943 // intrinsic for now.
5944 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
5945 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
5946 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
5947 case NEON::BI__builtin_neon_vpadal_v:
5948 case NEON::BI__builtin_neon_vpadalq_v: {
5949 // The source operand type has twice as many elements of half the size.
5950 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5952 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5954 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
5955 llvm::Type *Tys[2] = { Ty, NarrowTy };
5956 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
5958 case NEON::BI__builtin_neon_vpaddl_v:
5959 case NEON::BI__builtin_neon_vpaddlq_v: {
5960 // The source operand type has twice as many elements of half the size.
5961 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5962 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5964 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
5965 llvm::Type *Tys[2] = { Ty, NarrowTy };
5966 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
5968 case NEON::BI__builtin_neon_vqdmlal_v:
5969 case NEON::BI__builtin_neon_vqdmlsl_v: {
5970 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
5972 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
5974 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
5976 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
5977 case NEON::BI__builtin_neon_vqdmulh_lane_v:
5978 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
5979 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
5980 auto *RTy = cast<llvm::VectorType>(Ty);
5981 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
5982 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
5983 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
5984 RTy->getNumElements() * 2);
5985 llvm::Type *Tys[2] = {
5986 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
5987 /*isQuad*/ false))};
5988 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
5990 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
5991 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
5992 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
5993 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
5994 llvm::Type *Tys[2] = {
5995 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
5997 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
5999 case NEON::BI__builtin_neon_vqshl_n_v:
6000 case NEON::BI__builtin_neon_vqshlq_n_v:
6001 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
6003 case NEON::BI__builtin_neon_vqshlu_n_v:
6004 case NEON::BI__builtin_neon_vqshluq_n_v:
6005 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
6007 case NEON::BI__builtin_neon_vrecpe_v:
6008 case NEON::BI__builtin_neon_vrecpeq_v:
6009 case NEON::BI__builtin_neon_vrsqrte_v:
6010 case NEON::BI__builtin_neon_vrsqrteq_v:
6011 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
6012 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6013 case NEON::BI__builtin_neon_vrndi_v:
6014 case NEON::BI__builtin_neon_vrndiq_v:
6015 Int = Builder.getIsFPConstrained()
6016 ? Intrinsic::experimental_constrained_nearbyint
6017 : Intrinsic::nearbyint;
6018 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6019 case NEON::BI__builtin_neon_vrshr_n_v:
6020 case NEON::BI__builtin_neon_vrshrq_n_v:
6021 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
6023 case NEON::BI__builtin_neon_vshl_n_v:
6024 case NEON::BI__builtin_neon_vshlq_n_v:
6025 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
6026 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
6028 case NEON::BI__builtin_neon_vshll_n_v: {
6029 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
6030 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6032 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
6034 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
6035 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
6036 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
6038 case NEON::BI__builtin_neon_vshrn_n_v: {
6039 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
6040 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6041 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
6043 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
6045 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
6046 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
6048 case NEON::BI__builtin_neon_vshr_n_v:
6049 case NEON::BI__builtin_neon_vshrq_n_v:
6050 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
6051 case NEON::BI__builtin_neon_vst1_v:
6052 case NEON::BI__builtin_neon_vst1q_v:
6053 case NEON::BI__builtin_neon_vst2_v:
6054 case NEON::BI__builtin_neon_vst2q_v:
6055 case NEON::BI__builtin_neon_vst3_v:
6056 case NEON::BI__builtin_neon_vst3q_v:
6057 case NEON::BI__builtin_neon_vst4_v:
6058 case NEON::BI__builtin_neon_vst4q_v:
6059 case NEON::BI__builtin_neon_vst2_lane_v:
6060 case NEON::BI__builtin_neon_vst2q_lane_v:
6061 case NEON::BI__builtin_neon_vst3_lane_v:
6062 case NEON::BI__builtin_neon_vst3q_lane_v:
6063 case NEON::BI__builtin_neon_vst4_lane_v:
6064 case NEON::BI__builtin_neon_vst4q_lane_v: {
6065 llvm::Type *Tys[] = {Int8PtrTy, Ty};
6066 Ops.push_back(getAlignmentValue32(PtrOp0));
6067 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
6069 case NEON::BI__builtin_neon_vst1_x2_v:
6070 case NEON::BI__builtin_neon_vst1q_x2_v:
6071 case NEON::BI__builtin_neon_vst1_x3_v:
6072 case NEON::BI__builtin_neon_vst1q_x3_v:
6073 case NEON::BI__builtin_neon_vst1_x4_v:
6074 case NEON::BI__builtin_neon_vst1q_x4_v: {
6075 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6076 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
6077 // in AArch64 it comes last. We may want to stick to one or another.
6078 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
6079 Arch == llvm::Triple::aarch64_32) {
6080 llvm::Type *Tys[2] = { VTy, PTy };
6081 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
6082 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6084 llvm::Type *Tys[2] = { PTy, VTy };
6085 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6087 case NEON::BI__builtin_neon_vsubhn_v: {
6088 llvm::VectorType *SrcTy =
6089 llvm::VectorType::getExtendedElementVectorType(VTy);
6091 // %sum = add <4 x i32> %lhs, %rhs
6092 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6093 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6094 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
6096 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
6097 Constant *ShiftAmt =
6098 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6099 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
6101 // %res = trunc <4 x i32> %high to <4 x i16>
6102 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
6104 case NEON::BI__builtin_neon_vtrn_v:
6105 case NEON::BI__builtin_neon_vtrnq_v: {
6106 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6107 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6108 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6109 Value *SV = nullptr;
6111 for (unsigned vi = 0; vi != 2; ++vi) {
6112 SmallVector<int, 16> Indices;
6113 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6114 Indices.push_back(i+vi);
6115 Indices.push_back(i+e+vi);
6117 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6118 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
6119 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6123 case NEON::BI__builtin_neon_vtst_v:
6124 case NEON::BI__builtin_neon_vtstq_v: {
6125 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6126 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6127 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
6128 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
6129 ConstantAggregateZero::get(Ty));
6130 return Builder.CreateSExt(Ops[0], Ty, "vtst");
6132 case NEON::BI__builtin_neon_vuzp_v:
6133 case NEON::BI__builtin_neon_vuzpq_v: {
6134 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6135 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6136 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6137 Value *SV = nullptr;
6139 for (unsigned vi = 0; vi != 2; ++vi) {
6140 SmallVector<int, 16> Indices;
6141 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6142 Indices.push_back(2*i+vi);
6144 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6145 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
6146 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6150 case NEON::BI__builtin_neon_vzip_v:
6151 case NEON::BI__builtin_neon_vzipq_v: {
6152 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6153 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6154 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6155 Value *SV = nullptr;
6157 for (unsigned vi = 0; vi != 2; ++vi) {
6158 SmallVector<int, 16> Indices;
6159 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6160 Indices.push_back((i + vi*e) >> 1);
6161 Indices.push_back(((i + vi*e) >> 1)+e);
6163 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6164 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
6165 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6169 case NEON::BI__builtin_neon_vdot_v:
6170 case NEON::BI__builtin_neon_vdotq_v: {
6172 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6173 llvm::Type *Tys[2] = { Ty, InputTy };
6174 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6175 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
6177 case NEON::BI__builtin_neon_vfmlal_low_v:
6178 case NEON::BI__builtin_neon_vfmlalq_low_v: {
6180 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6181 llvm::Type *Tys[2] = { Ty, InputTy };
6182 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
6184 case NEON::BI__builtin_neon_vfmlsl_low_v:
6185 case NEON::BI__builtin_neon_vfmlslq_low_v: {
6187 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6188 llvm::Type *Tys[2] = { Ty, InputTy };
6189 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
6191 case NEON::BI__builtin_neon_vfmlal_high_v:
6192 case NEON::BI__builtin_neon_vfmlalq_high_v: {
6194 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6195 llvm::Type *Tys[2] = { Ty, InputTy };
6196 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
6198 case NEON::BI__builtin_neon_vfmlsl_high_v:
6199 case NEON::BI__builtin_neon_vfmlslq_high_v: {
6201 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6202 llvm::Type *Tys[2] = { Ty, InputTy };
6203 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
6205 case NEON::BI__builtin_neon_vmmlaq_v: {
6207 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6208 llvm::Type *Tys[2] = { Ty, InputTy };
6209 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6210 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
6212 case NEON::BI__builtin_neon_vusmmlaq_v: {
6214 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6215 llvm::Type *Tys[2] = { Ty, InputTy };
6216 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
6218 case NEON::BI__builtin_neon_vusdot_v:
6219 case NEON::BI__builtin_neon_vusdotq_v: {
6221 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6222 llvm::Type *Tys[2] = { Ty, InputTy };
6223 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
6225 case NEON::BI__builtin_neon_vbfdot_v:
6226 case NEON::BI__builtin_neon_vbfdotq_v: {
6227 llvm::Type *InputTy =
6228 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6229 llvm::Type *Tys[2] = { Ty, InputTy };
6230 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
6232 case NEON::BI__builtin_neon_vbfmmlaq_v: {
6233 llvm::Type *InputTy =
6234 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6235 llvm::Type *Tys[2] = { Ty, InputTy };
6236 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmmla");
6238 case NEON::BI__builtin_neon_vbfmlalbq_v: {
6239 llvm::Type *InputTy =
6240 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6241 llvm::Type *Tys[2] = { Ty, InputTy };
6242 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmlalb");
6244 case NEON::BI__builtin_neon_vbfmlaltq_v: {
6245 llvm::Type *InputTy =
6246 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6247 llvm::Type *Tys[2] = { Ty, InputTy };
6248 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmlalt");
6250 case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
6251 llvm::Type *Tys[1] = { Ty };
6252 Function *F = CGM.getIntrinsic(Int, Tys);
6253 return EmitNeonCall(F, Ops, "vcvtfp2bf");
6258 assert(Int && "Expected valid intrinsic number");
6260 // Determine the type(s) of this overloaded AArch64 intrinsic.
6261 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
6263 Value *Result = EmitNeonCall(F, Ops, NameHint);
6264 llvm::Type *ResultType = ConvertType(E->getType());
6265 // AArch64 intrinsic one-element vector type cast to
6266 // scalar type expected by the builtin
6267 return Builder.CreateBitCast(Result, ResultType, NameHint);
6270 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
6271 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
6272 const CmpInst::Predicate Ip, const Twine &Name) {
6273 llvm::Type *OTy = Op->getType();
6275 // FIXME: this is utterly horrific. We should not be looking at previous
6276 // codegen context to find out what needs doing. Unfortunately TableGen
6277 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
6279 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
6280 OTy = BI->getOperand(0)->getType();
6282 Op = Builder.CreateBitCast(Op, OTy);
6283 if (OTy->getScalarType()->isFloatingPointTy()) {
6284 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
6286 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
6288 return Builder.CreateSExt(Op, Ty, Name);
6291 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
6292 Value *ExtOp, Value *IndexOp,
6293 llvm::Type *ResTy, unsigned IntID,
6295 SmallVector<Value *, 2> TblOps;
6297 TblOps.push_back(ExtOp);
6299 // Build a vector containing sequential number like (0, 1, 2, ..., 15)
6300 SmallVector<int, 16> Indices;
6301 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
6302 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
6303 Indices.push_back(2*i);
6304 Indices.push_back(2*i+1);
6307 int PairPos = 0, End = Ops.size() - 1;
6308 while (PairPos < End) {
6309 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6310 Ops[PairPos+1], Indices,
6315 // If there's an odd number of 64-bit lookup table, fill the high 64-bit
6316 // of the 128-bit lookup table with zero.
6317 if (PairPos == End) {
6318 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
6319 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6320 ZeroTbl, Indices, Name));
6324 TblOps.push_back(IndexOp);
6325 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
6327 return CGF.EmitNeonCall(TblF, TblOps, Name);
6330 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
6332 switch (BuiltinID) {
6335 case ARM::BI__builtin_arm_nop:
6338 case ARM::BI__builtin_arm_yield:
6339 case ARM::BI__yield:
6342 case ARM::BI__builtin_arm_wfe:
6346 case ARM::BI__builtin_arm_wfi:
6350 case ARM::BI__builtin_arm_sev:
6354 case ARM::BI__builtin_arm_sevl:
6360 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
6361 llvm::ConstantInt::get(Int32Ty, Value));
6364 enum SpecialRegisterAccessKind {
6370 // Generates the IR for the read/write special register builtin,
6371 // ValueType is the type of the value that is to be written or read,
6372 // RegisterType is the type of the register being written to or read from.
6373 static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
6375 llvm::Type *RegisterType,
6376 llvm::Type *ValueType,
6377 SpecialRegisterAccessKind AccessKind,
6378 StringRef SysReg = "") {
6379 // write and register intrinsics only support 32 and 64 bit operations.
6380 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
6381 && "Unsupported size for register.");
6383 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6384 CodeGen::CodeGenModule &CGM = CGF.CGM;
6385 LLVMContext &Context = CGM.getLLVMContext();
6387 if (SysReg.empty()) {
6388 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
6389 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
6392 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
6393 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
6394 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
6396 llvm::Type *Types[] = { RegisterType };
6398 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
6399 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
6400 && "Can't fit 64-bit value in 32-bit register");
6402 if (AccessKind != Write) {
6403 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
6404 llvm::Function *F = CGM.getIntrinsic(
6405 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
6406 : llvm::Intrinsic::read_register,
6408 llvm::Value *Call = Builder.CreateCall(F, Metadata);
6411 // Read into 64 bit register and then truncate result to 32 bit.
6412 return Builder.CreateTrunc(Call, ValueType);
6414 if (ValueType->isPointerTy())
6415 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
6416 return Builder.CreateIntToPtr(Call, ValueType);
6421 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
6422 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
6424 // Extend 32 bit write value to 64 bit to pass to write.
6425 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
6426 return Builder.CreateCall(F, { Metadata, ArgValue });
6429 if (ValueType->isPointerTy()) {
6430 // Have VoidPtrTy ArgValue but want to return an i32/i64.
6431 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
6432 return Builder.CreateCall(F, { Metadata, ArgValue });
6435 return Builder.CreateCall(F, { Metadata, ArgValue });
6438 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
6439 /// argument that specifies the vector type.
6440 static bool HasExtraNeonArgument(unsigned BuiltinID) {
6441 switch (BuiltinID) {
6443 case NEON::BI__builtin_neon_vget_lane_i8:
6444 case NEON::BI__builtin_neon_vget_lane_i16:
6445 case NEON::BI__builtin_neon_vget_lane_bf16:
6446 case NEON::BI__builtin_neon_vget_lane_i32:
6447 case NEON::BI__builtin_neon_vget_lane_i64:
6448 case NEON::BI__builtin_neon_vget_lane_f32:
6449 case NEON::BI__builtin_neon_vgetq_lane_i8:
6450 case NEON::BI__builtin_neon_vgetq_lane_i16:
6451 case NEON::BI__builtin_neon_vgetq_lane_bf16:
6452 case NEON::BI__builtin_neon_vgetq_lane_i32:
6453 case NEON::BI__builtin_neon_vgetq_lane_i64:
6454 case NEON::BI__builtin_neon_vgetq_lane_f32:
6455 case NEON::BI__builtin_neon_vduph_lane_bf16:
6456 case NEON::BI__builtin_neon_vduph_laneq_bf16:
6457 case NEON::BI__builtin_neon_vset_lane_i8:
6458 case NEON::BI__builtin_neon_vset_lane_i16:
6459 case NEON::BI__builtin_neon_vset_lane_bf16:
6460 case NEON::BI__builtin_neon_vset_lane_i32:
6461 case NEON::BI__builtin_neon_vset_lane_i64:
6462 case NEON::BI__builtin_neon_vset_lane_f32:
6463 case NEON::BI__builtin_neon_vsetq_lane_i8:
6464 case NEON::BI__builtin_neon_vsetq_lane_i16:
6465 case NEON::BI__builtin_neon_vsetq_lane_bf16:
6466 case NEON::BI__builtin_neon_vsetq_lane_i32:
6467 case NEON::BI__builtin_neon_vsetq_lane_i64:
6468 case NEON::BI__builtin_neon_vsetq_lane_f32:
6469 case NEON::BI__builtin_neon_vsha1h_u32:
6470 case NEON::BI__builtin_neon_vsha1cq_u32:
6471 case NEON::BI__builtin_neon_vsha1pq_u32:
6472 case NEON::BI__builtin_neon_vsha1mq_u32:
6473 case NEON::BI__builtin_neon_vcvth_bf16_f32:
6474 case clang::ARM::BI_MoveToCoprocessor:
6475 case clang::ARM::BI_MoveToCoprocessor2:
6481 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
6483 ReturnValueSlot ReturnValue,
6484 llvm::Triple::ArchType Arch) {
6485 if (auto Hint = GetValueForARMHint(BuiltinID))
6488 if (BuiltinID == ARM::BI__emit) {
6489 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
6490 llvm::FunctionType *FTy =
6491 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
6493 Expr::EvalResult Result;
6494 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
6495 llvm_unreachable("Sema will ensure that the parameter is constant");
6497 llvm::APSInt Value = Result.Val.getInt();
6498 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
6500 llvm::InlineAsm *Emit =
6501 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
6502 /*hasSideEffects=*/true)
6503 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
6504 /*hasSideEffects=*/true);
6506 return Builder.CreateCall(Emit);
6509 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
6510 Value *Option = EmitScalarExpr(E->getArg(0));
6511 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
6514 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
6515 Value *Address = EmitScalarExpr(E->getArg(0));
6516 Value *RW = EmitScalarExpr(E->getArg(1));
6517 Value *IsData = EmitScalarExpr(E->getArg(2));
6519 // Locality is not supported on ARM target
6520 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
6522 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
6523 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
6526 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
6527 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6528 return Builder.CreateCall(
6529 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
6532 if (BuiltinID == ARM::BI__builtin_arm_cls) {
6533 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6534 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
6536 if (BuiltinID == ARM::BI__builtin_arm_cls64) {
6537 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6538 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
6542 if (BuiltinID == ARM::BI__clear_cache) {
6543 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
6544 const FunctionDecl *FD = E->getDirectCallee();
6546 for (unsigned i = 0; i < 2; i++)
6547 Ops[i] = EmitScalarExpr(E->getArg(i));
6548 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
6549 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
6550 StringRef Name = FD->getName();
6551 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
6554 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
6555 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
6558 switch (BuiltinID) {
6559 default: llvm_unreachable("unexpected builtin");
6560 case ARM::BI__builtin_arm_mcrr:
6561 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
6563 case ARM::BI__builtin_arm_mcrr2:
6564 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
6568 // MCRR{2} instruction has 5 operands but
6569 // the intrinsic has 4 because Rt and Rt2
6570 // are represented as a single unsigned 64
6571 // bit integer in the intrinsic definition
6572 // but internally it's represented as 2 32
6575 Value *Coproc = EmitScalarExpr(E->getArg(0));
6576 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6577 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
6578 Value *CRm = EmitScalarExpr(E->getArg(3));
6580 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6581 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
6582 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
6583 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
6585 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
6588 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
6589 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
6592 switch (BuiltinID) {
6593 default: llvm_unreachable("unexpected builtin");
6594 case ARM::BI__builtin_arm_mrrc:
6595 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
6597 case ARM::BI__builtin_arm_mrrc2:
6598 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
6602 Value *Coproc = EmitScalarExpr(E->getArg(0));
6603 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6604 Value *CRm = EmitScalarExpr(E->getArg(2));
6605 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
6607 // Returns an unsigned 64 bit integer, represented
6608 // as two 32 bit integers.
6610 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
6611 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
6612 Rt = Builder.CreateZExt(Rt, Int64Ty);
6613 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
6615 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
6616 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
6617 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
6619 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
6622 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
6623 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
6624 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
6625 getContext().getTypeSize(E->getType()) == 64) ||
6626 BuiltinID == ARM::BI__ldrexd) {
6629 switch (BuiltinID) {
6630 default: llvm_unreachable("unexpected builtin");
6631 case ARM::BI__builtin_arm_ldaex:
6632 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
6634 case ARM::BI__builtin_arm_ldrexd:
6635 case ARM::BI__builtin_arm_ldrex:
6636 case ARM::BI__ldrexd:
6637 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
6641 Value *LdPtr = EmitScalarExpr(E->getArg(0));
6642 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
6645 Value *Val0 = Builder.CreateExtractValue(Val, 1);
6646 Value *Val1 = Builder.CreateExtractValue(Val, 0);
6647 Val0 = Builder.CreateZExt(Val0, Int64Ty);
6648 Val1 = Builder.CreateZExt(Val1, Int64Ty);
6650 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
6651 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
6652 Val = Builder.CreateOr(Val, Val1);
6653 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
6656 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
6657 BuiltinID == ARM::BI__builtin_arm_ldaex) {
6658 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
6660 QualType Ty = E->getType();
6661 llvm::Type *RealResTy = ConvertType(Ty);
6662 llvm::Type *PtrTy = llvm::IntegerType::get(
6663 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
6664 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
6666 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
6667 ? Intrinsic::arm_ldaex
6668 : Intrinsic::arm_ldrex,
6670 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
6672 if (RealResTy->isPointerTy())
6673 return Builder.CreateIntToPtr(Val, RealResTy);
6675 llvm::Type *IntResTy = llvm::IntegerType::get(
6676 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
6677 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
6678 return Builder.CreateBitCast(Val, RealResTy);
6682 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
6683 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
6684 BuiltinID == ARM::BI__builtin_arm_strex) &&
6685 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
6686 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6687 ? Intrinsic::arm_stlexd
6688 : Intrinsic::arm_strexd);
6689 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
6691 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
6692 Value *Val = EmitScalarExpr(E->getArg(0));
6693 Builder.CreateStore(Val, Tmp);
6695 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
6696 Val = Builder.CreateLoad(LdPtr);
6698 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
6699 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
6700 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
6701 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
6704 if (BuiltinID == ARM::BI__builtin_arm_strex ||
6705 BuiltinID == ARM::BI__builtin_arm_stlex) {
6706 Value *StoreVal = EmitScalarExpr(E->getArg(0));
6707 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
6709 QualType Ty = E->getArg(0)->getType();
6710 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
6711 getContext().getTypeSize(Ty));
6712 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
6714 if (StoreVal->getType()->isPointerTy())
6715 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
6717 llvm::Type *IntTy = llvm::IntegerType::get(
6719 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
6720 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
6721 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
6724 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6725 ? Intrinsic::arm_stlex
6726 : Intrinsic::arm_strex,
6727 StoreAddr->getType());
6728 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
6731 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
6732 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
6733 return Builder.CreateCall(F);
6737 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
6738 switch (BuiltinID) {
6739 case ARM::BI__builtin_arm_crc32b:
6740 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
6741 case ARM::BI__builtin_arm_crc32cb:
6742 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
6743 case ARM::BI__builtin_arm_crc32h:
6744 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
6745 case ARM::BI__builtin_arm_crc32ch:
6746 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
6747 case ARM::BI__builtin_arm_crc32w:
6748 case ARM::BI__builtin_arm_crc32d:
6749 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
6750 case ARM::BI__builtin_arm_crc32cw:
6751 case ARM::BI__builtin_arm_crc32cd:
6752 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
6755 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
6756 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6757 Value *Arg1 = EmitScalarExpr(E->getArg(1));
6759 // crc32{c,}d intrinsics are implemnted as two calls to crc32{c,}w
6760 // intrinsics, hence we need different codegen for these cases.
6761 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
6762 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
6763 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6764 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
6765 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
6766 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
6768 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6769 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
6770 return Builder.CreateCall(F, {Res, Arg1b});
6772 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
6774 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6775 return Builder.CreateCall(F, {Arg0, Arg1});
6779 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6780 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6781 BuiltinID == ARM::BI__builtin_arm_rsrp ||
6782 BuiltinID == ARM::BI__builtin_arm_wsr ||
6783 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6784 BuiltinID == ARM::BI__builtin_arm_wsrp) {
6786 SpecialRegisterAccessKind AccessKind = Write;
6787 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6788 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6789 BuiltinID == ARM::BI__builtin_arm_rsrp)
6790 AccessKind = VolatileRead;
6792 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
6793 BuiltinID == ARM::BI__builtin_arm_wsrp;
6795 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6796 BuiltinID == ARM::BI__builtin_arm_wsr64;
6798 llvm::Type *ValueType;
6799 llvm::Type *RegisterType;
6800 if (IsPointerBuiltin) {
6801 ValueType = VoidPtrTy;
6802 RegisterType = Int32Ty;
6803 } else if (Is64Bit) {
6804 ValueType = RegisterType = Int64Ty;
6806 ValueType = RegisterType = Int32Ty;
6809 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
6813 // Deal with MVE builtins
6814 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
6816 // Handle CDE builtins
6817 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
6820 // Find out if any arguments are required to be integer constant
6822 unsigned ICEArguments = 0;
6823 ASTContext::GetBuiltinTypeError Error;
6824 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6825 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6827 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6828 return Builder.getInt32(addr.getAlignment().getQuantity());
6831 Address PtrOp0 = Address::invalid();
6832 Address PtrOp1 = Address::invalid();
6833 SmallVector<Value*, 4> Ops;
6834 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
6835 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
6836 for (unsigned i = 0, e = NumArgs; i != e; i++) {
6838 switch (BuiltinID) {
6839 case NEON::BI__builtin_neon_vld1_v:
6840 case NEON::BI__builtin_neon_vld1q_v:
6841 case NEON::BI__builtin_neon_vld1q_lane_v:
6842 case NEON::BI__builtin_neon_vld1_lane_v:
6843 case NEON::BI__builtin_neon_vld1_dup_v:
6844 case NEON::BI__builtin_neon_vld1q_dup_v:
6845 case NEON::BI__builtin_neon_vst1_v:
6846 case NEON::BI__builtin_neon_vst1q_v:
6847 case NEON::BI__builtin_neon_vst1q_lane_v:
6848 case NEON::BI__builtin_neon_vst1_lane_v:
6849 case NEON::BI__builtin_neon_vst2_v:
6850 case NEON::BI__builtin_neon_vst2q_v:
6851 case NEON::BI__builtin_neon_vst2_lane_v:
6852 case NEON::BI__builtin_neon_vst2q_lane_v:
6853 case NEON::BI__builtin_neon_vst3_v:
6854 case NEON::BI__builtin_neon_vst3q_v:
6855 case NEON::BI__builtin_neon_vst3_lane_v:
6856 case NEON::BI__builtin_neon_vst3q_lane_v:
6857 case NEON::BI__builtin_neon_vst4_v:
6858 case NEON::BI__builtin_neon_vst4q_v:
6859 case NEON::BI__builtin_neon_vst4_lane_v:
6860 case NEON::BI__builtin_neon_vst4q_lane_v:
6861 // Get the alignment for the argument in addition to the value;
6862 // we'll use it later.
6863 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
6864 Ops.push_back(PtrOp0.getPointer());
6869 switch (BuiltinID) {
6870 case NEON::BI__builtin_neon_vld2_v:
6871 case NEON::BI__builtin_neon_vld2q_v:
6872 case NEON::BI__builtin_neon_vld3_v:
6873 case NEON::BI__builtin_neon_vld3q_v:
6874 case NEON::BI__builtin_neon_vld4_v:
6875 case NEON::BI__builtin_neon_vld4q_v:
6876 case NEON::BI__builtin_neon_vld2_lane_v:
6877 case NEON::BI__builtin_neon_vld2q_lane_v:
6878 case NEON::BI__builtin_neon_vld3_lane_v:
6879 case NEON::BI__builtin_neon_vld3q_lane_v:
6880 case NEON::BI__builtin_neon_vld4_lane_v:
6881 case NEON::BI__builtin_neon_vld4q_lane_v:
6882 case NEON::BI__builtin_neon_vld2_dup_v:
6883 case NEON::BI__builtin_neon_vld2q_dup_v:
6884 case NEON::BI__builtin_neon_vld3_dup_v:
6885 case NEON::BI__builtin_neon_vld3q_dup_v:
6886 case NEON::BI__builtin_neon_vld4_dup_v:
6887 case NEON::BI__builtin_neon_vld4q_dup_v:
6888 // Get the alignment for the argument in addition to the value;
6889 // we'll use it later.
6890 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
6891 Ops.push_back(PtrOp1.getPointer());
6896 if ((ICEArguments & (1 << i)) == 0) {
6897 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6899 // If this is required to be a constant, constant fold it so that we know
6900 // that the generated intrinsic gets a ConstantInt.
6901 llvm::APSInt Result;
6902 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
6903 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
6904 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
6908 switch (BuiltinID) {
6911 case NEON::BI__builtin_neon_vget_lane_i8:
6912 case NEON::BI__builtin_neon_vget_lane_i16:
6913 case NEON::BI__builtin_neon_vget_lane_i32:
6914 case NEON::BI__builtin_neon_vget_lane_i64:
6915 case NEON::BI__builtin_neon_vget_lane_bf16:
6916 case NEON::BI__builtin_neon_vget_lane_f32:
6917 case NEON::BI__builtin_neon_vgetq_lane_i8:
6918 case NEON::BI__builtin_neon_vgetq_lane_i16:
6919 case NEON::BI__builtin_neon_vgetq_lane_i32:
6920 case NEON::BI__builtin_neon_vgetq_lane_i64:
6921 case NEON::BI__builtin_neon_vgetq_lane_bf16:
6922 case NEON::BI__builtin_neon_vgetq_lane_f32:
6923 case NEON::BI__builtin_neon_vduph_lane_bf16:
6924 case NEON::BI__builtin_neon_vduph_laneq_bf16:
6925 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
6927 case NEON::BI__builtin_neon_vrndns_f32: {
6928 Value *Arg = EmitScalarExpr(E->getArg(0));
6929 llvm::Type *Tys[] = {Arg->getType()};
6930 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
6931 return Builder.CreateCall(F, {Arg}, "vrndn"); }
6933 case NEON::BI__builtin_neon_vset_lane_i8:
6934 case NEON::BI__builtin_neon_vset_lane_i16:
6935 case NEON::BI__builtin_neon_vset_lane_i32:
6936 case NEON::BI__builtin_neon_vset_lane_i64:
6937 case NEON::BI__builtin_neon_vset_lane_bf16:
6938 case NEON::BI__builtin_neon_vset_lane_f32:
6939 case NEON::BI__builtin_neon_vsetq_lane_i8:
6940 case NEON::BI__builtin_neon_vsetq_lane_i16:
6941 case NEON::BI__builtin_neon_vsetq_lane_i32:
6942 case NEON::BI__builtin_neon_vsetq_lane_i64:
6943 case NEON::BI__builtin_neon_vsetq_lane_bf16:
6944 case NEON::BI__builtin_neon_vsetq_lane_f32:
6945 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
6947 case NEON::BI__builtin_neon_vsha1h_u32:
6948 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
6950 case NEON::BI__builtin_neon_vsha1cq_u32:
6951 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
6953 case NEON::BI__builtin_neon_vsha1pq_u32:
6954 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
6956 case NEON::BI__builtin_neon_vsha1mq_u32:
6957 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
6960 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
6961 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
6965 // The ARM _MoveToCoprocessor builtins put the input register value as
6966 // the first argument, but the LLVM intrinsic expects it as the third one.
6967 case ARM::BI_MoveToCoprocessor:
6968 case ARM::BI_MoveToCoprocessor2: {
6969 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
6970 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
6971 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
6972 Ops[3], Ops[4], Ops[5]});
6974 case ARM::BI_BitScanForward:
6975 case ARM::BI_BitScanForward64:
6976 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
6977 case ARM::BI_BitScanReverse:
6978 case ARM::BI_BitScanReverse64:
6979 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
6981 case ARM::BI_InterlockedAnd64:
6982 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
6983 case ARM::BI_InterlockedExchange64:
6984 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
6985 case ARM::BI_InterlockedExchangeAdd64:
6986 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
6987 case ARM::BI_InterlockedExchangeSub64:
6988 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
6989 case ARM::BI_InterlockedOr64:
6990 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
6991 case ARM::BI_InterlockedXor64:
6992 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
6993 case ARM::BI_InterlockedDecrement64:
6994 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
6995 case ARM::BI_InterlockedIncrement64:
6996 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
6997 case ARM::BI_InterlockedExchangeAdd8_acq:
6998 case ARM::BI_InterlockedExchangeAdd16_acq:
6999 case ARM::BI_InterlockedExchangeAdd_acq:
7000 case ARM::BI_InterlockedExchangeAdd64_acq:
7001 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
7002 case ARM::BI_InterlockedExchangeAdd8_rel:
7003 case ARM::BI_InterlockedExchangeAdd16_rel:
7004 case ARM::BI_InterlockedExchangeAdd_rel:
7005 case ARM::BI_InterlockedExchangeAdd64_rel:
7006 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
7007 case ARM::BI_InterlockedExchangeAdd8_nf:
7008 case ARM::BI_InterlockedExchangeAdd16_nf:
7009 case ARM::BI_InterlockedExchangeAdd_nf:
7010 case ARM::BI_InterlockedExchangeAdd64_nf:
7011 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
7012 case ARM::BI_InterlockedExchange8_acq:
7013 case ARM::BI_InterlockedExchange16_acq:
7014 case ARM::BI_InterlockedExchange_acq:
7015 case ARM::BI_InterlockedExchange64_acq:
7016 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
7017 case ARM::BI_InterlockedExchange8_rel:
7018 case ARM::BI_InterlockedExchange16_rel:
7019 case ARM::BI_InterlockedExchange_rel:
7020 case ARM::BI_InterlockedExchange64_rel:
7021 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
7022 case ARM::BI_InterlockedExchange8_nf:
7023 case ARM::BI_InterlockedExchange16_nf:
7024 case ARM::BI_InterlockedExchange_nf:
7025 case ARM::BI_InterlockedExchange64_nf:
7026 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
7027 case ARM::BI_InterlockedCompareExchange8_acq:
7028 case ARM::BI_InterlockedCompareExchange16_acq:
7029 case ARM::BI_InterlockedCompareExchange_acq:
7030 case ARM::BI_InterlockedCompareExchange64_acq:
7031 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
7032 case ARM::BI_InterlockedCompareExchange8_rel:
7033 case ARM::BI_InterlockedCompareExchange16_rel:
7034 case ARM::BI_InterlockedCompareExchange_rel:
7035 case ARM::BI_InterlockedCompareExchange64_rel:
7036 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
7037 case ARM::BI_InterlockedCompareExchange8_nf:
7038 case ARM::BI_InterlockedCompareExchange16_nf:
7039 case ARM::BI_InterlockedCompareExchange_nf:
7040 case ARM::BI_InterlockedCompareExchange64_nf:
7041 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
7042 case ARM::BI_InterlockedOr8_acq:
7043 case ARM::BI_InterlockedOr16_acq:
7044 case ARM::BI_InterlockedOr_acq:
7045 case ARM::BI_InterlockedOr64_acq:
7046 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
7047 case ARM::BI_InterlockedOr8_rel:
7048 case ARM::BI_InterlockedOr16_rel:
7049 case ARM::BI_InterlockedOr_rel:
7050 case ARM::BI_InterlockedOr64_rel:
7051 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
7052 case ARM::BI_InterlockedOr8_nf:
7053 case ARM::BI_InterlockedOr16_nf:
7054 case ARM::BI_InterlockedOr_nf:
7055 case ARM::BI_InterlockedOr64_nf:
7056 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
7057 case ARM::BI_InterlockedXor8_acq:
7058 case ARM::BI_InterlockedXor16_acq:
7059 case ARM::BI_InterlockedXor_acq:
7060 case ARM::BI_InterlockedXor64_acq:
7061 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
7062 case ARM::BI_InterlockedXor8_rel:
7063 case ARM::BI_InterlockedXor16_rel:
7064 case ARM::BI_InterlockedXor_rel:
7065 case ARM::BI_InterlockedXor64_rel:
7066 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
7067 case ARM::BI_InterlockedXor8_nf:
7068 case ARM::BI_InterlockedXor16_nf:
7069 case ARM::BI_InterlockedXor_nf:
7070 case ARM::BI_InterlockedXor64_nf:
7071 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
7072 case ARM::BI_InterlockedAnd8_acq:
7073 case ARM::BI_InterlockedAnd16_acq:
7074 case ARM::BI_InterlockedAnd_acq:
7075 case ARM::BI_InterlockedAnd64_acq:
7076 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
7077 case ARM::BI_InterlockedAnd8_rel:
7078 case ARM::BI_InterlockedAnd16_rel:
7079 case ARM::BI_InterlockedAnd_rel:
7080 case ARM::BI_InterlockedAnd64_rel:
7081 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
7082 case ARM::BI_InterlockedAnd8_nf:
7083 case ARM::BI_InterlockedAnd16_nf:
7084 case ARM::BI_InterlockedAnd_nf:
7085 case ARM::BI_InterlockedAnd64_nf:
7086 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
7087 case ARM::BI_InterlockedIncrement16_acq:
7088 case ARM::BI_InterlockedIncrement_acq:
7089 case ARM::BI_InterlockedIncrement64_acq:
7090 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
7091 case ARM::BI_InterlockedIncrement16_rel:
7092 case ARM::BI_InterlockedIncrement_rel:
7093 case ARM::BI_InterlockedIncrement64_rel:
7094 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
7095 case ARM::BI_InterlockedIncrement16_nf:
7096 case ARM::BI_InterlockedIncrement_nf:
7097 case ARM::BI_InterlockedIncrement64_nf:
7098 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
7099 case ARM::BI_InterlockedDecrement16_acq:
7100 case ARM::BI_InterlockedDecrement_acq:
7101 case ARM::BI_InterlockedDecrement64_acq:
7102 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
7103 case ARM::BI_InterlockedDecrement16_rel:
7104 case ARM::BI_InterlockedDecrement_rel:
7105 case ARM::BI_InterlockedDecrement64_rel:
7106 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
7107 case ARM::BI_InterlockedDecrement16_nf:
7108 case ARM::BI_InterlockedDecrement_nf:
7109 case ARM::BI_InterlockedDecrement64_nf:
7110 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
7113 // Get the last argument, which specifies the vector type.
7114 assert(HasExtraArg);
7115 llvm::APSInt Result;
7116 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7117 if (!Arg->isIntegerConstantExpr(Result, getContext()))
7120 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
7121 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
7122 // Determine the overloaded type of this builtin.
7124 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
7129 // Determine whether this is an unsigned conversion or not.
7130 bool usgn = Result.getZExtValue() == 1;
7131 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
7133 // Call the appropriate intrinsic.
7134 Function *F = CGM.getIntrinsic(Int, Ty);
7135 return Builder.CreateCall(F, Ops, "vcvtr");
7138 // Determine the type of this overloaded NEON intrinsic.
7139 NeonTypeFlags Type(Result.getZExtValue());
7140 bool usgn = Type.isUnsigned();
7141 bool rightShift = false;
7143 llvm::VectorType *VTy = GetNeonType(this, Type,
7144 getTarget().hasLegalHalfType(),
7146 getTarget().hasBFloat16Type());
7147 llvm::Type *Ty = VTy;
7151 // Many NEON builtins have identical semantics and uses in ARM and
7152 // AArch64. Emit these in a single function.
7153 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
7154 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
7155 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
7157 return EmitCommonNeonBuiltinExpr(
7158 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
7159 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
7162 switch (BuiltinID) {
7163 default: return nullptr;
7164 case NEON::BI__builtin_neon_vld1q_lane_v:
7165 // Handle 64-bit integer elements as a special case. Use shuffles of
7166 // one-element vectors to avoid poor code for i64 in the backend.
7167 if (VTy->getElementType()->isIntegerTy(64)) {
7168 // Extract the other lane.
7169 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7170 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
7171 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
7172 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7173 // Load the value as a one-element vector.
7174 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
7175 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7176 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
7177 Value *Align = getAlignmentValue32(PtrOp0);
7178 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
7180 int Indices[] = {1 - Lane, Lane};
7181 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
7184 case NEON::BI__builtin_neon_vld1_lane_v: {
7185 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7186 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
7187 Value *Ld = Builder.CreateLoad(PtrOp0);
7188 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
7190 case NEON::BI__builtin_neon_vqrshrn_n_v:
7192 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
7193 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
7195 case NEON::BI__builtin_neon_vqrshrun_n_v:
7196 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
7197 Ops, "vqrshrun_n", 1, true);
7198 case NEON::BI__builtin_neon_vqshrn_n_v:
7199 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
7200 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
7202 case NEON::BI__builtin_neon_vqshrun_n_v:
7203 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
7204 Ops, "vqshrun_n", 1, true);
7205 case NEON::BI__builtin_neon_vrecpe_v:
7206 case NEON::BI__builtin_neon_vrecpeq_v:
7207 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
7209 case NEON::BI__builtin_neon_vrshrn_n_v:
7210 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
7211 Ops, "vrshrn_n", 1, true);
7212 case NEON::BI__builtin_neon_vrsra_n_v:
7213 case NEON::BI__builtin_neon_vrsraq_n_v:
7214 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7215 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7216 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
7217 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
7218 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
7219 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
7220 case NEON::BI__builtin_neon_vsri_n_v:
7221 case NEON::BI__builtin_neon_vsriq_n_v:
7224 case NEON::BI__builtin_neon_vsli_n_v:
7225 case NEON::BI__builtin_neon_vsliq_n_v:
7226 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
7227 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
7229 case NEON::BI__builtin_neon_vsra_n_v:
7230 case NEON::BI__builtin_neon_vsraq_n_v:
7231 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7232 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
7233 return Builder.CreateAdd(Ops[0], Ops[1]);
7234 case NEON::BI__builtin_neon_vst1q_lane_v:
7235 // Handle 64-bit integer elements as a special case. Use a shuffle to get
7236 // a one-element vector and avoid poor code for i64 in the backend.
7237 if (VTy->getElementType()->isIntegerTy(64)) {
7238 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7239 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
7240 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7241 Ops[2] = getAlignmentValue32(PtrOp0);
7242 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
7243 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
7247 case NEON::BI__builtin_neon_vst1_lane_v: {
7248 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7249 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
7250 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7251 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
7254 case NEON::BI__builtin_neon_vtbl1_v:
7255 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
7257 case NEON::BI__builtin_neon_vtbl2_v:
7258 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
7260 case NEON::BI__builtin_neon_vtbl3_v:
7261 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
7263 case NEON::BI__builtin_neon_vtbl4_v:
7264 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
7266 case NEON::BI__builtin_neon_vtbx1_v:
7267 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
7269 case NEON::BI__builtin_neon_vtbx2_v:
7270 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
7272 case NEON::BI__builtin_neon_vtbx3_v:
7273 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
7275 case NEON::BI__builtin_neon_vtbx4_v:
7276 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
7281 template<typename Integer>
7282 static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
7283 llvm::APSInt IntVal;
7284 bool IsConst = E->isIntegerConstantExpr(IntVal, Context);
7285 assert(IsConst && "Sema should have checked this was a constant");
7287 return IntVal.getExtValue();
7290 static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
7291 llvm::Type *T, bool Unsigned) {
7292 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
7293 // which finds it convenient to specify signed/unsigned as a boolean flag.
7294 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
7297 static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
7298 uint32_t Shift, bool Unsigned) {
7299 // MVE helper function for integer shift right. This must handle signed vs
7300 // unsigned, and also deal specially with the case where the shift count is
7301 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
7302 // undefined behavior, but in MVE it's legal, so we must convert it to code
7303 // that is not undefined in IR.
7304 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
7306 ->getPrimitiveSizeInBits();
7307 if (Shift == LaneBits) {
7308 // An unsigned shift of the full lane size always generates zero, so we can
7309 // simply emit a zero vector. A signed shift of the full lane size does the
7310 // same thing as shifting by one bit fewer.
7312 return llvm::Constant::getNullValue(V->getType());
7316 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
7319 static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
7320 // MVE-specific helper function for a vector splat, which infers the element
7321 // count of the output vector by knowing that MVE vectors are all 128 bits
7323 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
7324 return Builder.CreateVectorSplat(Elements, V);
7327 static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
7328 CodeGenFunction *CGF,
7330 llvm::Type *DestType) {
7331 // Convert one MVE vector type into another by reinterpreting its in-register
7334 // Little-endian, this is identical to a bitcast (which reinterprets the
7335 // memory format). But big-endian, they're not necessarily the same, because
7336 // the register and memory formats map to each other differently depending on
7339 // We generate a bitcast whenever we can (if we're little-endian, or if the
7340 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
7341 // that performs the different kind of reinterpretation.
7342 if (CGF->getTarget().isBigEndian() &&
7343 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
7344 return Builder.CreateCall(
7345 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
7346 {DestType, V->getType()}),
7349 return Builder.CreateBitCast(V, DestType);
7353 static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
7354 // Make a shufflevector that extracts every other element of a vector (evens
7355 // or odds, as desired).
7356 SmallVector<int, 16> Indices;
7357 unsigned InputElements =
7358 cast<llvm::VectorType>(V->getType())->getNumElements();
7359 for (unsigned i = 0; i < InputElements; i += 2)
7360 Indices.push_back(i + Odd);
7361 return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
7365 static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
7367 // Make a shufflevector that interleaves two vectors element by element.
7368 assert(V0->getType() == V1->getType() && "Can't zip different vector types");
7369 SmallVector<int, 16> Indices;
7370 unsigned InputElements =
7371 cast<llvm::VectorType>(V0->getType())->getNumElements();
7372 for (unsigned i = 0; i < InputElements; i++) {
7373 Indices.push_back(i);
7374 Indices.push_back(i + InputElements);
7376 return Builder.CreateShuffleVector(V0, V1, Indices);
7379 template<unsigned HighBit, unsigned OtherBits>
7380 static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
7381 // MVE-specific helper function to make a vector splat of a constant such as
7382 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
7383 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
7384 unsigned LaneBits = T->getPrimitiveSizeInBits();
7385 uint32_t Value = HighBit << (LaneBits - 1);
7387 Value |= (1UL << (LaneBits - 1)) - 1;
7388 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
7389 return ARMMVEVectorSplat(Builder, Lane);
7392 static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
7394 unsigned ReverseWidth) {
7395 // MVE-specific helper function which reverses the elements of a
7396 // vector within every (ReverseWidth)-bit collection of lanes.
7397 SmallVector<int, 16> Indices;
7398 unsigned LaneSize = V->getType()->getScalarSizeInBits();
7399 unsigned Elements = 128 / LaneSize;
7400 unsigned Mask = ReverseWidth / LaneSize - 1;
7401 for (unsigned i = 0; i < Elements; i++)
7402 Indices.push_back(i ^ Mask);
7403 return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
7407 Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
7409 ReturnValueSlot ReturnValue,
7410 llvm::Triple::ArchType Arch) {
7411 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
7412 Intrinsic::ID IRIntr;
7413 unsigned NumVectors;
7415 // Code autogenerated by Tablegen will handle all the simple builtins.
7416 switch (BuiltinID) {
7417 #include "clang/Basic/arm_mve_builtin_cg.inc"
7419 // If we didn't match an MVE builtin id at all, go back to the
7420 // main EmitARMBuiltinExpr.
7425 // Anything that breaks from that switch is an MVE builtin that
7426 // needs handwritten code to generate.
7428 switch (CustomCodeGenType) {
7430 case CustomCodeGen::VLD24: {
7431 llvm::SmallVector<Value *, 4> Ops;
7432 llvm::SmallVector<llvm::Type *, 4> Tys;
7434 auto MvecCType = E->getType();
7435 auto MvecLType = ConvertType(MvecCType);
7436 assert(MvecLType->isStructTy() &&
7437 "Return type for vld[24]q should be a struct");
7438 assert(MvecLType->getStructNumElements() == 1 &&
7439 "Return-type struct for vld[24]q should have one element");
7440 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7441 assert(MvecLTypeInner->isArrayTy() &&
7442 "Return-type struct for vld[24]q should contain an array");
7443 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7444 "Array member of return-type struct vld[24]q has wrong length");
7445 auto VecLType = MvecLTypeInner->getArrayElementType();
7447 Tys.push_back(VecLType);
7449 auto Addr = E->getArg(0);
7450 Ops.push_back(EmitScalarExpr(Addr));
7451 Tys.push_back(ConvertType(Addr->getType()));
7453 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7454 Value *LoadResult = Builder.CreateCall(F, Ops);
7455 Value *MvecOut = UndefValue::get(MvecLType);
7456 for (unsigned i = 0; i < NumVectors; ++i) {
7457 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
7458 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
7461 if (ReturnValue.isNull())
7464 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
7467 case CustomCodeGen::VST24: {
7468 llvm::SmallVector<Value *, 4> Ops;
7469 llvm::SmallVector<llvm::Type *, 4> Tys;
7471 auto Addr = E->getArg(0);
7472 Ops.push_back(EmitScalarExpr(Addr));
7473 Tys.push_back(ConvertType(Addr->getType()));
7475 auto MvecCType = E->getArg(1)->getType();
7476 auto MvecLType = ConvertType(MvecCType);
7477 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
7478 assert(MvecLType->getStructNumElements() == 1 &&
7479 "Data-type struct for vst2q should have one element");
7480 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7481 assert(MvecLTypeInner->isArrayTy() &&
7482 "Data-type struct for vst2q should contain an array");
7483 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7484 "Array member of return-type struct vld[24]q has wrong length");
7485 auto VecLType = MvecLTypeInner->getArrayElementType();
7487 Tys.push_back(VecLType);
7489 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
7490 EmitAggExpr(E->getArg(1), MvecSlot);
7491 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
7492 for (unsigned i = 0; i < NumVectors; i++)
7493 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
7495 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7496 Value *ToReturn = nullptr;
7497 for (unsigned i = 0; i < NumVectors; i++) {
7498 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
7499 ToReturn = Builder.CreateCall(F, Ops);
7505 llvm_unreachable("unknown custom codegen type.");
7508 Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
7510 ReturnValueSlot ReturnValue,
7511 llvm::Triple::ArchType Arch) {
7512 switch (BuiltinID) {
7515 #include "clang/Basic/arm_cde_builtin_cg.inc"
7519 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
7521 SmallVectorImpl<Value *> &Ops,
7522 llvm::Triple::ArchType Arch) {
7523 unsigned int Int = 0;
7524 const char *s = nullptr;
7526 switch (BuiltinID) {
7529 case NEON::BI__builtin_neon_vtbl1_v:
7530 case NEON::BI__builtin_neon_vqtbl1_v:
7531 case NEON::BI__builtin_neon_vqtbl1q_v:
7532 case NEON::BI__builtin_neon_vtbl2_v:
7533 case NEON::BI__builtin_neon_vqtbl2_v:
7534 case NEON::BI__builtin_neon_vqtbl2q_v:
7535 case NEON::BI__builtin_neon_vtbl3_v:
7536 case NEON::BI__builtin_neon_vqtbl3_v:
7537 case NEON::BI__builtin_neon_vqtbl3q_v:
7538 case NEON::BI__builtin_neon_vtbl4_v:
7539 case NEON::BI__builtin_neon_vqtbl4_v:
7540 case NEON::BI__builtin_neon_vqtbl4q_v:
7542 case NEON::BI__builtin_neon_vtbx1_v:
7543 case NEON::BI__builtin_neon_vqtbx1_v:
7544 case NEON::BI__builtin_neon_vqtbx1q_v:
7545 case NEON::BI__builtin_neon_vtbx2_v:
7546 case NEON::BI__builtin_neon_vqtbx2_v:
7547 case NEON::BI__builtin_neon_vqtbx2q_v:
7548 case NEON::BI__builtin_neon_vtbx3_v:
7549 case NEON::BI__builtin_neon_vqtbx3_v:
7550 case NEON::BI__builtin_neon_vqtbx3q_v:
7551 case NEON::BI__builtin_neon_vtbx4_v:
7552 case NEON::BI__builtin_neon_vqtbx4_v:
7553 case NEON::BI__builtin_neon_vqtbx4q_v:
7557 assert(E->getNumArgs() >= 3);
7559 // Get the last argument, which specifies the vector type.
7560 llvm::APSInt Result;
7561 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
7562 if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
7565 // Determine the type of this overloaded NEON intrinsic.
7566 NeonTypeFlags Type(Result.getZExtValue());
7567 llvm::VectorType *Ty = GetNeonType(&CGF, Type);
7571 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7573 // AArch64 scalar builtins are not overloaded, they do not have an extra
7574 // argument that specifies the vector type, need to handle each case.
7575 switch (BuiltinID) {
7576 case NEON::BI__builtin_neon_vtbl1_v: {
7577 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
7578 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
7581 case NEON::BI__builtin_neon_vtbl2_v: {
7582 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
7583 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
7586 case NEON::BI__builtin_neon_vtbl3_v: {
7587 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
7588 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
7591 case NEON::BI__builtin_neon_vtbl4_v: {
7592 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
7593 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
7596 case NEON::BI__builtin_neon_vtbx1_v: {
7598 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
7599 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
7601 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
7602 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
7603 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7605 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7606 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7607 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7609 case NEON::BI__builtin_neon_vtbx2_v: {
7610 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
7611 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
7614 case NEON::BI__builtin_neon_vtbx3_v: {
7616 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
7617 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
7619 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
7620 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
7622 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7624 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7625 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7626 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7628 case NEON::BI__builtin_neon_vtbx4_v: {
7629 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
7630 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
7633 case NEON::BI__builtin_neon_vqtbl1_v:
7634 case NEON::BI__builtin_neon_vqtbl1q_v:
7635 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
7636 case NEON::BI__builtin_neon_vqtbl2_v:
7637 case NEON::BI__builtin_neon_vqtbl2q_v: {
7638 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
7639 case NEON::BI__builtin_neon_vqtbl3_v:
7640 case NEON::BI__builtin_neon_vqtbl3q_v:
7641 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
7642 case NEON::BI__builtin_neon_vqtbl4_v:
7643 case NEON::BI__builtin_neon_vqtbl4q_v:
7644 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
7645 case NEON::BI__builtin_neon_vqtbx1_v:
7646 case NEON::BI__builtin_neon_vqtbx1q_v:
7647 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
7648 case NEON::BI__builtin_neon_vqtbx2_v:
7649 case NEON::BI__builtin_neon_vqtbx2q_v:
7650 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
7651 case NEON::BI__builtin_neon_vqtbx3_v:
7652 case NEON::BI__builtin_neon_vqtbx3q_v:
7653 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
7654 case NEON::BI__builtin_neon_vqtbx4_v:
7655 case NEON::BI__builtin_neon_vqtbx4q_v:
7656 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
7663 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
7664 return CGF.EmitNeonCall(F, Ops, s);
7667 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
7668 auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
7669 Op = Builder.CreateBitCast(Op, Int16Ty);
7670 Value *V = UndefValue::get(VTy);
7671 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
7672 Op = Builder.CreateInsertElement(V, Op, CI);
7676 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
7677 /// access builtin. Only required if it can't be inferred from the base pointer
7679 llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
7680 switch (TypeFlags.getMemEltType()) {
7681 case SVETypeFlags::MemEltTyDefault:
7682 return getEltType(TypeFlags);
7683 case SVETypeFlags::MemEltTyInt8:
7684 return Builder.getInt8Ty();
7685 case SVETypeFlags::MemEltTyInt16:
7686 return Builder.getInt16Ty();
7687 case SVETypeFlags::MemEltTyInt32:
7688 return Builder.getInt32Ty();
7689 case SVETypeFlags::MemEltTyInt64:
7690 return Builder.getInt64Ty();
7692 llvm_unreachable("Unknown MemEltType");
7695 llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
7696 switch (TypeFlags.getEltType()) {
7698 llvm_unreachable("Invalid SVETypeFlag!");
7700 case SVETypeFlags::EltTyInt8:
7701 return Builder.getInt8Ty();
7702 case SVETypeFlags::EltTyInt16:
7703 return Builder.getInt16Ty();
7704 case SVETypeFlags::EltTyInt32:
7705 return Builder.getInt32Ty();
7706 case SVETypeFlags::EltTyInt64:
7707 return Builder.getInt64Ty();
7709 case SVETypeFlags::EltTyFloat16:
7710 return Builder.getHalfTy();
7711 case SVETypeFlags::EltTyFloat32:
7712 return Builder.getFloatTy();
7713 case SVETypeFlags::EltTyFloat64:
7714 return Builder.getDoubleTy();
7716 case SVETypeFlags::EltTyBFloat16:
7717 return Builder.getBFloatTy();
7719 case SVETypeFlags::EltTyBool8:
7720 case SVETypeFlags::EltTyBool16:
7721 case SVETypeFlags::EltTyBool32:
7722 case SVETypeFlags::EltTyBool64:
7723 return Builder.getInt1Ty();
7727 // Return the llvm predicate vector type corresponding to the specified element
7729 llvm::ScalableVectorType *
7730 CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
7731 switch (TypeFlags.getEltType()) {
7732 default: llvm_unreachable("Unhandled SVETypeFlag!");
7734 case SVETypeFlags::EltTyInt8:
7735 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7736 case SVETypeFlags::EltTyInt16:
7737 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7738 case SVETypeFlags::EltTyInt32:
7739 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7740 case SVETypeFlags::EltTyInt64:
7741 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7743 case SVETypeFlags::EltTyBFloat16:
7744 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7745 case SVETypeFlags::EltTyFloat16:
7746 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7747 case SVETypeFlags::EltTyFloat32:
7748 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7749 case SVETypeFlags::EltTyFloat64:
7750 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7752 case SVETypeFlags::EltTyBool8:
7753 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7754 case SVETypeFlags::EltTyBool16:
7755 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7756 case SVETypeFlags::EltTyBool32:
7757 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7758 case SVETypeFlags::EltTyBool64:
7759 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7763 // Return the llvm vector type corresponding to the specified element TypeFlags.
7764 llvm::ScalableVectorType *
7765 CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
7766 switch (TypeFlags.getEltType()) {
7768 llvm_unreachable("Invalid SVETypeFlag!");
7770 case SVETypeFlags::EltTyInt8:
7771 return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
7772 case SVETypeFlags::EltTyInt16:
7773 return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
7774 case SVETypeFlags::EltTyInt32:
7775 return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
7776 case SVETypeFlags::EltTyInt64:
7777 return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
7779 case SVETypeFlags::EltTyFloat16:
7780 return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
7781 case SVETypeFlags::EltTyBFloat16:
7782 return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
7783 case SVETypeFlags::EltTyFloat32:
7784 return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
7785 case SVETypeFlags::EltTyFloat64:
7786 return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
7788 case SVETypeFlags::EltTyBool8:
7789 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7790 case SVETypeFlags::EltTyBool16:
7791 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7792 case SVETypeFlags::EltTyBool32:
7793 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7794 case SVETypeFlags::EltTyBool64:
7795 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7799 llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
7801 CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
7802 return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
7805 constexpr unsigned SVEBitsPerBlock = 128;
7807 static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
7808 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
7809 return llvm::ScalableVectorType::get(EltTy, NumElts);
7812 // Reinterpret the input predicate so that it can be used to correctly isolate
7813 // the elements of the specified datatype.
7814 Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
7815 llvm::ScalableVectorType *VTy) {
7816 auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
7817 if (Pred->getType() == RTy)
7821 llvm::Type *IntrinsicTy;
7822 switch (VTy->getMinNumElements()) {
7824 llvm_unreachable("unsupported element count!");
7828 IntID = Intrinsic::aarch64_sve_convert_from_svbool;
7832 IntID = Intrinsic::aarch64_sve_convert_to_svbool;
7833 IntrinsicTy = Pred->getType();
7837 Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
7838 Value *C = Builder.CreateCall(F, Pred);
7839 assert(C->getType() == RTy && "Unexpected return type!");
7843 Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
7844 SmallVectorImpl<Value *> &Ops,
7846 auto *ResultTy = getSVEType(TypeFlags);
7847 auto *OverloadedTy =
7848 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
7850 // At the ACLE level there's only one predicate type, svbool_t, which is
7851 // mapped to <n x 16 x i1>. However, this might be incompatible with the
7852 // actual type being loaded. For example, when loading doubles (i64) the
7853 // predicated should be <n x 2 x i1> instead. At the IR level the type of
7854 // the predicate and the data being loaded must match. Cast accordingly.
7855 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
7857 Function *F = nullptr;
7858 if (Ops[1]->getType()->isVectorTy())
7859 // This is the "vector base, scalar offset" case. In order to uniquely
7860 // map this built-in to an LLVM IR intrinsic, we need both the return type
7861 // and the type of the vector base.
7862 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
7864 // This is the "scalar base, vector offset case". The type of the offset
7865 // is encoded in the name of the intrinsic. We only need to specify the
7866 // return type in order to uniquely map this built-in to an LLVM IR
7868 F = CGM.getIntrinsic(IntID, OverloadedTy);
7870 // Pass 0 when the offset is missing. This can only be applied when using
7871 // the "vector base" addressing mode for which ACLE allows no offset. The
7872 // corresponding LLVM IR always requires an offset.
7873 if (Ops.size() == 2) {
7874 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
7875 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7878 // For "vector base, scalar index" scale the index so that it becomes a
7880 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
7881 unsigned BytesPerElt =
7882 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
7883 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7884 Ops[2] = Builder.CreateMul(Ops[2], Scale);
7887 Value *Call = Builder.CreateCall(F, Ops);
7889 // The following sext/zext is only needed when ResultTy != OverloadedTy. In
7890 // other cases it's folded into a nop.
7891 return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
7892 : Builder.CreateSExt(Call, ResultTy);
7895 Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
7896 SmallVectorImpl<Value *> &Ops,
7898 auto *SrcDataTy = getSVEType(TypeFlags);
7899 auto *OverloadedTy =
7900 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
7902 // In ACLE the source data is passed in the last argument, whereas in LLVM IR
7903 // it's the first argument. Move it accordingly.
7904 Ops.insert(Ops.begin(), Ops.pop_back_val());
7906 Function *F = nullptr;
7907 if (Ops[2]->getType()->isVectorTy())
7908 // This is the "vector base, scalar offset" case. In order to uniquely
7909 // map this built-in to an LLVM IR intrinsic, we need both the return type
7910 // and the type of the vector base.
7911 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
7913 // This is the "scalar base, vector offset case". The type of the offset
7914 // is encoded in the name of the intrinsic. We only need to specify the
7915 // return type in order to uniquely map this built-in to an LLVM IR
7917 F = CGM.getIntrinsic(IntID, OverloadedTy);
7919 // Pass 0 when the offset is missing. This can only be applied when using
7920 // the "vector base" addressing mode for which ACLE allows no offset. The
7921 // corresponding LLVM IR always requires an offset.
7922 if (Ops.size() == 3) {
7923 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
7924 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7927 // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
7928 // folded into a nop.
7929 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
7931 // At the ACLE level there's only one predicate type, svbool_t, which is
7932 // mapped to <n x 16 x i1>. However, this might be incompatible with the
7933 // actual type being stored. For example, when storing doubles (i64) the
7934 // predicated should be <n x 2 x i1> instead. At the IR level the type of
7935 // the predicate and the data being stored must match. Cast accordingly.
7936 Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
7938 // For "vector base, scalar index" scale the index so that it becomes a
7940 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
7941 unsigned BytesPerElt =
7942 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
7943 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7944 Ops[3] = Builder.CreateMul(Ops[3], Scale);
7947 return Builder.CreateCall(F, Ops);
7950 Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
7951 SmallVectorImpl<Value *> &Ops,
7953 // The gather prefetches are overloaded on the vector input - this can either
7954 // be the vector of base addresses or vector of offsets.
7955 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
7957 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
7959 // Cast the predicate from svbool_t to the right number of elements.
7960 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
7962 // vector + imm addressing modes
7963 if (Ops[1]->getType()->isVectorTy()) {
7964 if (Ops.size() == 3) {
7965 // Pass 0 for 'vector+imm' when the index is omitted.
7966 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7968 // The sv_prfop is the last operand in the builtin and IR intrinsic.
7969 std::swap(Ops[2], Ops[3]);
7971 // Index needs to be passed as scaled offset.
7972 llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
7973 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
7974 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7975 Ops[2] = Builder.CreateMul(Ops[2], Scale);
7979 Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
7980 return Builder.CreateCall(F, Ops);
7983 Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
7984 SmallVectorImpl<Value*> &Ops,
7986 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
7987 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
7988 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
7992 case Intrinsic::aarch64_sve_ld2:
7995 case Intrinsic::aarch64_sve_ld3:
7998 case Intrinsic::aarch64_sve_ld4:
8002 llvm_unreachable("unknown intrinsic!");
8004 auto RetTy = llvm::VectorType::get(VTy->getElementType(),
8005 VTy->getElementCount() * N);
8007 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8008 Value *BasePtr= Builder.CreateBitCast(Ops[1], VecPtrTy);
8009 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8010 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8011 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8013 Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
8014 return Builder.CreateCall(F, { Predicate, BasePtr });
8017 Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
8018 SmallVectorImpl<Value*> &Ops,
8020 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8021 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8022 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8026 case Intrinsic::aarch64_sve_st2:
8029 case Intrinsic::aarch64_sve_st3:
8032 case Intrinsic::aarch64_sve_st4:
8036 llvm_unreachable("unknown intrinsic!");
8039 llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
8041 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8042 Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8043 Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
8044 Value *Val = Ops.back();
8045 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8046 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8048 // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
8049 // need to break up the tuple vector.
8050 SmallVector<llvm::Value*, 5> Operands;
8052 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8053 for (unsigned I = 0; I < N; ++I)
8054 Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
8055 Operands.append({Predicate, BasePtr});
8057 Function *F = CGM.getIntrinsic(IntID, { VTy });
8058 return Builder.CreateCall(F, Operands);
8061 // SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
8062 // svpmullt_pair intrinsics, with the exception that their results are bitcast
8064 Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
8065 SmallVectorImpl<Value *> &Ops,
8066 unsigned BuiltinID) {
8067 // Splat scalar operand to vector (intrinsics with _n infix)
8068 if (TypeFlags.hasSplatOperand()) {
8069 unsigned OpNo = TypeFlags.getSplatOperand();
8070 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8073 // The pair-wise function has a narrower overloaded type.
8074 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
8075 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
8077 // Now bitcast to the wider result type.
8078 llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
8079 return EmitSVEReinterpret(Call, Ty);
8082 Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
8083 ArrayRef<Value *> Ops, unsigned BuiltinID) {
8084 llvm::Type *OverloadedTy = getSVEType(TypeFlags);
8085 Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
8086 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
8089 Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
8090 SmallVectorImpl<Value *> &Ops,
8091 unsigned BuiltinID) {
8092 auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8093 auto *VectorTy = getSVEVectorForElementType(MemEltTy);
8094 auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8096 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8097 Value *BasePtr = Ops[1];
8099 // Implement the index operand if not omitted.
8100 if (Ops.size() > 3) {
8101 BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
8102 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
8105 // Prefetch intriniscs always expect an i8*
8106 BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
8107 Value *PrfOp = Ops.back();
8109 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
8110 return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
8113 Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
8114 llvm::Type *ReturnTy,
8115 SmallVectorImpl<Value *> &Ops,
8117 bool IsZExtReturn) {
8118 QualType LangPTy = E->getArg(1)->getType();
8119 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8120 LangPTy->getAs<PointerType>()->getPointeeType());
8122 // The vector type that is returned may be different from the
8123 // eventual type loaded from memory.
8124 auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
8125 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8127 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8128 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8129 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8130 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8132 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8133 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8134 Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
8136 return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
8137 : Builder.CreateSExt(Load, VectorTy);
8140 Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
8141 SmallVectorImpl<Value *> &Ops,
8142 unsigned BuiltinID) {
8143 QualType LangPTy = E->getArg(1)->getType();
8144 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8145 LangPTy->getAs<PointerType>()->getPointeeType());
8147 // The vector type that is stored may be different from the
8148 // eventual type stored to memory.
8149 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
8150 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8152 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8153 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8154 Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
8155 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8157 // Last value is always the data
8158 llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
8160 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8161 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8162 return Builder.CreateCall(F, {Val, Predicate, BasePtr});
8165 // Limit the usage of scalable llvm IR generated by the ACLE by using the
8166 // sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
8167 Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
8168 auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
8169 return Builder.CreateCall(F, Scalar);
8172 Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
8173 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
8176 Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
8177 // FIXME: For big endian this needs an additional REV, or needs a separate
8178 // intrinsic that is code-generated as a no-op, because the LLVM bitcast
8179 // instruction is defined as 'bitwise' equivalent from memory point of
8180 // view (when storing/reloading), whereas the svreinterpret builtin
8181 // implements bitwise equivalent cast from register point of view.
8182 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
8183 return Builder.CreateBitCast(Val, Ty);
8186 static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8187 SmallVectorImpl<Value *> &Ops) {
8188 auto *SplatZero = Constant::getNullValue(Ty);
8189 Ops.insert(Ops.begin(), SplatZero);
8192 static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8193 SmallVectorImpl<Value *> &Ops) {
8194 auto *SplatUndef = UndefValue::get(Ty);
8195 Ops.insert(Ops.begin(), SplatUndef);
8198 SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
8199 SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
8200 if (TypeFlags.isOverloadNone())
8203 llvm::Type *DefaultType = getSVEType(TypeFlags);
8205 if (TypeFlags.isOverloadWhile())
8206 return {DefaultType, Ops[1]->getType()};
8208 if (TypeFlags.isOverloadWhileRW())
8209 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
8211 if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
8212 return {Ops[0]->getType(), Ops.back()->getType()};
8214 if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
8215 return {ResultType, Ops[0]->getType()};
8217 assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
8218 return {DefaultType};
8221 Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
8222 const CallExpr *E) {
8223 // Find out if any arguments are required to be integer constant expressions.
8224 unsigned ICEArguments = 0;
8225 ASTContext::GetBuiltinTypeError Error;
8226 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8227 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8229 llvm::Type *Ty = ConvertType(E->getType());
8230 if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
8231 BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
8232 Value *Val = EmitScalarExpr(E->getArg(0));
8233 return EmitSVEReinterpret(Val, Ty);
8236 llvm::SmallVector<Value *, 4> Ops;
8237 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
8238 if ((ICEArguments & (1 << i)) == 0)
8239 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8241 // If this is required to be a constant, constant fold it so that we know
8242 // that the generated intrinsic gets a ConstantInt.
8243 llvm::APSInt Result;
8244 if (!E->getArg(i)->isIntegerConstantExpr(Result, getContext()))
8245 llvm_unreachable("Expected argument to be a constant");
8247 // Immediates for SVE llvm intrinsics are always 32bit. We can safely
8248 // truncate because the immediate has been range checked and no valid
8249 // immediate requires more than a handful of bits.
8250 Result = Result.extOrTrunc(32);
8251 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
8255 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
8256 AArch64SVEIntrinsicsProvenSorted);
8257 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8258 if (TypeFlags.isLoad())
8259 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
8260 TypeFlags.isZExtReturn());
8261 else if (TypeFlags.isStore())
8262 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
8263 else if (TypeFlags.isGatherLoad())
8264 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8265 else if (TypeFlags.isScatterStore())
8266 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8267 else if (TypeFlags.isPrefetch())
8268 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8269 else if (TypeFlags.isGatherPrefetch())
8270 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8271 else if (TypeFlags.isStructLoad())
8272 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8273 else if (TypeFlags.isStructStore())
8274 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8275 else if (TypeFlags.isUndef())
8276 return UndefValue::get(Ty);
8277 else if (Builtin->LLVMIntrinsic != 0) {
8278 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
8279 InsertExplicitZeroOperand(Builder, Ty, Ops);
8281 if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
8282 InsertExplicitUndefOperand(Builder, Ty, Ops);
8284 // Some ACLE builtins leave out the argument to specify the predicate
8285 // pattern, which is expected to be expanded to an SV_ALL pattern.
8286 if (TypeFlags.isAppendSVALL())
8287 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
8288 if (TypeFlags.isInsertOp1SVALL())
8289 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
8291 // Predicates must match the main datatype.
8292 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
8293 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
8294 if (PredTy->getElementType()->isIntegerTy(1))
8295 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
8297 // Splat scalar operand to vector (intrinsics with _n infix)
8298 if (TypeFlags.hasSplatOperand()) {
8299 unsigned OpNo = TypeFlags.getSplatOperand();
8300 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8303 if (TypeFlags.isReverseCompare())
8304 std::swap(Ops[1], Ops[2]);
8306 if (TypeFlags.isReverseUSDOT())
8307 std::swap(Ops[1], Ops[2]);
8309 // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
8310 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
8311 llvm::Type *OpndTy = Ops[1]->getType();
8312 auto *SplatZero = Constant::getNullValue(OpndTy);
8313 Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
8314 Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
8317 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
8318 getSVEOverloadTypes(TypeFlags, Ty, Ops));
8319 Value *Call = Builder.CreateCall(F, Ops);
8321 // Predicate results must be converted to svbool_t.
8322 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
8323 if (PredTy->getScalarType()->isIntegerTy(1))
8324 Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8329 switch (BuiltinID) {
8333 case SVE::BI__builtin_sve_svmov_b_z: {
8334 // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
8335 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8336 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8337 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
8338 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
8341 case SVE::BI__builtin_sve_svnot_b_z: {
8342 // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
8343 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8344 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8345 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
8346 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
8349 case SVE::BI__builtin_sve_svmovlb_u16:
8350 case SVE::BI__builtin_sve_svmovlb_u32:
8351 case SVE::BI__builtin_sve_svmovlb_u64:
8352 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
8354 case SVE::BI__builtin_sve_svmovlb_s16:
8355 case SVE::BI__builtin_sve_svmovlb_s32:
8356 case SVE::BI__builtin_sve_svmovlb_s64:
8357 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
8359 case SVE::BI__builtin_sve_svmovlt_u16:
8360 case SVE::BI__builtin_sve_svmovlt_u32:
8361 case SVE::BI__builtin_sve_svmovlt_u64:
8362 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
8364 case SVE::BI__builtin_sve_svmovlt_s16:
8365 case SVE::BI__builtin_sve_svmovlt_s32:
8366 case SVE::BI__builtin_sve_svmovlt_s64:
8367 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
8369 case SVE::BI__builtin_sve_svpmullt_u16:
8370 case SVE::BI__builtin_sve_svpmullt_u64:
8371 case SVE::BI__builtin_sve_svpmullt_n_u16:
8372 case SVE::BI__builtin_sve_svpmullt_n_u64:
8373 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
8375 case SVE::BI__builtin_sve_svpmullb_u16:
8376 case SVE::BI__builtin_sve_svpmullb_u64:
8377 case SVE::BI__builtin_sve_svpmullb_n_u16:
8378 case SVE::BI__builtin_sve_svpmullb_n_u64:
8379 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
8381 case SVE::BI__builtin_sve_svdup_n_b8:
8382 case SVE::BI__builtin_sve_svdup_n_b16:
8383 case SVE::BI__builtin_sve_svdup_n_b32:
8384 case SVE::BI__builtin_sve_svdup_n_b64: {
8386 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
8387 llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
8388 Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
8389 return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
8392 case SVE::BI__builtin_sve_svdupq_n_b8:
8393 case SVE::BI__builtin_sve_svdupq_n_b16:
8394 case SVE::BI__builtin_sve_svdupq_n_b32:
8395 case SVE::BI__builtin_sve_svdupq_n_b64:
8396 case SVE::BI__builtin_sve_svdupq_n_u8:
8397 case SVE::BI__builtin_sve_svdupq_n_s8:
8398 case SVE::BI__builtin_sve_svdupq_n_u64:
8399 case SVE::BI__builtin_sve_svdupq_n_f64:
8400 case SVE::BI__builtin_sve_svdupq_n_s64:
8401 case SVE::BI__builtin_sve_svdupq_n_u16:
8402 case SVE::BI__builtin_sve_svdupq_n_f16:
8403 case SVE::BI__builtin_sve_svdupq_n_bf16:
8404 case SVE::BI__builtin_sve_svdupq_n_s16:
8405 case SVE::BI__builtin_sve_svdupq_n_u32:
8406 case SVE::BI__builtin_sve_svdupq_n_f32:
8407 case SVE::BI__builtin_sve_svdupq_n_s32: {
8408 // These builtins are implemented by storing each element to an array and using
8409 // ld1rq to materialize a vector.
8410 unsigned NumOpnds = Ops.size();
8413 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
8415 // For svdupq_n_b* the element type of is an integer of type 128/numelts,
8416 // so that the compare can use the width that is natural for the expected
8417 // number of predicate lanes.
8418 llvm::Type *EltTy = Ops[0]->getType();
8420 EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
8422 Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
8423 CharUnits::fromQuantity(16));
8424 for (unsigned I = 0; I < NumOpnds; ++I)
8425 Builder.CreateDefaultAlignedStore(
8426 IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
8427 Builder.CreateGEP(Alloca.getPointer(),
8428 {Builder.getInt64(0), Builder.getInt64(I)}));
8430 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8431 Value *Pred = EmitSVEAllTruePred(TypeFlags);
8433 llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
8434 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
8435 Value *Alloca0 = Builder.CreateGEP(
8436 Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
8437 Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});
8442 // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
8443 F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
8444 : Intrinsic::aarch64_sve_cmpne_wide,
8447 Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
8448 return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8451 case SVE::BI__builtin_sve_svpfalse_b:
8452 return ConstantInt::getFalse(Ty);
8454 case SVE::BI__builtin_sve_svlen_bf16:
8455 case SVE::BI__builtin_sve_svlen_f16:
8456 case SVE::BI__builtin_sve_svlen_f32:
8457 case SVE::BI__builtin_sve_svlen_f64:
8458 case SVE::BI__builtin_sve_svlen_s8:
8459 case SVE::BI__builtin_sve_svlen_s16:
8460 case SVE::BI__builtin_sve_svlen_s32:
8461 case SVE::BI__builtin_sve_svlen_s64:
8462 case SVE::BI__builtin_sve_svlen_u8:
8463 case SVE::BI__builtin_sve_svlen_u16:
8464 case SVE::BI__builtin_sve_svlen_u32:
8465 case SVE::BI__builtin_sve_svlen_u64: {
8466 SVETypeFlags TF(Builtin->TypeModifier);
8467 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
8468 auto NumEls = llvm::ConstantInt::get(Ty, VTy->getElementCount().Min);
8470 Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
8471 return Builder.CreateMul(NumEls, Builder.CreateCall(F));
8474 case SVE::BI__builtin_sve_svtbl2_u8:
8475 case SVE::BI__builtin_sve_svtbl2_s8:
8476 case SVE::BI__builtin_sve_svtbl2_u16:
8477 case SVE::BI__builtin_sve_svtbl2_s16:
8478 case SVE::BI__builtin_sve_svtbl2_u32:
8479 case SVE::BI__builtin_sve_svtbl2_s32:
8480 case SVE::BI__builtin_sve_svtbl2_u64:
8481 case SVE::BI__builtin_sve_svtbl2_s64:
8482 case SVE::BI__builtin_sve_svtbl2_f16:
8483 case SVE::BI__builtin_sve_svtbl2_bf16:
8484 case SVE::BI__builtin_sve_svtbl2_f32:
8485 case SVE::BI__builtin_sve_svtbl2_f64: {
8486 SVETypeFlags TF(Builtin->TypeModifier);
8487 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
8488 auto TupleTy = llvm::VectorType::get(VTy->getElementType(),
8489 VTy->getElementCount() * 2);
8491 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8492 Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
8493 Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
8494 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
8495 return Builder.CreateCall(F, {V0, V1, Ops[1]});
8499 /// Should not happen
8503 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
8505 llvm::Triple::ArchType Arch) {
8506 if (BuiltinID >= AArch64::FirstSVEBuiltin &&
8507 BuiltinID <= AArch64::LastSVEBuiltin)
8508 return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
8510 unsigned HintID = static_cast<unsigned>(-1);
8511 switch (BuiltinID) {
8513 case AArch64::BI__builtin_arm_nop:
8516 case AArch64::BI__builtin_arm_yield:
8517 case AArch64::BI__yield:
8520 case AArch64::BI__builtin_arm_wfe:
8521 case AArch64::BI__wfe:
8524 case AArch64::BI__builtin_arm_wfi:
8525 case AArch64::BI__wfi:
8528 case AArch64::BI__builtin_arm_sev:
8529 case AArch64::BI__sev:
8532 case AArch64::BI__builtin_arm_sevl:
8533 case AArch64::BI__sevl:
8538 if (HintID != static_cast<unsigned>(-1)) {
8539 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
8540 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
8543 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
8544 Value *Address = EmitScalarExpr(E->getArg(0));
8545 Value *RW = EmitScalarExpr(E->getArg(1));
8546 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
8547 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
8548 Value *IsData = EmitScalarExpr(E->getArg(4));
8550 Value *Locality = nullptr;
8551 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
8552 // Temporal fetch, needs to convert cache level to locality.
8553 Locality = llvm::ConstantInt::get(Int32Ty,
8554 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
8557 Locality = llvm::ConstantInt::get(Int32Ty, 0);
8560 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
8561 // PLDL3STRM or PLDL2STRM.
8562 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
8563 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
8566 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
8567 assert((getContext().getTypeSize(E->getType()) == 32) &&
8568 "rbit of unusual size!");
8569 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8570 return Builder.CreateCall(
8571 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8573 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
8574 assert((getContext().getTypeSize(E->getType()) == 64) &&
8575 "rbit of unusual size!");
8576 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8577 return Builder.CreateCall(
8578 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8581 if (BuiltinID == AArch64::BI__builtin_arm_cls) {
8582 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8583 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
8586 if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
8587 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8588 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
8592 if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
8593 assert((getContext().getTypeSize(E->getType()) == 32) &&
8594 "__jcvt of unusual size!");
8595 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8596 return Builder.CreateCall(
8597 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
8600 if (BuiltinID == AArch64::BI__clear_cache) {
8601 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
8602 const FunctionDecl *FD = E->getDirectCallee();
8604 for (unsigned i = 0; i < 2; i++)
8605 Ops[i] = EmitScalarExpr(E->getArg(i));
8606 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
8607 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
8608 StringRef Name = FD->getName();
8609 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
8612 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
8613 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
8614 getContext().getTypeSize(E->getType()) == 128) {
8615 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
8616 ? Intrinsic::aarch64_ldaxp
8617 : Intrinsic::aarch64_ldxp);
8619 Value *LdPtr = EmitScalarExpr(E->getArg(0));
8620 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
8623 Value *Val0 = Builder.CreateExtractValue(Val, 1);
8624 Value *Val1 = Builder.CreateExtractValue(Val, 0);
8625 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
8626 Val0 = Builder.CreateZExt(Val0, Int128Ty);
8627 Val1 = Builder.CreateZExt(Val1, Int128Ty);
8629 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
8630 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
8631 Val = Builder.CreateOr(Val, Val1);
8632 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
8633 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
8634 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
8635 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
8637 QualType Ty = E->getType();
8638 llvm::Type *RealResTy = ConvertType(Ty);
8639 llvm::Type *PtrTy = llvm::IntegerType::get(
8640 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
8641 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
8643 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
8644 ? Intrinsic::aarch64_ldaxr
8645 : Intrinsic::aarch64_ldxr,
8647 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
8649 if (RealResTy->isPointerTy())
8650 return Builder.CreateIntToPtr(Val, RealResTy);
8652 llvm::Type *IntResTy = llvm::IntegerType::get(
8653 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
8654 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
8655 return Builder.CreateBitCast(Val, RealResTy);
8658 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
8659 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
8660 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
8661 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
8662 ? Intrinsic::aarch64_stlxp
8663 : Intrinsic::aarch64_stxp);
8664 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
8666 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
8667 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
8669 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
8670 llvm::Value *Val = Builder.CreateLoad(Tmp);
8672 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
8673 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
8674 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
8676 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
8679 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
8680 BuiltinID == AArch64::BI__builtin_arm_stlex) {
8681 Value *StoreVal = EmitScalarExpr(E->getArg(0));
8682 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
8684 QualType Ty = E->getArg(0)->getType();
8685 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
8686 getContext().getTypeSize(Ty));
8687 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
8689 if (StoreVal->getType()->isPointerTy())
8690 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
8692 llvm::Type *IntTy = llvm::IntegerType::get(
8694 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
8695 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
8696 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
8699 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
8700 ? Intrinsic::aarch64_stlxr
8701 : Intrinsic::aarch64_stxr,
8702 StoreAddr->getType());
8703 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
8706 if (BuiltinID == AArch64::BI__getReg) {
8707 Expr::EvalResult Result;
8708 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
8709 llvm_unreachable("Sema will ensure that the parameter is constant");
8711 llvm::APSInt Value = Result.Val.getInt();
8712 LLVMContext &Context = CGM.getLLVMContext();
8713 std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
8715 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
8716 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
8717 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
8720 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
8721 return Builder.CreateCall(F, Metadata);
8724 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
8725 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
8726 return Builder.CreateCall(F);
8729 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
8730 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
8731 llvm::SyncScope::SingleThread);
8734 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
8735 switch (BuiltinID) {
8736 case AArch64::BI__builtin_arm_crc32b:
8737 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
8738 case AArch64::BI__builtin_arm_crc32cb:
8739 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
8740 case AArch64::BI__builtin_arm_crc32h:
8741 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
8742 case AArch64::BI__builtin_arm_crc32ch:
8743 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
8744 case AArch64::BI__builtin_arm_crc32w:
8745 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
8746 case AArch64::BI__builtin_arm_crc32cw:
8747 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
8748 case AArch64::BI__builtin_arm_crc32d:
8749 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
8750 case AArch64::BI__builtin_arm_crc32cd:
8751 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
8754 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
8755 Value *Arg0 = EmitScalarExpr(E->getArg(0));
8756 Value *Arg1 = EmitScalarExpr(E->getArg(1));
8757 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
8759 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
8760 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
8762 return Builder.CreateCall(F, {Arg0, Arg1});
8765 // Memory Tagging Extensions (MTE) Intrinsics
8766 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
8767 switch (BuiltinID) {
8768 case AArch64::BI__builtin_arm_irg:
8769 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
8770 case AArch64::BI__builtin_arm_addg:
8771 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
8772 case AArch64::BI__builtin_arm_gmi:
8773 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
8774 case AArch64::BI__builtin_arm_ldg:
8775 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
8776 case AArch64::BI__builtin_arm_stg:
8777 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
8778 case AArch64::BI__builtin_arm_subp:
8779 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
8782 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
8783 llvm::Type *T = ConvertType(E->getType());
8785 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
8786 Value *Pointer = EmitScalarExpr(E->getArg(0));
8787 Value *Mask = EmitScalarExpr(E->getArg(1));
8789 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8790 Mask = Builder.CreateZExt(Mask, Int64Ty);
8791 Value *RV = Builder.CreateCall(
8792 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
8793 return Builder.CreatePointerCast(RV, T);
8795 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
8796 Value *Pointer = EmitScalarExpr(E->getArg(0));
8797 Value *TagOffset = EmitScalarExpr(E->getArg(1));
8799 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8800 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
8801 Value *RV = Builder.CreateCall(
8802 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
8803 return Builder.CreatePointerCast(RV, T);
8805 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
8806 Value *Pointer = EmitScalarExpr(E->getArg(0));
8807 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
8809 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
8810 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8811 return Builder.CreateCall(
8812 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
8814 // Although it is possible to supply a different return
8815 // address (first arg) to this intrinsic, for now we set
8816 // return address same as input address.
8817 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
8818 Value *TagAddress = EmitScalarExpr(E->getArg(0));
8819 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
8820 Value *RV = Builder.CreateCall(
8821 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
8822 return Builder.CreatePointerCast(RV, T);
8824 // Although it is possible to supply a different tag (to set)
8825 // to this intrinsic (as first arg), for now we supply
8826 // the tag that is in input address arg (common use case).
8827 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
8828 Value *TagAddress = EmitScalarExpr(E->getArg(0));
8829 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
8830 return Builder.CreateCall(
8831 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
8833 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
8834 Value *PointerA = EmitScalarExpr(E->getArg(0));
8835 Value *PointerB = EmitScalarExpr(E->getArg(1));
8836 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
8837 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
8838 return Builder.CreateCall(
8839 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
8843 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
8844 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
8845 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
8846 BuiltinID == AArch64::BI__builtin_arm_wsr ||
8847 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
8848 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
8850 SpecialRegisterAccessKind AccessKind = Write;
8851 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
8852 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
8853 BuiltinID == AArch64::BI__builtin_arm_rsrp)
8854 AccessKind = VolatileRead;
8856 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
8857 BuiltinID == AArch64::BI__builtin_arm_wsrp;
8859 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
8860 BuiltinID != AArch64::BI__builtin_arm_wsr;
8862 llvm::Type *ValueType;
8863 llvm::Type *RegisterType = Int64Ty;
8864 if (IsPointerBuiltin) {
8865 ValueType = VoidPtrTy;
8866 } else if (Is64Bit) {
8867 ValueType = Int64Ty;
8869 ValueType = Int32Ty;
8872 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
8876 if (BuiltinID == AArch64::BI_ReadStatusReg ||
8877 BuiltinID == AArch64::BI_WriteStatusReg) {
8878 LLVMContext &Context = CGM.getLLVMContext();
8881 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
8883 std::string SysRegStr;
8884 llvm::raw_string_ostream(SysRegStr) <<
8885 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
8886 ((SysReg >> 11) & 7) << ":" <<
8887 ((SysReg >> 7) & 15) << ":" <<
8888 ((SysReg >> 3) & 15) << ":" <<
8891 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
8892 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
8893 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
8895 llvm::Type *RegisterType = Int64Ty;
8896 llvm::Type *Types[] = { RegisterType };
8898 if (BuiltinID == AArch64::BI_ReadStatusReg) {
8899 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
8901 return Builder.CreateCall(F, Metadata);
8904 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
8905 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
8907 return Builder.CreateCall(F, { Metadata, ArgValue });
8910 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
8912 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
8913 return Builder.CreateCall(F);
8916 if (BuiltinID == AArch64::BI__builtin_sponentry) {
8917 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
8918 return Builder.CreateCall(F);
8921 // Find out if any arguments are required to be integer constant
8923 unsigned ICEArguments = 0;
8924 ASTContext::GetBuiltinTypeError Error;
8925 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8926 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8928 llvm::SmallVector<Value*, 4> Ops;
8929 Address PtrOp0 = Address::invalid();
8930 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
8932 switch (BuiltinID) {
8933 case NEON::BI__builtin_neon_vld1_v:
8934 case NEON::BI__builtin_neon_vld1q_v:
8935 case NEON::BI__builtin_neon_vld1_dup_v:
8936 case NEON::BI__builtin_neon_vld1q_dup_v:
8937 case NEON::BI__builtin_neon_vld1_lane_v:
8938 case NEON::BI__builtin_neon_vld1q_lane_v:
8939 case NEON::BI__builtin_neon_vst1_v:
8940 case NEON::BI__builtin_neon_vst1q_v:
8941 case NEON::BI__builtin_neon_vst1_lane_v:
8942 case NEON::BI__builtin_neon_vst1q_lane_v:
8943 // Get the alignment for the argument in addition to the value;
8944 // we'll use it later.
8945 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
8946 Ops.push_back(PtrOp0.getPointer());
8950 if ((ICEArguments & (1 << i)) == 0) {
8951 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8953 // If this is required to be a constant, constant fold it so that we know
8954 // that the generated intrinsic gets a ConstantInt.
8955 llvm::APSInt Result;
8956 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
8957 assert(IsConst && "Constant arg isn't actually constant?");
8959 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
8963 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
8964 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
8965 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
8968 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
8969 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
8970 assert(Result && "SISD intrinsic should have been handled");
8974 llvm::APSInt Result;
8975 const Expr *Arg = E->getArg(E->getNumArgs()-1);
8976 NeonTypeFlags Type(0);
8977 if (Arg->isIntegerConstantExpr(Result, getContext()))
8978 // Determine the type of this overloaded NEON intrinsic.
8979 Type = NeonTypeFlags(Result.getZExtValue());
8981 bool usgn = Type.isUnsigned();
8982 bool quad = Type.isQuad();
8984 // Handle non-overloaded intrinsics first.
8985 switch (BuiltinID) {
8987 case NEON::BI__builtin_neon_vabsh_f16:
8988 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8989 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
8990 case NEON::BI__builtin_neon_vldrq_p128: {
8991 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
8992 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
8993 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
8994 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
8995 CharUnits::fromQuantity(16));
8997 case NEON::BI__builtin_neon_vstrq_p128: {
8998 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
8999 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
9000 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
9002 case NEON::BI__builtin_neon_vcvts_u32_f32:
9003 case NEON::BI__builtin_neon_vcvtd_u64_f64:
9006 case NEON::BI__builtin_neon_vcvts_s32_f32:
9007 case NEON::BI__builtin_neon_vcvtd_s64_f64: {
9008 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9009 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9010 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9011 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9012 Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
9014 return Builder.CreateFPToUI(Ops[0], InTy);
9015 return Builder.CreateFPToSI(Ops[0], InTy);
9017 case NEON::BI__builtin_neon_vcvts_f32_u32:
9018 case NEON::BI__builtin_neon_vcvtd_f64_u64:
9021 case NEON::BI__builtin_neon_vcvts_f32_s32:
9022 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
9023 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9024 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9025 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9026 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9027 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9029 return Builder.CreateUIToFP(Ops[0], FTy);
9030 return Builder.CreateSIToFP(Ops[0], FTy);
9032 case NEON::BI__builtin_neon_vcvth_f16_u16:
9033 case NEON::BI__builtin_neon_vcvth_f16_u32:
9034 case NEON::BI__builtin_neon_vcvth_f16_u64:
9037 case NEON::BI__builtin_neon_vcvth_f16_s16:
9038 case NEON::BI__builtin_neon_vcvth_f16_s32:
9039 case NEON::BI__builtin_neon_vcvth_f16_s64: {
9040 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9041 llvm::Type *FTy = HalfTy;
9043 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
9045 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
9049 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9051 return Builder.CreateUIToFP(Ops[0], FTy);
9052 return Builder.CreateSIToFP(Ops[0], FTy);
9054 case NEON::BI__builtin_neon_vcvth_u16_f16:
9057 case NEON::BI__builtin_neon_vcvth_s16_f16: {
9058 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9059 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
9061 return Builder.CreateFPToUI(Ops[0], Int16Ty);
9062 return Builder.CreateFPToSI(Ops[0], Int16Ty);
9064 case NEON::BI__builtin_neon_vcvth_u32_f16:
9067 case NEON::BI__builtin_neon_vcvth_s32_f16: {
9068 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9069 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
9071 return Builder.CreateFPToUI(Ops[0], Int32Ty);
9072 return Builder.CreateFPToSI(Ops[0], Int32Ty);
9074 case NEON::BI__builtin_neon_vcvth_u64_f16:
9077 case NEON::BI__builtin_neon_vcvth_s64_f16: {
9078 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9079 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
9081 return Builder.CreateFPToUI(Ops[0], Int64Ty);
9082 return Builder.CreateFPToSI(Ops[0], Int64Ty);
9084 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9085 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9086 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9087 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9088 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9089 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9090 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9091 case NEON::BI__builtin_neon_vcvtph_s16_f16: {
9093 llvm::Type* InTy = Int32Ty;
9094 llvm::Type* FTy = HalfTy;
9095 llvm::Type *Tys[2] = {InTy, FTy};
9096 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9097 switch (BuiltinID) {
9098 default: llvm_unreachable("missing builtin ID in switch!");
9099 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9100 Int = Intrinsic::aarch64_neon_fcvtau; break;
9101 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9102 Int = Intrinsic::aarch64_neon_fcvtmu; break;
9103 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9104 Int = Intrinsic::aarch64_neon_fcvtnu; break;
9105 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9106 Int = Intrinsic::aarch64_neon_fcvtpu; break;
9107 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9108 Int = Intrinsic::aarch64_neon_fcvtas; break;
9109 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9110 Int = Intrinsic::aarch64_neon_fcvtms; break;
9111 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9112 Int = Intrinsic::aarch64_neon_fcvtns; break;
9113 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9114 Int = Intrinsic::aarch64_neon_fcvtps; break;
9116 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
9117 return Builder.CreateTrunc(Ops[0], Int16Ty);
9119 case NEON::BI__builtin_neon_vcaleh_f16:
9120 case NEON::BI__builtin_neon_vcalth_f16:
9121 case NEON::BI__builtin_neon_vcageh_f16:
9122 case NEON::BI__builtin_neon_vcagth_f16: {
9124 llvm::Type* InTy = Int32Ty;
9125 llvm::Type* FTy = HalfTy;
9126 llvm::Type *Tys[2] = {InTy, FTy};
9127 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9128 switch (BuiltinID) {
9129 default: llvm_unreachable("missing builtin ID in switch!");
9130 case NEON::BI__builtin_neon_vcageh_f16:
9131 Int = Intrinsic::aarch64_neon_facge; break;
9132 case NEON::BI__builtin_neon_vcagth_f16:
9133 Int = Intrinsic::aarch64_neon_facgt; break;
9134 case NEON::BI__builtin_neon_vcaleh_f16:
9135 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
9136 case NEON::BI__builtin_neon_vcalth_f16:
9137 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
9139 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
9140 return Builder.CreateTrunc(Ops[0], Int16Ty);
9142 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9143 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
9145 llvm::Type* InTy = Int32Ty;
9146 llvm::Type* FTy = HalfTy;
9147 llvm::Type *Tys[2] = {InTy, FTy};
9148 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9149 switch (BuiltinID) {
9150 default: llvm_unreachable("missing builtin ID in switch!");
9151 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9152 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
9153 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
9154 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
9156 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9157 return Builder.CreateTrunc(Ops[0], Int16Ty);
9159 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9160 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
9162 llvm::Type* FTy = HalfTy;
9163 llvm::Type* InTy = Int32Ty;
9164 llvm::Type *Tys[2] = {FTy, InTy};
9165 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9166 switch (BuiltinID) {
9167 default: llvm_unreachable("missing builtin ID in switch!");
9168 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9169 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
9170 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
9172 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
9173 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
9174 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
9177 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9179 case NEON::BI__builtin_neon_vpaddd_s64: {
9180 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
9181 Value *Vec = EmitScalarExpr(E->getArg(0));
9182 // The vector is v2f64, so make sure it's bitcast to that.
9183 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
9184 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9185 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9186 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9187 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9188 // Pairwise addition of a v2f64 into a scalar f64.
9189 return Builder.CreateAdd(Op0, Op1, "vpaddd");
9191 case NEON::BI__builtin_neon_vpaddd_f64: {
9192 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
9193 Value *Vec = EmitScalarExpr(E->getArg(0));
9194 // The vector is v2f64, so make sure it's bitcast to that.
9195 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
9196 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9197 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9198 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9199 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9200 // Pairwise addition of a v2f64 into a scalar f64.
9201 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9203 case NEON::BI__builtin_neon_vpadds_f32: {
9204 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
9205 Value *Vec = EmitScalarExpr(E->getArg(0));
9206 // The vector is v2f32, so make sure it's bitcast to that.
9207 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
9208 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9209 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9210 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9211 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9212 // Pairwise addition of a v2f32 into a scalar f32.
9213 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9215 case NEON::BI__builtin_neon_vceqzd_s64:
9216 case NEON::BI__builtin_neon_vceqzd_f64:
9217 case NEON::BI__builtin_neon_vceqzs_f32:
9218 case NEON::BI__builtin_neon_vceqzh_f16:
9219 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9220 return EmitAArch64CompareBuiltinExpr(
9221 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9222 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
9223 case NEON::BI__builtin_neon_vcgezd_s64:
9224 case NEON::BI__builtin_neon_vcgezd_f64:
9225 case NEON::BI__builtin_neon_vcgezs_f32:
9226 case NEON::BI__builtin_neon_vcgezh_f16:
9227 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9228 return EmitAArch64CompareBuiltinExpr(
9229 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9230 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
9231 case NEON::BI__builtin_neon_vclezd_s64:
9232 case NEON::BI__builtin_neon_vclezd_f64:
9233 case NEON::BI__builtin_neon_vclezs_f32:
9234 case NEON::BI__builtin_neon_vclezh_f16:
9235 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9236 return EmitAArch64CompareBuiltinExpr(
9237 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9238 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
9239 case NEON::BI__builtin_neon_vcgtzd_s64:
9240 case NEON::BI__builtin_neon_vcgtzd_f64:
9241 case NEON::BI__builtin_neon_vcgtzs_f32:
9242 case NEON::BI__builtin_neon_vcgtzh_f16:
9243 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9244 return EmitAArch64CompareBuiltinExpr(
9245 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9246 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
9247 case NEON::BI__builtin_neon_vcltzd_s64:
9248 case NEON::BI__builtin_neon_vcltzd_f64:
9249 case NEON::BI__builtin_neon_vcltzs_f32:
9250 case NEON::BI__builtin_neon_vcltzh_f16:
9251 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9252 return EmitAArch64CompareBuiltinExpr(
9253 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9254 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
9256 case NEON::BI__builtin_neon_vceqzd_u64: {
9257 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9258 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9260 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
9261 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
9263 case NEON::BI__builtin_neon_vceqd_f64:
9264 case NEON::BI__builtin_neon_vcled_f64:
9265 case NEON::BI__builtin_neon_vcltd_f64:
9266 case NEON::BI__builtin_neon_vcged_f64:
9267 case NEON::BI__builtin_neon_vcgtd_f64: {
9268 llvm::CmpInst::Predicate P;
9269 switch (BuiltinID) {
9270 default: llvm_unreachable("missing builtin ID in switch!");
9271 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
9272 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
9273 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
9274 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
9275 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
9277 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9278 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9279 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9280 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9281 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
9283 case NEON::BI__builtin_neon_vceqs_f32:
9284 case NEON::BI__builtin_neon_vcles_f32:
9285 case NEON::BI__builtin_neon_vclts_f32:
9286 case NEON::BI__builtin_neon_vcges_f32:
9287 case NEON::BI__builtin_neon_vcgts_f32: {
9288 llvm::CmpInst::Predicate P;
9289 switch (BuiltinID) {
9290 default: llvm_unreachable("missing builtin ID in switch!");
9291 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
9292 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
9293 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
9294 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
9295 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
9297 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9298 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
9299 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
9300 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9301 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
9303 case NEON::BI__builtin_neon_vceqh_f16:
9304 case NEON::BI__builtin_neon_vcleh_f16:
9305 case NEON::BI__builtin_neon_vclth_f16:
9306 case NEON::BI__builtin_neon_vcgeh_f16:
9307 case NEON::BI__builtin_neon_vcgth_f16: {
9308 llvm::CmpInst::Predicate P;
9309 switch (BuiltinID) {
9310 default: llvm_unreachable("missing builtin ID in switch!");
9311 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
9312 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
9313 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
9314 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
9315 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
9317 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9318 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
9319 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
9320 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9321 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
9323 case NEON::BI__builtin_neon_vceqd_s64:
9324 case NEON::BI__builtin_neon_vceqd_u64:
9325 case NEON::BI__builtin_neon_vcgtd_s64:
9326 case NEON::BI__builtin_neon_vcgtd_u64:
9327 case NEON::BI__builtin_neon_vcltd_s64:
9328 case NEON::BI__builtin_neon_vcltd_u64:
9329 case NEON::BI__builtin_neon_vcged_u64:
9330 case NEON::BI__builtin_neon_vcged_s64:
9331 case NEON::BI__builtin_neon_vcled_u64:
9332 case NEON::BI__builtin_neon_vcled_s64: {
9333 llvm::CmpInst::Predicate P;
9334 switch (BuiltinID) {
9335 default: llvm_unreachable("missing builtin ID in switch!");
9336 case NEON::BI__builtin_neon_vceqd_s64:
9337 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
9338 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
9339 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
9340 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
9341 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
9342 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
9343 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
9344 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
9345 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
9347 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9348 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9349 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9350 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
9351 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
9353 case NEON::BI__builtin_neon_vtstd_s64:
9354 case NEON::BI__builtin_neon_vtstd_u64: {
9355 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9356 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9357 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9358 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
9359 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
9360 llvm::Constant::getNullValue(Int64Ty));
9361 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
9363 case NEON::BI__builtin_neon_vset_lane_i8:
9364 case NEON::BI__builtin_neon_vset_lane_i16:
9365 case NEON::BI__builtin_neon_vset_lane_i32:
9366 case NEON::BI__builtin_neon_vset_lane_i64:
9367 case NEON::BI__builtin_neon_vset_lane_bf16:
9368 case NEON::BI__builtin_neon_vset_lane_f32:
9369 case NEON::BI__builtin_neon_vsetq_lane_i8:
9370 case NEON::BI__builtin_neon_vsetq_lane_i16:
9371 case NEON::BI__builtin_neon_vsetq_lane_i32:
9372 case NEON::BI__builtin_neon_vsetq_lane_i64:
9373 case NEON::BI__builtin_neon_vsetq_lane_bf16:
9374 case NEON::BI__builtin_neon_vsetq_lane_f32:
9375 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9376 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9377 case NEON::BI__builtin_neon_vset_lane_f64:
9378 // The vector type needs a cast for the v1f64 variant.
9380 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
9381 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9382 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9383 case NEON::BI__builtin_neon_vsetq_lane_f64:
9384 // The vector type needs a cast for the v2f64 variant.
9386 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
9387 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9388 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9390 case NEON::BI__builtin_neon_vget_lane_i8:
9391 case NEON::BI__builtin_neon_vdupb_lane_i8:
9393 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
9394 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9396 case NEON::BI__builtin_neon_vgetq_lane_i8:
9397 case NEON::BI__builtin_neon_vdupb_laneq_i8:
9399 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
9400 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9402 case NEON::BI__builtin_neon_vget_lane_i16:
9403 case NEON::BI__builtin_neon_vduph_lane_i16:
9405 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
9406 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9408 case NEON::BI__builtin_neon_vgetq_lane_i16:
9409 case NEON::BI__builtin_neon_vduph_laneq_i16:
9411 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
9412 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9414 case NEON::BI__builtin_neon_vget_lane_i32:
9415 case NEON::BI__builtin_neon_vdups_lane_i32:
9417 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
9418 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9420 case NEON::BI__builtin_neon_vdups_lane_f32:
9422 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9423 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9425 case NEON::BI__builtin_neon_vgetq_lane_i32:
9426 case NEON::BI__builtin_neon_vdups_laneq_i32:
9428 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
9429 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9431 case NEON::BI__builtin_neon_vget_lane_i64:
9432 case NEON::BI__builtin_neon_vdupd_lane_i64:
9434 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
9435 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9437 case NEON::BI__builtin_neon_vdupd_lane_f64:
9439 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9440 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9442 case NEON::BI__builtin_neon_vgetq_lane_i64:
9443 case NEON::BI__builtin_neon_vdupd_laneq_i64:
9445 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
9446 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9448 case NEON::BI__builtin_neon_vget_lane_f32:
9450 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9451 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9453 case NEON::BI__builtin_neon_vget_lane_f64:
9455 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9456 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9458 case NEON::BI__builtin_neon_vgetq_lane_f32:
9459 case NEON::BI__builtin_neon_vdups_laneq_f32:
9461 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
9462 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9464 case NEON::BI__builtin_neon_vgetq_lane_f64:
9465 case NEON::BI__builtin_neon_vdupd_laneq_f64:
9467 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
9468 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9470 case NEON::BI__builtin_neon_vaddh_f16:
9471 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9472 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
9473 case NEON::BI__builtin_neon_vsubh_f16:
9474 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9475 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
9476 case NEON::BI__builtin_neon_vmulh_f16:
9477 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9478 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
9479 case NEON::BI__builtin_neon_vdivh_f16:
9480 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9481 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
9482 case NEON::BI__builtin_neon_vfmah_f16:
9483 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9484 return emitCallMaybeConstrainedFPBuiltin(
9485 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9486 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
9487 case NEON::BI__builtin_neon_vfmsh_f16: {
9488 // FIXME: This should be an fneg instruction:
9489 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
9490 Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
9492 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9493 return emitCallMaybeConstrainedFPBuiltin(
9494 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9495 {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
9497 case NEON::BI__builtin_neon_vaddd_s64:
9498 case NEON::BI__builtin_neon_vaddd_u64:
9499 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
9500 case NEON::BI__builtin_neon_vsubd_s64:
9501 case NEON::BI__builtin_neon_vsubd_u64:
9502 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
9503 case NEON::BI__builtin_neon_vqdmlalh_s16:
9504 case NEON::BI__builtin_neon_vqdmlslh_s16: {
9505 SmallVector<Value *, 2> ProductOps;
9506 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9507 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
9508 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9509 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9510 ProductOps, "vqdmlXl");
9511 Constant *CI = ConstantInt::get(SizeTy, 0);
9512 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9514 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
9515 ? Intrinsic::aarch64_neon_sqadd
9516 : Intrinsic::aarch64_neon_sqsub;
9517 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
9519 case NEON::BI__builtin_neon_vqshlud_n_s64: {
9520 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9521 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9522 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
9525 case NEON::BI__builtin_neon_vqshld_n_u64:
9526 case NEON::BI__builtin_neon_vqshld_n_s64: {
9527 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
9528 ? Intrinsic::aarch64_neon_uqshl
9529 : Intrinsic::aarch64_neon_sqshl;
9530 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9531 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9532 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
9534 case NEON::BI__builtin_neon_vrshrd_n_u64:
9535 case NEON::BI__builtin_neon_vrshrd_n_s64: {
9536 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
9537 ? Intrinsic::aarch64_neon_urshl
9538 : Intrinsic::aarch64_neon_srshl;
9539 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9540 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
9541 Ops[1] = ConstantInt::get(Int64Ty, -SV);
9542 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
9544 case NEON::BI__builtin_neon_vrsrad_n_u64:
9545 case NEON::BI__builtin_neon_vrsrad_n_s64: {
9546 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
9547 ? Intrinsic::aarch64_neon_urshl
9548 : Intrinsic::aarch64_neon_srshl;
9549 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9550 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
9551 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
9552 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
9553 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
9555 case NEON::BI__builtin_neon_vshld_n_s64:
9556 case NEON::BI__builtin_neon_vshld_n_u64: {
9557 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9558 return Builder.CreateShl(
9559 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
9561 case NEON::BI__builtin_neon_vshrd_n_s64: {
9562 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9563 return Builder.CreateAShr(
9564 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
9565 Amt->getZExtValue())),
9568 case NEON::BI__builtin_neon_vshrd_n_u64: {
9569 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9570 uint64_t ShiftAmt = Amt->getZExtValue();
9571 // Right-shifting an unsigned value by its size yields 0.
9573 return ConstantInt::get(Int64Ty, 0);
9574 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
9577 case NEON::BI__builtin_neon_vsrad_n_s64: {
9578 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
9579 Ops[1] = Builder.CreateAShr(
9580 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
9581 Amt->getZExtValue())),
9583 return Builder.CreateAdd(Ops[0], Ops[1]);
9585 case NEON::BI__builtin_neon_vsrad_n_u64: {
9586 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
9587 uint64_t ShiftAmt = Amt->getZExtValue();
9588 // Right-shifting an unsigned value by its size yields 0.
9589 // As Op + 0 = Op, return Ops[0] directly.
9592 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
9594 return Builder.CreateAdd(Ops[0], Ops[1]);
9596 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
9597 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
9598 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
9599 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
9600 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
9602 SmallVector<Value *, 2> ProductOps;
9603 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9604 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
9605 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9606 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9607 ProductOps, "vqdmlXl");
9608 Constant *CI = ConstantInt::get(SizeTy, 0);
9609 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9612 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
9613 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
9614 ? Intrinsic::aarch64_neon_sqadd
9615 : Intrinsic::aarch64_neon_sqsub;
9616 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
9618 case NEON::BI__builtin_neon_vqdmlals_s32:
9619 case NEON::BI__builtin_neon_vqdmlsls_s32: {
9620 SmallVector<Value *, 2> ProductOps;
9621 ProductOps.push_back(Ops[1]);
9622 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
9624 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
9625 ProductOps, "vqdmlXl");
9627 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
9628 ? Intrinsic::aarch64_neon_sqadd
9629 : Intrinsic::aarch64_neon_sqsub;
9630 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
9632 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
9633 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
9634 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
9635 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
9636 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
9638 SmallVector<Value *, 2> ProductOps;
9639 ProductOps.push_back(Ops[1]);
9640 ProductOps.push_back(Ops[2]);
9642 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
9643 ProductOps, "vqdmlXl");
9646 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
9647 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
9648 ? Intrinsic::aarch64_neon_sqadd
9649 : Intrinsic::aarch64_neon_sqsub;
9650 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
9652 case NEON::BI__builtin_neon_vget_lane_bf16:
9653 case NEON::BI__builtin_neon_vduph_lane_bf16:
9654 case NEON::BI__builtin_neon_vduph_lane_f16: {
9655 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9658 case NEON::BI__builtin_neon_vgetq_lane_bf16:
9659 case NEON::BI__builtin_neon_vduph_laneq_bf16:
9660 case NEON::BI__builtin_neon_vduph_laneq_f16: {
9661 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9664 case AArch64::BI_BitScanForward:
9665 case AArch64::BI_BitScanForward64:
9666 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
9667 case AArch64::BI_BitScanReverse:
9668 case AArch64::BI_BitScanReverse64:
9669 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
9670 case AArch64::BI_InterlockedAnd64:
9671 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
9672 case AArch64::BI_InterlockedExchange64:
9673 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
9674 case AArch64::BI_InterlockedExchangeAdd64:
9675 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
9676 case AArch64::BI_InterlockedExchangeSub64:
9677 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
9678 case AArch64::BI_InterlockedOr64:
9679 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
9680 case AArch64::BI_InterlockedXor64:
9681 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
9682 case AArch64::BI_InterlockedDecrement64:
9683 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
9684 case AArch64::BI_InterlockedIncrement64:
9685 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
9686 case AArch64::BI_InterlockedExchangeAdd8_acq:
9687 case AArch64::BI_InterlockedExchangeAdd16_acq:
9688 case AArch64::BI_InterlockedExchangeAdd_acq:
9689 case AArch64::BI_InterlockedExchangeAdd64_acq:
9690 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
9691 case AArch64::BI_InterlockedExchangeAdd8_rel:
9692 case AArch64::BI_InterlockedExchangeAdd16_rel:
9693 case AArch64::BI_InterlockedExchangeAdd_rel:
9694 case AArch64::BI_InterlockedExchangeAdd64_rel:
9695 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
9696 case AArch64::BI_InterlockedExchangeAdd8_nf:
9697 case AArch64::BI_InterlockedExchangeAdd16_nf:
9698 case AArch64::BI_InterlockedExchangeAdd_nf:
9699 case AArch64::BI_InterlockedExchangeAdd64_nf:
9700 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
9701 case AArch64::BI_InterlockedExchange8_acq:
9702 case AArch64::BI_InterlockedExchange16_acq:
9703 case AArch64::BI_InterlockedExchange_acq:
9704 case AArch64::BI_InterlockedExchange64_acq:
9705 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
9706 case AArch64::BI_InterlockedExchange8_rel:
9707 case AArch64::BI_InterlockedExchange16_rel:
9708 case AArch64::BI_InterlockedExchange_rel:
9709 case AArch64::BI_InterlockedExchange64_rel:
9710 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
9711 case AArch64::BI_InterlockedExchange8_nf:
9712 case AArch64::BI_InterlockedExchange16_nf:
9713 case AArch64::BI_InterlockedExchange_nf:
9714 case AArch64::BI_InterlockedExchange64_nf:
9715 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
9716 case AArch64::BI_InterlockedCompareExchange8_acq:
9717 case AArch64::BI_InterlockedCompareExchange16_acq:
9718 case AArch64::BI_InterlockedCompareExchange_acq:
9719 case AArch64::BI_InterlockedCompareExchange64_acq:
9720 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
9721 case AArch64::BI_InterlockedCompareExchange8_rel:
9722 case AArch64::BI_InterlockedCompareExchange16_rel:
9723 case AArch64::BI_InterlockedCompareExchange_rel:
9724 case AArch64::BI_InterlockedCompareExchange64_rel:
9725 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
9726 case AArch64::BI_InterlockedCompareExchange8_nf:
9727 case AArch64::BI_InterlockedCompareExchange16_nf:
9728 case AArch64::BI_InterlockedCompareExchange_nf:
9729 case AArch64::BI_InterlockedCompareExchange64_nf:
9730 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
9731 case AArch64::BI_InterlockedOr8_acq:
9732 case AArch64::BI_InterlockedOr16_acq:
9733 case AArch64::BI_InterlockedOr_acq:
9734 case AArch64::BI_InterlockedOr64_acq:
9735 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
9736 case AArch64::BI_InterlockedOr8_rel:
9737 case AArch64::BI_InterlockedOr16_rel:
9738 case AArch64::BI_InterlockedOr_rel:
9739 case AArch64::BI_InterlockedOr64_rel:
9740 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
9741 case AArch64::BI_InterlockedOr8_nf:
9742 case AArch64::BI_InterlockedOr16_nf:
9743 case AArch64::BI_InterlockedOr_nf:
9744 case AArch64::BI_InterlockedOr64_nf:
9745 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
9746 case AArch64::BI_InterlockedXor8_acq:
9747 case AArch64::BI_InterlockedXor16_acq:
9748 case AArch64::BI_InterlockedXor_acq:
9749 case AArch64::BI_InterlockedXor64_acq:
9750 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
9751 case AArch64::BI_InterlockedXor8_rel:
9752 case AArch64::BI_InterlockedXor16_rel:
9753 case AArch64::BI_InterlockedXor_rel:
9754 case AArch64::BI_InterlockedXor64_rel:
9755 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
9756 case AArch64::BI_InterlockedXor8_nf:
9757 case AArch64::BI_InterlockedXor16_nf:
9758 case AArch64::BI_InterlockedXor_nf:
9759 case AArch64::BI_InterlockedXor64_nf:
9760 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
9761 case AArch64::BI_InterlockedAnd8_acq:
9762 case AArch64::BI_InterlockedAnd16_acq:
9763 case AArch64::BI_InterlockedAnd_acq:
9764 case AArch64::BI_InterlockedAnd64_acq:
9765 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
9766 case AArch64::BI_InterlockedAnd8_rel:
9767 case AArch64::BI_InterlockedAnd16_rel:
9768 case AArch64::BI_InterlockedAnd_rel:
9769 case AArch64::BI_InterlockedAnd64_rel:
9770 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
9771 case AArch64::BI_InterlockedAnd8_nf:
9772 case AArch64::BI_InterlockedAnd16_nf:
9773 case AArch64::BI_InterlockedAnd_nf:
9774 case AArch64::BI_InterlockedAnd64_nf:
9775 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
9776 case AArch64::BI_InterlockedIncrement16_acq:
9777 case AArch64::BI_InterlockedIncrement_acq:
9778 case AArch64::BI_InterlockedIncrement64_acq:
9779 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
9780 case AArch64::BI_InterlockedIncrement16_rel:
9781 case AArch64::BI_InterlockedIncrement_rel:
9782 case AArch64::BI_InterlockedIncrement64_rel:
9783 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
9784 case AArch64::BI_InterlockedIncrement16_nf:
9785 case AArch64::BI_InterlockedIncrement_nf:
9786 case AArch64::BI_InterlockedIncrement64_nf:
9787 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
9788 case AArch64::BI_InterlockedDecrement16_acq:
9789 case AArch64::BI_InterlockedDecrement_acq:
9790 case AArch64::BI_InterlockedDecrement64_acq:
9791 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
9792 case AArch64::BI_InterlockedDecrement16_rel:
9793 case AArch64::BI_InterlockedDecrement_rel:
9794 case AArch64::BI_InterlockedDecrement64_rel:
9795 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
9796 case AArch64::BI_InterlockedDecrement16_nf:
9797 case AArch64::BI_InterlockedDecrement_nf:
9798 case AArch64::BI_InterlockedDecrement64_nf:
9799 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
9801 case AArch64::BI_InterlockedAdd: {
9802 Value *Arg0 = EmitScalarExpr(E->getArg(0));
9803 Value *Arg1 = EmitScalarExpr(E->getArg(1));
9804 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
9805 AtomicRMWInst::Add, Arg0, Arg1,
9806 llvm::AtomicOrdering::SequentiallyConsistent);
9807 return Builder.CreateAdd(RMWI, Arg1);
9811 llvm::VectorType *VTy = GetNeonType(this, Type);
9812 llvm::Type *Ty = VTy;
9816 // Not all intrinsics handled by the common case work for AArch64 yet, so only
9817 // defer to common code if it's been added to our special map.
9818 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
9819 AArch64SIMDIntrinsicsProvenSorted);
9822 return EmitCommonNeonBuiltinExpr(
9823 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
9824 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
9825 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
9827 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
9831 switch (BuiltinID) {
9832 default: return nullptr;
9833 case NEON::BI__builtin_neon_vbsl_v:
9834 case NEON::BI__builtin_neon_vbslq_v: {
9835 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
9836 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
9837 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
9838 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
9840 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
9841 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
9842 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
9843 return Builder.CreateBitCast(Ops[0], Ty);
9845 case NEON::BI__builtin_neon_vfma_lane_v:
9846 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
9847 // The ARM builtins (and instructions) have the addend as the first
9848 // operand, but the 'fma' intrinsics have it last. Swap it around here.
9849 Value *Addend = Ops[0];
9850 Value *Multiplicand = Ops[1];
9851 Value *LaneSource = Ops[2];
9852 Ops[0] = Multiplicand;
9853 Ops[1] = LaneSource;
9856 // Now adjust things to handle the lane access.
9857 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
9858 ? llvm::FixedVectorType::get(VTy->getElementType(),
9859 VTy->getNumElements() / 2)
9861 llvm::Constant *cst = cast<Constant>(Ops[3]);
9862 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
9863 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
9864 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
9867 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
9869 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
9871 case NEON::BI__builtin_neon_vfma_laneq_v: {
9872 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
9873 // v1f64 fma should be mapped to Neon scalar f64 fma
9874 if (VTy && VTy->getElementType() == DoubleTy) {
9875 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9876 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9877 llvm::Type *VTy = GetNeonType(this,
9878 NeonTypeFlags(NeonTypeFlags::Float64, false, true));
9879 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
9880 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
9882 Result = emitCallMaybeConstrainedFPBuiltin(
9883 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
9884 DoubleTy, {Ops[1], Ops[2], Ops[0]});
9885 return Builder.CreateBitCast(Result, Ty);
9887 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9888 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9890 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
9891 VTy->getNumElements() * 2);
9892 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
9893 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
9894 cast<ConstantInt>(Ops[3]));
9895 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
9897 return emitCallMaybeConstrainedFPBuiltin(
9898 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9899 {Ops[2], Ops[1], Ops[0]});
9901 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
9902 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9903 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9905 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9906 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
9907 return emitCallMaybeConstrainedFPBuiltin(
9908 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9909 {Ops[2], Ops[1], Ops[0]});
9911 case NEON::BI__builtin_neon_vfmah_lane_f16:
9912 case NEON::BI__builtin_neon_vfmas_lane_f32:
9913 case NEON::BI__builtin_neon_vfmah_laneq_f16:
9914 case NEON::BI__builtin_neon_vfmas_laneq_f32:
9915 case NEON::BI__builtin_neon_vfmad_lane_f64:
9916 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
9917 Ops.push_back(EmitScalarExpr(E->getArg(3)));
9918 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
9919 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
9920 return emitCallMaybeConstrainedFPBuiltin(
9921 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9922 {Ops[1], Ops[2], Ops[0]});
9924 case NEON::BI__builtin_neon_vmull_v:
9925 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9926 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
9927 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
9928 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
9929 case NEON::BI__builtin_neon_vmax_v:
9930 case NEON::BI__builtin_neon_vmaxq_v:
9931 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9932 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
9933 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
9934 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
9935 case NEON::BI__builtin_neon_vmaxh_f16: {
9936 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9937 Int = Intrinsic::aarch64_neon_fmax;
9938 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
9940 case NEON::BI__builtin_neon_vmin_v:
9941 case NEON::BI__builtin_neon_vminq_v:
9942 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9943 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
9944 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
9945 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
9946 case NEON::BI__builtin_neon_vminh_f16: {
9947 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9948 Int = Intrinsic::aarch64_neon_fmin;
9949 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
9951 case NEON::BI__builtin_neon_vabd_v:
9952 case NEON::BI__builtin_neon_vabdq_v:
9953 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9954 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
9955 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
9956 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
9957 case NEON::BI__builtin_neon_vpadal_v:
9958 case NEON::BI__builtin_neon_vpadalq_v: {
9959 unsigned ArgElts = VTy->getNumElements();
9960 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
9961 unsigned BitWidth = EltTy->getBitWidth();
9962 auto *ArgTy = llvm::FixedVectorType::get(
9963 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
9964 llvm::Type* Tys[2] = { VTy, ArgTy };
9965 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
9966 SmallVector<llvm::Value*, 1> TmpOps;
9967 TmpOps.push_back(Ops[1]);
9968 Function *F = CGM.getIntrinsic(Int, Tys);
9969 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
9970 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
9971 return Builder.CreateAdd(tmp, addend);
9973 case NEON::BI__builtin_neon_vpmin_v:
9974 case NEON::BI__builtin_neon_vpminq_v:
9975 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9976 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
9977 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
9978 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
9979 case NEON::BI__builtin_neon_vpmax_v:
9980 case NEON::BI__builtin_neon_vpmaxq_v:
9981 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9982 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
9983 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
9984 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
9985 case NEON::BI__builtin_neon_vminnm_v:
9986 case NEON::BI__builtin_neon_vminnmq_v:
9987 Int = Intrinsic::aarch64_neon_fminnm;
9988 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
9989 case NEON::BI__builtin_neon_vminnmh_f16:
9990 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9991 Int = Intrinsic::aarch64_neon_fminnm;
9992 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
9993 case NEON::BI__builtin_neon_vmaxnm_v:
9994 case NEON::BI__builtin_neon_vmaxnmq_v:
9995 Int = Intrinsic::aarch64_neon_fmaxnm;
9996 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
9997 case NEON::BI__builtin_neon_vmaxnmh_f16:
9998 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9999 Int = Intrinsic::aarch64_neon_fmaxnm;
10000 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
10001 case NEON::BI__builtin_neon_vrecpss_f32: {
10002 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10003 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
10006 case NEON::BI__builtin_neon_vrecpsd_f64:
10007 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10008 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
10010 case NEON::BI__builtin_neon_vrecpsh_f16:
10011 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10012 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
10014 case NEON::BI__builtin_neon_vqshrun_n_v:
10015 Int = Intrinsic::aarch64_neon_sqshrun;
10016 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
10017 case NEON::BI__builtin_neon_vqrshrun_n_v:
10018 Int = Intrinsic::aarch64_neon_sqrshrun;
10019 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
10020 case NEON::BI__builtin_neon_vqshrn_n_v:
10021 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
10022 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
10023 case NEON::BI__builtin_neon_vrshrn_n_v:
10024 Int = Intrinsic::aarch64_neon_rshrn;
10025 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
10026 case NEON::BI__builtin_neon_vqrshrn_n_v:
10027 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
10028 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
10029 case NEON::BI__builtin_neon_vrndah_f16: {
10030 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10031 Int = Builder.getIsFPConstrained()
10032 ? Intrinsic::experimental_constrained_round
10033 : Intrinsic::round;
10034 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
10036 case NEON::BI__builtin_neon_vrnda_v:
10037 case NEON::BI__builtin_neon_vrndaq_v: {
10038 Int = Builder.getIsFPConstrained()
10039 ? Intrinsic::experimental_constrained_round
10040 : Intrinsic::round;
10041 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
10043 case NEON::BI__builtin_neon_vrndih_f16: {
10044 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10045 Int = Builder.getIsFPConstrained()
10046 ? Intrinsic::experimental_constrained_nearbyint
10047 : Intrinsic::nearbyint;
10048 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
10050 case NEON::BI__builtin_neon_vrndmh_f16: {
10051 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10052 Int = Builder.getIsFPConstrained()
10053 ? Intrinsic::experimental_constrained_floor
10054 : Intrinsic::floor;
10055 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
10057 case NEON::BI__builtin_neon_vrndm_v:
10058 case NEON::BI__builtin_neon_vrndmq_v: {
10059 Int = Builder.getIsFPConstrained()
10060 ? Intrinsic::experimental_constrained_floor
10061 : Intrinsic::floor;
10062 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
10064 case NEON::BI__builtin_neon_vrndnh_f16: {
10065 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10066 Int = Intrinsic::aarch64_neon_frintn;
10067 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
10069 case NEON::BI__builtin_neon_vrndn_v:
10070 case NEON::BI__builtin_neon_vrndnq_v: {
10071 Int = Intrinsic::aarch64_neon_frintn;
10072 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
10074 case NEON::BI__builtin_neon_vrndns_f32: {
10075 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10076 Int = Intrinsic::aarch64_neon_frintn;
10077 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
10079 case NEON::BI__builtin_neon_vrndph_f16: {
10080 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10081 Int = Builder.getIsFPConstrained()
10082 ? Intrinsic::experimental_constrained_ceil
10084 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
10086 case NEON::BI__builtin_neon_vrndp_v:
10087 case NEON::BI__builtin_neon_vrndpq_v: {
10088 Int = Builder.getIsFPConstrained()
10089 ? Intrinsic::experimental_constrained_ceil
10091 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
10093 case NEON::BI__builtin_neon_vrndxh_f16: {
10094 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10095 Int = Builder.getIsFPConstrained()
10096 ? Intrinsic::experimental_constrained_rint
10098 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
10100 case NEON::BI__builtin_neon_vrndx_v:
10101 case NEON::BI__builtin_neon_vrndxq_v: {
10102 Int = Builder.getIsFPConstrained()
10103 ? Intrinsic::experimental_constrained_rint
10105 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
10107 case NEON::BI__builtin_neon_vrndh_f16: {
10108 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10109 Int = Builder.getIsFPConstrained()
10110 ? Intrinsic::experimental_constrained_trunc
10111 : Intrinsic::trunc;
10112 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
10114 case NEON::BI__builtin_neon_vrnd_v:
10115 case NEON::BI__builtin_neon_vrndq_v: {
10116 Int = Builder.getIsFPConstrained()
10117 ? Intrinsic::experimental_constrained_trunc
10118 : Intrinsic::trunc;
10119 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
10121 case NEON::BI__builtin_neon_vcvt_f64_v:
10122 case NEON::BI__builtin_neon_vcvtq_f64_v:
10123 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10124 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
10125 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
10126 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
10127 case NEON::BI__builtin_neon_vcvt_f64_f32: {
10128 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10129 "unexpected vcvt_f64_f32 builtin");
10130 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
10131 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10133 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
10135 case NEON::BI__builtin_neon_vcvt_f32_f64: {
10136 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10137 "unexpected vcvt_f32_f64 builtin");
10138 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
10139 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10141 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
10143 case NEON::BI__builtin_neon_vcvt_s32_v:
10144 case NEON::BI__builtin_neon_vcvt_u32_v:
10145 case NEON::BI__builtin_neon_vcvt_s64_v:
10146 case NEON::BI__builtin_neon_vcvt_u64_v:
10147 case NEON::BI__builtin_neon_vcvt_s16_v:
10148 case NEON::BI__builtin_neon_vcvt_u16_v:
10149 case NEON::BI__builtin_neon_vcvtq_s32_v:
10150 case NEON::BI__builtin_neon_vcvtq_u32_v:
10151 case NEON::BI__builtin_neon_vcvtq_s64_v:
10152 case NEON::BI__builtin_neon_vcvtq_u64_v:
10153 case NEON::BI__builtin_neon_vcvtq_s16_v:
10154 case NEON::BI__builtin_neon_vcvtq_u16_v: {
10155 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
10157 return Builder.CreateFPToUI(Ops[0], Ty);
10158 return Builder.CreateFPToSI(Ops[0], Ty);
10160 case NEON::BI__builtin_neon_vcvta_s16_v:
10161 case NEON::BI__builtin_neon_vcvta_u16_v:
10162 case NEON::BI__builtin_neon_vcvta_s32_v:
10163 case NEON::BI__builtin_neon_vcvtaq_s16_v:
10164 case NEON::BI__builtin_neon_vcvtaq_s32_v:
10165 case NEON::BI__builtin_neon_vcvta_u32_v:
10166 case NEON::BI__builtin_neon_vcvtaq_u16_v:
10167 case NEON::BI__builtin_neon_vcvtaq_u32_v:
10168 case NEON::BI__builtin_neon_vcvta_s64_v:
10169 case NEON::BI__builtin_neon_vcvtaq_s64_v:
10170 case NEON::BI__builtin_neon_vcvta_u64_v:
10171 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
10172 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
10173 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10174 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
10176 case NEON::BI__builtin_neon_vcvtm_s16_v:
10177 case NEON::BI__builtin_neon_vcvtm_s32_v:
10178 case NEON::BI__builtin_neon_vcvtmq_s16_v:
10179 case NEON::BI__builtin_neon_vcvtmq_s32_v:
10180 case NEON::BI__builtin_neon_vcvtm_u16_v:
10181 case NEON::BI__builtin_neon_vcvtm_u32_v:
10182 case NEON::BI__builtin_neon_vcvtmq_u16_v:
10183 case NEON::BI__builtin_neon_vcvtmq_u32_v:
10184 case NEON::BI__builtin_neon_vcvtm_s64_v:
10185 case NEON::BI__builtin_neon_vcvtmq_s64_v:
10186 case NEON::BI__builtin_neon_vcvtm_u64_v:
10187 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
10188 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
10189 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10190 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
10192 case NEON::BI__builtin_neon_vcvtn_s16_v:
10193 case NEON::BI__builtin_neon_vcvtn_s32_v:
10194 case NEON::BI__builtin_neon_vcvtnq_s16_v:
10195 case NEON::BI__builtin_neon_vcvtnq_s32_v:
10196 case NEON::BI__builtin_neon_vcvtn_u16_v:
10197 case NEON::BI__builtin_neon_vcvtn_u32_v:
10198 case NEON::BI__builtin_neon_vcvtnq_u16_v:
10199 case NEON::BI__builtin_neon_vcvtnq_u32_v:
10200 case NEON::BI__builtin_neon_vcvtn_s64_v:
10201 case NEON::BI__builtin_neon_vcvtnq_s64_v:
10202 case NEON::BI__builtin_neon_vcvtn_u64_v:
10203 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
10204 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
10205 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10206 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
10208 case NEON::BI__builtin_neon_vcvtp_s16_v:
10209 case NEON::BI__builtin_neon_vcvtp_s32_v:
10210 case NEON::BI__builtin_neon_vcvtpq_s16_v:
10211 case NEON::BI__builtin_neon_vcvtpq_s32_v:
10212 case NEON::BI__builtin_neon_vcvtp_u16_v:
10213 case NEON::BI__builtin_neon_vcvtp_u32_v:
10214 case NEON::BI__builtin_neon_vcvtpq_u16_v:
10215 case NEON::BI__builtin_neon_vcvtpq_u32_v:
10216 case NEON::BI__builtin_neon_vcvtp_s64_v:
10217 case NEON::BI__builtin_neon_vcvtpq_s64_v:
10218 case NEON::BI__builtin_neon_vcvtp_u64_v:
10219 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
10220 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
10221 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10222 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
10224 case NEON::BI__builtin_neon_vmulx_v:
10225 case NEON::BI__builtin_neon_vmulxq_v: {
10226 Int = Intrinsic::aarch64_neon_fmulx;
10227 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
10229 case NEON::BI__builtin_neon_vmulxh_lane_f16:
10230 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
10231 // vmulx_lane should be mapped to Neon scalar mulx after
10232 // extracting the scalar element
10233 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10234 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10236 Int = Intrinsic::aarch64_neon_fmulx;
10237 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
10239 case NEON::BI__builtin_neon_vmul_lane_v:
10240 case NEON::BI__builtin_neon_vmul_laneq_v: {
10241 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
10243 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
10245 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10246 llvm::Type *VTy = GetNeonType(this,
10247 NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
10248 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10249 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10250 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
10251 return Builder.CreateBitCast(Result, Ty);
10253 case NEON::BI__builtin_neon_vnegd_s64:
10254 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
10255 case NEON::BI__builtin_neon_vnegh_f16:
10256 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
10257 case NEON::BI__builtin_neon_vpmaxnm_v:
10258 case NEON::BI__builtin_neon_vpmaxnmq_v: {
10259 Int = Intrinsic::aarch64_neon_fmaxnmp;
10260 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
10262 case NEON::BI__builtin_neon_vpminnm_v:
10263 case NEON::BI__builtin_neon_vpminnmq_v: {
10264 Int = Intrinsic::aarch64_neon_fminnmp;
10265 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
10267 case NEON::BI__builtin_neon_vsqrth_f16: {
10268 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10269 Int = Builder.getIsFPConstrained()
10270 ? Intrinsic::experimental_constrained_sqrt
10272 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
10274 case NEON::BI__builtin_neon_vsqrt_v:
10275 case NEON::BI__builtin_neon_vsqrtq_v: {
10276 Int = Builder.getIsFPConstrained()
10277 ? Intrinsic::experimental_constrained_sqrt
10279 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10280 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
10282 case NEON::BI__builtin_neon_vrbit_v:
10283 case NEON::BI__builtin_neon_vrbitq_v: {
10284 Int = Intrinsic::aarch64_neon_rbit;
10285 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
10287 case NEON::BI__builtin_neon_vaddv_u8:
10288 // FIXME: These are handled by the AArch64 scalar code.
10291 case NEON::BI__builtin_neon_vaddv_s8: {
10292 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10294 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10295 llvm::Type *Tys[2] = { Ty, VTy };
10296 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10297 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10298 return Builder.CreateTrunc(Ops[0], Int8Ty);
10300 case NEON::BI__builtin_neon_vaddv_u16:
10303 case NEON::BI__builtin_neon_vaddv_s16: {
10304 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10306 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10307 llvm::Type *Tys[2] = { Ty, VTy };
10308 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10309 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10310 return Builder.CreateTrunc(Ops[0], Int16Ty);
10312 case NEON::BI__builtin_neon_vaddvq_u8:
10315 case NEON::BI__builtin_neon_vaddvq_s8: {
10316 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10318 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10319 llvm::Type *Tys[2] = { Ty, VTy };
10320 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10321 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10322 return Builder.CreateTrunc(Ops[0], Int8Ty);
10324 case NEON::BI__builtin_neon_vaddvq_u16:
10327 case NEON::BI__builtin_neon_vaddvq_s16: {
10328 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10330 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10331 llvm::Type *Tys[2] = { Ty, VTy };
10332 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10333 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10334 return Builder.CreateTrunc(Ops[0], Int16Ty);
10336 case NEON::BI__builtin_neon_vmaxv_u8: {
10337 Int = Intrinsic::aarch64_neon_umaxv;
10339 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10340 llvm::Type *Tys[2] = { Ty, VTy };
10341 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10342 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10343 return Builder.CreateTrunc(Ops[0], Int8Ty);
10345 case NEON::BI__builtin_neon_vmaxv_u16: {
10346 Int = Intrinsic::aarch64_neon_umaxv;
10348 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10349 llvm::Type *Tys[2] = { Ty, VTy };
10350 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10351 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10352 return Builder.CreateTrunc(Ops[0], Int16Ty);
10354 case NEON::BI__builtin_neon_vmaxvq_u8: {
10355 Int = Intrinsic::aarch64_neon_umaxv;
10357 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10358 llvm::Type *Tys[2] = { Ty, VTy };
10359 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10360 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10361 return Builder.CreateTrunc(Ops[0], Int8Ty);
10363 case NEON::BI__builtin_neon_vmaxvq_u16: {
10364 Int = Intrinsic::aarch64_neon_umaxv;
10366 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10367 llvm::Type *Tys[2] = { Ty, VTy };
10368 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10369 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10370 return Builder.CreateTrunc(Ops[0], Int16Ty);
10372 case NEON::BI__builtin_neon_vmaxv_s8: {
10373 Int = Intrinsic::aarch64_neon_smaxv;
10375 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10376 llvm::Type *Tys[2] = { Ty, VTy };
10377 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10378 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10379 return Builder.CreateTrunc(Ops[0], Int8Ty);
10381 case NEON::BI__builtin_neon_vmaxv_s16: {
10382 Int = Intrinsic::aarch64_neon_smaxv;
10384 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10385 llvm::Type *Tys[2] = { Ty, VTy };
10386 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10387 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10388 return Builder.CreateTrunc(Ops[0], Int16Ty);
10390 case NEON::BI__builtin_neon_vmaxvq_s8: {
10391 Int = Intrinsic::aarch64_neon_smaxv;
10393 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10394 llvm::Type *Tys[2] = { Ty, VTy };
10395 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10396 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10397 return Builder.CreateTrunc(Ops[0], Int8Ty);
10399 case NEON::BI__builtin_neon_vmaxvq_s16: {
10400 Int = Intrinsic::aarch64_neon_smaxv;
10402 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10403 llvm::Type *Tys[2] = { Ty, VTy };
10404 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10405 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10406 return Builder.CreateTrunc(Ops[0], Int16Ty);
10408 case NEON::BI__builtin_neon_vmaxv_f16: {
10409 Int = Intrinsic::aarch64_neon_fmaxv;
10411 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10412 llvm::Type *Tys[2] = { Ty, VTy };
10413 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10414 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10415 return Builder.CreateTrunc(Ops[0], HalfTy);
10417 case NEON::BI__builtin_neon_vmaxvq_f16: {
10418 Int = Intrinsic::aarch64_neon_fmaxv;
10420 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10421 llvm::Type *Tys[2] = { Ty, VTy };
10422 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10423 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10424 return Builder.CreateTrunc(Ops[0], HalfTy);
10426 case NEON::BI__builtin_neon_vminv_u8: {
10427 Int = Intrinsic::aarch64_neon_uminv;
10429 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10430 llvm::Type *Tys[2] = { Ty, VTy };
10431 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10432 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10433 return Builder.CreateTrunc(Ops[0], Int8Ty);
10435 case NEON::BI__builtin_neon_vminv_u16: {
10436 Int = Intrinsic::aarch64_neon_uminv;
10438 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10439 llvm::Type *Tys[2] = { Ty, VTy };
10440 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10441 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10442 return Builder.CreateTrunc(Ops[0], Int16Ty);
10444 case NEON::BI__builtin_neon_vminvq_u8: {
10445 Int = Intrinsic::aarch64_neon_uminv;
10447 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10448 llvm::Type *Tys[2] = { Ty, VTy };
10449 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10450 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10451 return Builder.CreateTrunc(Ops[0], Int8Ty);
10453 case NEON::BI__builtin_neon_vminvq_u16: {
10454 Int = Intrinsic::aarch64_neon_uminv;
10456 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10457 llvm::Type *Tys[2] = { Ty, VTy };
10458 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10459 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10460 return Builder.CreateTrunc(Ops[0], Int16Ty);
10462 case NEON::BI__builtin_neon_vminv_s8: {
10463 Int = Intrinsic::aarch64_neon_sminv;
10465 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10466 llvm::Type *Tys[2] = { Ty, VTy };
10467 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10468 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10469 return Builder.CreateTrunc(Ops[0], Int8Ty);
10471 case NEON::BI__builtin_neon_vminv_s16: {
10472 Int = Intrinsic::aarch64_neon_sminv;
10474 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10475 llvm::Type *Tys[2] = { Ty, VTy };
10476 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10477 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10478 return Builder.CreateTrunc(Ops[0], Int16Ty);
10480 case NEON::BI__builtin_neon_vminvq_s8: {
10481 Int = Intrinsic::aarch64_neon_sminv;
10483 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10484 llvm::Type *Tys[2] = { Ty, VTy };
10485 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10486 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10487 return Builder.CreateTrunc(Ops[0], Int8Ty);
10489 case NEON::BI__builtin_neon_vminvq_s16: {
10490 Int = Intrinsic::aarch64_neon_sminv;
10492 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10493 llvm::Type *Tys[2] = { Ty, VTy };
10494 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10495 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10496 return Builder.CreateTrunc(Ops[0], Int16Ty);
10498 case NEON::BI__builtin_neon_vminv_f16: {
10499 Int = Intrinsic::aarch64_neon_fminv;
10501 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10502 llvm::Type *Tys[2] = { Ty, VTy };
10503 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10504 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10505 return Builder.CreateTrunc(Ops[0], HalfTy);
10507 case NEON::BI__builtin_neon_vminvq_f16: {
10508 Int = Intrinsic::aarch64_neon_fminv;
10510 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10511 llvm::Type *Tys[2] = { Ty, VTy };
10512 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10513 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10514 return Builder.CreateTrunc(Ops[0], HalfTy);
10516 case NEON::BI__builtin_neon_vmaxnmv_f16: {
10517 Int = Intrinsic::aarch64_neon_fmaxnmv;
10519 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10520 llvm::Type *Tys[2] = { Ty, VTy };
10521 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10522 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10523 return Builder.CreateTrunc(Ops[0], HalfTy);
10525 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
10526 Int = Intrinsic::aarch64_neon_fmaxnmv;
10528 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10529 llvm::Type *Tys[2] = { Ty, VTy };
10530 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10531 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10532 return Builder.CreateTrunc(Ops[0], HalfTy);
10534 case NEON::BI__builtin_neon_vminnmv_f16: {
10535 Int = Intrinsic::aarch64_neon_fminnmv;
10537 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10538 llvm::Type *Tys[2] = { Ty, VTy };
10539 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10540 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10541 return Builder.CreateTrunc(Ops[0], HalfTy);
10543 case NEON::BI__builtin_neon_vminnmvq_f16: {
10544 Int = Intrinsic::aarch64_neon_fminnmv;
10546 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10547 llvm::Type *Tys[2] = { Ty, VTy };
10548 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10549 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10550 return Builder.CreateTrunc(Ops[0], HalfTy);
10552 case NEON::BI__builtin_neon_vmul_n_f64: {
10553 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10554 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
10555 return Builder.CreateFMul(Ops[0], RHS);
10557 case NEON::BI__builtin_neon_vaddlv_u8: {
10558 Int = Intrinsic::aarch64_neon_uaddlv;
10560 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10561 llvm::Type *Tys[2] = { Ty, VTy };
10562 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10563 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10564 return Builder.CreateTrunc(Ops[0], Int16Ty);
10566 case NEON::BI__builtin_neon_vaddlv_u16: {
10567 Int = Intrinsic::aarch64_neon_uaddlv;
10569 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10570 llvm::Type *Tys[2] = { Ty, VTy };
10571 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10572 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10574 case NEON::BI__builtin_neon_vaddlvq_u8: {
10575 Int = Intrinsic::aarch64_neon_uaddlv;
10577 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10578 llvm::Type *Tys[2] = { Ty, VTy };
10579 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10580 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10581 return Builder.CreateTrunc(Ops[0], Int16Ty);
10583 case NEON::BI__builtin_neon_vaddlvq_u16: {
10584 Int = Intrinsic::aarch64_neon_uaddlv;
10586 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10587 llvm::Type *Tys[2] = { Ty, VTy };
10588 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10589 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10591 case NEON::BI__builtin_neon_vaddlv_s8: {
10592 Int = Intrinsic::aarch64_neon_saddlv;
10594 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10595 llvm::Type *Tys[2] = { Ty, VTy };
10596 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10597 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10598 return Builder.CreateTrunc(Ops[0], Int16Ty);
10600 case NEON::BI__builtin_neon_vaddlv_s16: {
10601 Int = Intrinsic::aarch64_neon_saddlv;
10603 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10604 llvm::Type *Tys[2] = { Ty, VTy };
10605 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10606 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10608 case NEON::BI__builtin_neon_vaddlvq_s8: {
10609 Int = Intrinsic::aarch64_neon_saddlv;
10611 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10612 llvm::Type *Tys[2] = { Ty, VTy };
10613 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10614 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10615 return Builder.CreateTrunc(Ops[0], Int16Ty);
10617 case NEON::BI__builtin_neon_vaddlvq_s16: {
10618 Int = Intrinsic::aarch64_neon_saddlv;
10620 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10621 llvm::Type *Tys[2] = { Ty, VTy };
10622 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10623 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10625 case NEON::BI__builtin_neon_vsri_n_v:
10626 case NEON::BI__builtin_neon_vsriq_n_v: {
10627 Int = Intrinsic::aarch64_neon_vsri;
10628 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10629 return EmitNeonCall(Intrin, Ops, "vsri_n");
10631 case NEON::BI__builtin_neon_vsli_n_v:
10632 case NEON::BI__builtin_neon_vsliq_n_v: {
10633 Int = Intrinsic::aarch64_neon_vsli;
10634 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10635 return EmitNeonCall(Intrin, Ops, "vsli_n");
10637 case NEON::BI__builtin_neon_vsra_n_v:
10638 case NEON::BI__builtin_neon_vsraq_n_v:
10639 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10640 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
10641 return Builder.CreateAdd(Ops[0], Ops[1]);
10642 case NEON::BI__builtin_neon_vrsra_n_v:
10643 case NEON::BI__builtin_neon_vrsraq_n_v: {
10644 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
10645 SmallVector<llvm::Value*,2> TmpOps;
10646 TmpOps.push_back(Ops[1]);
10647 TmpOps.push_back(Ops[2]);
10648 Function* F = CGM.getIntrinsic(Int, Ty);
10649 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
10650 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
10651 return Builder.CreateAdd(Ops[0], tmp);
10653 case NEON::BI__builtin_neon_vld1_v:
10654 case NEON::BI__builtin_neon_vld1q_v: {
10655 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10656 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
10658 case NEON::BI__builtin_neon_vst1_v:
10659 case NEON::BI__builtin_neon_vst1q_v:
10660 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10661 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10662 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
10663 case NEON::BI__builtin_neon_vld1_lane_v:
10664 case NEON::BI__builtin_neon_vld1q_lane_v: {
10665 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10666 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10667 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10668 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10669 PtrOp0.getAlignment());
10670 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
10672 case NEON::BI__builtin_neon_vld1_dup_v:
10673 case NEON::BI__builtin_neon_vld1q_dup_v: {
10674 Value *V = UndefValue::get(Ty);
10675 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10676 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10677 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10678 PtrOp0.getAlignment());
10679 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
10680 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
10681 return EmitNeonSplat(Ops[0], CI);
10683 case NEON::BI__builtin_neon_vst1_lane_v:
10684 case NEON::BI__builtin_neon_vst1q_lane_v:
10685 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10686 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
10687 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10688 return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
10689 PtrOp0.getAlignment());
10690 case NEON::BI__builtin_neon_vld2_v:
10691 case NEON::BI__builtin_neon_vld2q_v: {
10692 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10693 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10694 llvm::Type *Tys[2] = { VTy, PTy };
10695 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
10696 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
10697 Ops[0] = Builder.CreateBitCast(Ops[0],
10698 llvm::PointerType::getUnqual(Ops[1]->getType()));
10699 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10701 case NEON::BI__builtin_neon_vld3_v:
10702 case NEON::BI__builtin_neon_vld3q_v: {
10703 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10704 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10705 llvm::Type *Tys[2] = { VTy, PTy };
10706 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
10707 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
10708 Ops[0] = Builder.CreateBitCast(Ops[0],
10709 llvm::PointerType::getUnqual(Ops[1]->getType()));
10710 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10712 case NEON::BI__builtin_neon_vld4_v:
10713 case NEON::BI__builtin_neon_vld4q_v: {
10714 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10715 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10716 llvm::Type *Tys[2] = { VTy, PTy };
10717 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
10718 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
10719 Ops[0] = Builder.CreateBitCast(Ops[0],
10720 llvm::PointerType::getUnqual(Ops[1]->getType()));
10721 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10723 case NEON::BI__builtin_neon_vld2_dup_v:
10724 case NEON::BI__builtin_neon_vld2q_dup_v: {
10726 llvm::PointerType::getUnqual(VTy->getElementType());
10727 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10728 llvm::Type *Tys[2] = { VTy, PTy };
10729 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
10730 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
10731 Ops[0] = Builder.CreateBitCast(Ops[0],
10732 llvm::PointerType::getUnqual(Ops[1]->getType()));
10733 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10735 case NEON::BI__builtin_neon_vld3_dup_v:
10736 case NEON::BI__builtin_neon_vld3q_dup_v: {
10738 llvm::PointerType::getUnqual(VTy->getElementType());
10739 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10740 llvm::Type *Tys[2] = { VTy, PTy };
10741 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
10742 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
10743 Ops[0] = Builder.CreateBitCast(Ops[0],
10744 llvm::PointerType::getUnqual(Ops[1]->getType()));
10745 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10747 case NEON::BI__builtin_neon_vld4_dup_v:
10748 case NEON::BI__builtin_neon_vld4q_dup_v: {
10750 llvm::PointerType::getUnqual(VTy->getElementType());
10751 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10752 llvm::Type *Tys[2] = { VTy, PTy };
10753 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
10754 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
10755 Ops[0] = Builder.CreateBitCast(Ops[0],
10756 llvm::PointerType::getUnqual(Ops[1]->getType()));
10757 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10759 case NEON::BI__builtin_neon_vld2_lane_v:
10760 case NEON::BI__builtin_neon_vld2q_lane_v: {
10761 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10762 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
10763 Ops.push_back(Ops[1]);
10764 Ops.erase(Ops.begin()+1);
10765 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10766 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10767 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
10768 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
10769 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10770 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10771 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10773 case NEON::BI__builtin_neon_vld3_lane_v:
10774 case NEON::BI__builtin_neon_vld3q_lane_v: {
10775 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10776 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
10777 Ops.push_back(Ops[1]);
10778 Ops.erase(Ops.begin()+1);
10779 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10780 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10781 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
10782 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
10783 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
10784 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10785 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10786 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10788 case NEON::BI__builtin_neon_vld4_lane_v:
10789 case NEON::BI__builtin_neon_vld4q_lane_v: {
10790 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10791 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
10792 Ops.push_back(Ops[1]);
10793 Ops.erase(Ops.begin()+1);
10794 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10795 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10796 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
10797 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
10798 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
10799 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
10800 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10801 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10802 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10804 case NEON::BI__builtin_neon_vst2_v:
10805 case NEON::BI__builtin_neon_vst2q_v: {
10806 Ops.push_back(Ops[0]);
10807 Ops.erase(Ops.begin());
10808 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
10809 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
10812 case NEON::BI__builtin_neon_vst2_lane_v:
10813 case NEON::BI__builtin_neon_vst2q_lane_v: {
10814 Ops.push_back(Ops[0]);
10815 Ops.erase(Ops.begin());
10816 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
10817 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
10818 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
10821 case NEON::BI__builtin_neon_vst3_v:
10822 case NEON::BI__builtin_neon_vst3q_v: {
10823 Ops.push_back(Ops[0]);
10824 Ops.erase(Ops.begin());
10825 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
10826 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
10829 case NEON::BI__builtin_neon_vst3_lane_v:
10830 case NEON::BI__builtin_neon_vst3q_lane_v: {
10831 Ops.push_back(Ops[0]);
10832 Ops.erase(Ops.begin());
10833 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
10834 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
10835 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
10838 case NEON::BI__builtin_neon_vst4_v:
10839 case NEON::BI__builtin_neon_vst4q_v: {
10840 Ops.push_back(Ops[0]);
10841 Ops.erase(Ops.begin());
10842 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
10843 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
10846 case NEON::BI__builtin_neon_vst4_lane_v:
10847 case NEON::BI__builtin_neon_vst4q_lane_v: {
10848 Ops.push_back(Ops[0]);
10849 Ops.erase(Ops.begin());
10850 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
10851 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
10852 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
10855 case NEON::BI__builtin_neon_vtrn_v:
10856 case NEON::BI__builtin_neon_vtrnq_v: {
10857 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10858 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10859 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10860 Value *SV = nullptr;
10862 for (unsigned vi = 0; vi != 2; ++vi) {
10863 SmallVector<int, 16> Indices;
10864 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
10865 Indices.push_back(i+vi);
10866 Indices.push_back(i+e+vi);
10868 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10869 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
10870 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10874 case NEON::BI__builtin_neon_vuzp_v:
10875 case NEON::BI__builtin_neon_vuzpq_v: {
10876 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10877 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10878 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10879 Value *SV = nullptr;
10881 for (unsigned vi = 0; vi != 2; ++vi) {
10882 SmallVector<int, 16> Indices;
10883 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
10884 Indices.push_back(2*i+vi);
10886 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10887 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
10888 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10892 case NEON::BI__builtin_neon_vzip_v:
10893 case NEON::BI__builtin_neon_vzipq_v: {
10894 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10895 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10896 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10897 Value *SV = nullptr;
10899 for (unsigned vi = 0; vi != 2; ++vi) {
10900 SmallVector<int, 16> Indices;
10901 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
10902 Indices.push_back((i + vi*e) >> 1);
10903 Indices.push_back(((i + vi*e) >> 1)+e);
10905 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10906 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
10907 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10911 case NEON::BI__builtin_neon_vqtbl1q_v: {
10912 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
10915 case NEON::BI__builtin_neon_vqtbl2q_v: {
10916 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
10919 case NEON::BI__builtin_neon_vqtbl3q_v: {
10920 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
10923 case NEON::BI__builtin_neon_vqtbl4q_v: {
10924 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
10927 case NEON::BI__builtin_neon_vqtbx1q_v: {
10928 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
10931 case NEON::BI__builtin_neon_vqtbx2q_v: {
10932 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
10935 case NEON::BI__builtin_neon_vqtbx3q_v: {
10936 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
10939 case NEON::BI__builtin_neon_vqtbx4q_v: {
10940 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
10943 case NEON::BI__builtin_neon_vsqadd_v:
10944 case NEON::BI__builtin_neon_vsqaddq_v: {
10945 Int = Intrinsic::aarch64_neon_usqadd;
10946 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
10948 case NEON::BI__builtin_neon_vuqadd_v:
10949 case NEON::BI__builtin_neon_vuqaddq_v: {
10950 Int = Intrinsic::aarch64_neon_suqadd;
10951 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
10956 Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
10957 const CallExpr *E) {
10958 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
10959 BuiltinID == BPF::BI__builtin_btf_type_id) &&
10960 "unexpected BPF builtin");
10962 switch (BuiltinID) {
10964 llvm_unreachable("Unexpected BPF builtin");
10965 case BPF::BI__builtin_preserve_field_info: {
10966 const Expr *Arg = E->getArg(0);
10967 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
10969 if (!getDebugInfo()) {
10970 CGM.Error(E->getExprLoc(),
10971 "using __builtin_preserve_field_info() without -g");
10972 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
10973 : EmitLValue(Arg).getPointer(*this);
10976 // Enable underlying preserve_*_access_index() generation.
10977 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
10978 IsInPreservedAIRegion = true;
10979 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
10980 : EmitLValue(Arg).getPointer(*this);
10981 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
10983 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10984 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
10986 // Built the IR for the preserve_field_info intrinsic.
10987 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
10988 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
10989 {FieldAddr->getType()});
10990 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
10992 case BPF::BI__builtin_btf_type_id: {
10993 Value *FieldVal = nullptr;
10995 // The LValue cannot be converted Value in order to be used as the function
10996 // parameter. If it is a structure, it is the "alloca" result of the LValue
10997 // (a pointer) is used in the parameter. If it is a simple type,
10998 // the value will be loaded from its corresponding "alloca" and used as
10999 // the parameter. In our case, let us just get a pointer of the LValue
11000 // since we do not really use the parameter. The purpose of parameter
11001 // is to prevent the generated IR llvm.bpf.btf.type.id intrinsic call,
11002 // which carries metadata, from being changed.
11003 bool IsLValue = E->getArg(0)->isLValue();
11005 FieldVal = EmitLValue(E->getArg(0)).getPointer(*this);
11007 FieldVal = EmitScalarExpr(E->getArg(0));
11009 if (!getDebugInfo()) {
11010 CGM.Error(E->getExprLoc(), "using __builtin_btf_type_id() without -g");
11014 // Generate debuginfo type for the first argument.
11015 llvm::DIType *DbgInfo =
11016 getDebugInfo()->getOrCreateStandaloneType(E->getArg(0)->getType(),
11017 E->getArg(0)->getExprLoc());
11019 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11020 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11022 // Built the IR for the btf_type_id intrinsic.
11024 // In the above, we converted LValue argument to a pointer to LValue.
11025 // For example, the following
11027 // C1: __builtin_btf_type_id(v, flag);
11028 // will be converted to
11029 // L1: llvm.bpf.btf.type.id(&v, flag)
11030 // This makes it hard to differentiate from
11031 // C2: __builtin_btf_type_id(&v, flag);
11033 // L2: llvm.bpf.btf.type.id(&v, flag)
11035 // If both C1 and C2 are present in the code, the llvm may later
11036 // on do CSE on L1 and L2, which will result in incorrect tagged types.
11038 // The C1->L1 transformation only happens if the argument of
11039 // __builtin_btf_type_id() is a LValue. So Let us put whether
11040 // the argument is an LValue or not into generated IR. This should
11041 // prevent potential CSE from causing debuginfo type loss.
11043 // The generated IR intrinsics will hence look like
11044 // L1: llvm.bpf.btf.type.id(&v, 1, flag) !di_type_for_{v};
11045 // L2: llvm.bpf.btf.type.id(&v, 0, flag) !di_type_for_{&v};
11046 Constant *CV = ConstantInt::get(IntTy, IsLValue);
11047 llvm::Function *FnBtfTypeId = llvm::Intrinsic::getDeclaration(
11048 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id,
11049 {FieldVal->getType(), CV->getType()});
11050 CallInst *Fn = Builder.CreateCall(FnBtfTypeId, {FieldVal, CV, FlagValue});
11051 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11057 llvm::Value *CodeGenFunction::
11058 BuildVector(ArrayRef<llvm::Value*> Ops) {
11059 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
11060 "Not a power-of-two sized vector!");
11061 bool AllConstants = true;
11062 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
11063 AllConstants &= isa<Constant>(Ops[i]);
11065 // If this is a constant vector, create a ConstantVector.
11066 if (AllConstants) {
11067 SmallVector<llvm::Constant*, 16> CstOps;
11068 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11069 CstOps.push_back(cast<Constant>(Ops[i]));
11070 return llvm::ConstantVector::get(CstOps);
11073 // Otherwise, insertelement the values to build the vector.
11074 Value *Result = llvm::UndefValue::get(
11075 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
11077 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11078 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
11083 // Convert the mask from an integer type to a vector of i1.
11084 static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
11085 unsigned NumElts) {
11087 auto *MaskTy = llvm::FixedVectorType::get(
11088 CGF.Builder.getInt1Ty(),
11089 cast<IntegerType>(Mask->getType())->getBitWidth());
11090 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
11092 // If we have less than 8 elements, then the starting mask was an i8 and
11093 // we need to extract down to the right number of elements.
11096 for (unsigned i = 0; i != NumElts; ++i)
11098 MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
11099 makeArrayRef(Indices, NumElts),
11105 static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11107 // Cast the pointer to right type.
11108 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11109 llvm::PointerType::getUnqual(Ops[1]->getType()));
11111 Value *MaskVec = getMaskVecValue(
11112 CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
11114 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
11117 static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11119 // Cast the pointer to right type.
11120 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11121 llvm::PointerType::getUnqual(Ops[1]->getType()));
11123 Value *MaskVec = getMaskVecValue(
11124 CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
11126 return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
11129 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
11130 ArrayRef<Value *> Ops) {
11131 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11132 llvm::Type *PtrTy = ResultTy->getElementType();
11134 // Cast the pointer to element type.
11135 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11136 llvm::PointerType::getUnqual(PtrTy));
11138 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11140 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
11142 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
11145 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
11146 ArrayRef<Value *> Ops,
11148 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11150 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11152 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
11153 : Intrinsic::x86_avx512_mask_expand;
11154 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
11155 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
11158 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
11159 ArrayRef<Value *> Ops) {
11160 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11161 llvm::Type *PtrTy = ResultTy->getElementType();
11163 // Cast the pointer to element type.
11164 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11165 llvm::PointerType::getUnqual(PtrTy));
11167 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11169 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
11171 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
11174 static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
11175 ArrayRef<Value *> Ops,
11176 bool InvertLHS = false) {
11177 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11178 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
11179 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
11182 LHS = CGF.Builder.CreateNot(LHS);
11184 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
11185 Ops[0]->getType());
11188 static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
11189 Value *Amt, bool IsRight) {
11190 llvm::Type *Ty = Op0->getType();
11192 // Amount may be scalar immediate, in which case create a splat vector.
11193 // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
11194 // we only care about the lowest log2 bits anyway.
11195 if (Amt->getType() != Ty) {
11196 unsigned NumElts = cast<llvm::VectorType>(Ty)->getNumElements();
11197 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
11198 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
11201 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
11202 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
11203 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
11206 static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11208 Value *Op0 = Ops[0];
11209 Value *Op1 = Ops[1];
11210 llvm::Type *Ty = Op0->getType();
11211 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11213 CmpInst::Predicate Pred;
11216 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11219 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
11222 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11225 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
11228 Pred = ICmpInst::ICMP_EQ;
11231 Pred = ICmpInst::ICMP_NE;
11234 return llvm::Constant::getNullValue(Ty); // FALSE
11236 return llvm::Constant::getAllOnesValue(Ty); // TRUE
11238 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
11241 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
11242 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
11246 static Value *EmitX86Select(CodeGenFunction &CGF,
11247 Value *Mask, Value *Op0, Value *Op1) {
11249 // If the mask is all ones just return first argument.
11250 if (const auto *C = dyn_cast<Constant>(Mask))
11251 if (C->isAllOnesValue())
11254 Mask = getMaskVecValue(
11255 CGF, Mask, cast<llvm::VectorType>(Op0->getType())->getNumElements());
11257 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11260 static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
11261 Value *Mask, Value *Op0, Value *Op1) {
11262 // If the mask is all ones just return first argument.
11263 if (const auto *C = dyn_cast<Constant>(Mask))
11264 if (C->isAllOnesValue())
11267 auto *MaskTy = llvm::FixedVectorType::get(
11268 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
11269 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
11270 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
11271 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11274 static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
11275 unsigned NumElts, Value *MaskIn) {
11277 const auto *C = dyn_cast<Constant>(MaskIn);
11278 if (!C || !C->isAllOnesValue())
11279 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
11284 for (unsigned i = 0; i != NumElts; ++i)
11286 for (unsigned i = NumElts; i != 8; ++i)
11287 Indices[i] = i % NumElts + NumElts;
11288 Cmp = CGF.Builder.CreateShuffleVector(
11289 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
11292 return CGF.Builder.CreateBitCast(Cmp,
11293 IntegerType::get(CGF.getLLVMContext(),
11294 std::max(NumElts, 8U)));
11297 static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
11298 bool Signed, ArrayRef<Value *> Ops) {
11299 assert((Ops.size() == 2 || Ops.size() == 4) &&
11300 "Unexpected number of arguments");
11302 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
11306 Cmp = Constant::getNullValue(
11307 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11308 } else if (CC == 7) {
11309 Cmp = Constant::getAllOnesValue(
11310 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11312 ICmpInst::Predicate Pred;
11314 default: llvm_unreachable("Unknown condition code");
11315 case 0: Pred = ICmpInst::ICMP_EQ; break;
11316 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
11317 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
11318 case 4: Pred = ICmpInst::ICMP_NE; break;
11319 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
11320 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
11322 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
11325 Value *MaskIn = nullptr;
11326 if (Ops.size() == 4)
11329 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
11332 static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
11333 Value *Zero = Constant::getNullValue(In->getType());
11334 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
11337 static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
11338 ArrayRef<Value *> Ops, bool IsSigned) {
11339 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
11340 llvm::Type *Ty = Ops[1]->getType();
11344 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
11345 : Intrinsic::x86_avx512_uitofp_round;
11346 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
11347 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
11349 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
11350 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
11353 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11356 static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
11358 llvm::Type *Ty = Ops[0]->getType();
11359 Value *Zero = llvm::Constant::getNullValue(Ty);
11360 Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]);
11361 Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero);
11362 Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub);
11366 static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
11367 ArrayRef<Value *> Ops) {
11368 Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
11369 Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
11371 assert(Ops.size() == 2);
11375 // Lowers X86 FMA intrinsics to IR.
11376 static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11377 unsigned BuiltinID, bool IsAddSub) {
11379 bool Subtract = false;
11380 Intrinsic::ID IID = Intrinsic::not_intrinsic;
11381 switch (BuiltinID) {
11383 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11386 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11387 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11388 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11389 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
11390 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11393 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11394 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11395 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11396 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
11397 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11400 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11401 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11402 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11403 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
11405 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11408 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11409 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11410 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11411 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
11420 C = CGF.Builder.CreateFNeg(C);
11424 // Only handle in case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
11425 if (IID != Intrinsic::not_intrinsic &&
11426 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
11428 Function *Intr = CGF.CGM.getIntrinsic(IID);
11429 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
11431 llvm::Type *Ty = A->getType();
11433 if (CGF.Builder.getIsFPConstrained()) {
11434 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
11435 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
11437 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
11438 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
11442 // Handle any required masking.
11443 Value *MaskFalseVal = nullptr;
11444 switch (BuiltinID) {
11445 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11446 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11447 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11448 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11449 MaskFalseVal = Ops[0];
11451 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11452 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11453 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11454 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11455 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
11457 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11458 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11459 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11460 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11461 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11462 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11463 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11464 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11465 MaskFalseVal = Ops[2];
11470 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
11476 EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
11477 Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
11478 bool NegAcc = false) {
11480 if (Ops.size() > 4)
11481 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
11484 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
11486 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
11487 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11488 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11491 Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
11492 Intrinsic::x86_avx512_vfmadd_f32 :
11493 Intrinsic::x86_avx512_vfmadd_f64;
11494 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11495 {Ops[0], Ops[1], Ops[2], Ops[4]});
11496 } else if (CGF.Builder.getIsFPConstrained()) {
11497 Function *FMA = CGF.CGM.getIntrinsic(
11498 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
11499 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
11501 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
11502 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
11504 // If we have more than 3 arguments, we need to do masking.
11505 if (Ops.size() > 3) {
11506 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
11509 // If we negated the accumulator and the its the PassThru value we need to
11510 // bypass the negate. Conveniently Upper should be the same thing in this
11512 if (NegAcc && PTIdx == 2)
11513 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
11515 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
11517 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
11520 static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
11521 ArrayRef<Value *> Ops) {
11522 llvm::Type *Ty = Ops[0]->getType();
11523 // Arguments have a vXi32 type so cast to vXi64.
11524 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
11525 Ty->getPrimitiveSizeInBits() / 64);
11526 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
11527 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
11530 // Shift left then arithmetic shift right.
11531 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
11532 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
11533 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
11534 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
11535 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
11537 // Clear the upper bits.
11538 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
11539 LHS = CGF.Builder.CreateAnd(LHS, Mask);
11540 RHS = CGF.Builder.CreateAnd(RHS, Mask);
11543 return CGF.Builder.CreateMul(LHS, RHS);
11546 // Emit a masked pternlog intrinsic. This only exists because the header has to
11547 // use a macro and we aren't able to pass the input argument to a pternlog
11548 // builtin and a select builtin without evaluating it twice.
11549 static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
11550 ArrayRef<Value *> Ops) {
11551 llvm::Type *Ty = Ops[0]->getType();
11553 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
11554 unsigned EltWidth = Ty->getScalarSizeInBits();
11556 if (VecWidth == 128 && EltWidth == 32)
11557 IID = Intrinsic::x86_avx512_pternlog_d_128;
11558 else if (VecWidth == 256 && EltWidth == 32)
11559 IID = Intrinsic::x86_avx512_pternlog_d_256;
11560 else if (VecWidth == 512 && EltWidth == 32)
11561 IID = Intrinsic::x86_avx512_pternlog_d_512;
11562 else if (VecWidth == 128 && EltWidth == 64)
11563 IID = Intrinsic::x86_avx512_pternlog_q_128;
11564 else if (VecWidth == 256 && EltWidth == 64)
11565 IID = Intrinsic::x86_avx512_pternlog_q_256;
11566 else if (VecWidth == 512 && EltWidth == 64)
11567 IID = Intrinsic::x86_avx512_pternlog_q_512;
11569 llvm_unreachable("Unexpected intrinsic");
11571 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11573 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
11574 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
11577 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
11578 llvm::Type *DstTy) {
11579 unsigned NumberOfElements = cast<llvm::VectorType>(DstTy)->getNumElements();
11580 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
11581 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
11584 // Emit addition or subtraction with signed/unsigned saturation.
11585 static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF,
11586 ArrayRef<Value *> Ops, bool IsSigned,
11588 Intrinsic::ID IID =
11589 IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
11590 : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
11591 llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
11592 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
11595 Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
11596 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
11597 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
11598 return EmitX86CpuIs(CPUStr);
11601 // Convert F16 halfs to floats.
11602 static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
11603 ArrayRef<Value *> Ops,
11604 llvm::Type *DstTy) {
11605 assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
11606 "Unknown cvtph2ps intrinsic");
11608 // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
11609 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
11611 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
11612 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
11615 unsigned NumDstElts = cast<llvm::VectorType>(DstTy)->getNumElements();
11616 Value *Src = Ops[0];
11618 // Extract the subvector.
11619 if (NumDstElts != cast<llvm::VectorType>(Src->getType())->getNumElements()) {
11620 assert(NumDstElts == 4 && "Unexpected vector size");
11621 Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
11622 ArrayRef<int>{0, 1, 2, 3});
11625 // Bitcast from vXi16 to vXf16.
11626 auto *HalfTy = llvm::FixedVectorType::get(
11627 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
11628 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
11630 // Perform the fp-extension.
11631 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
11633 if (Ops.size() >= 3)
11634 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11638 // Convert a BF16 to a float.
11639 static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
11641 ArrayRef<Value *> Ops) {
11642 llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
11643 Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
11644 Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
11645 llvm::Type *ResultType = CGF.ConvertType(E->getType());
11646 Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
11650 Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
11652 llvm::Type *Int32Ty = Builder.getInt32Ty();
11654 // Matching the struct layout from the compiler-rt/libgcc structure that is
11656 // unsigned int __cpu_vendor;
11657 // unsigned int __cpu_type;
11658 // unsigned int __cpu_subtype;
11659 // unsigned int __cpu_features[1];
11660 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11661 llvm::ArrayType::get(Int32Ty, 1));
11663 // Grab the global __cpu_model.
11664 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11665 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11667 // Calculate the index needed to access the correct field based on the
11668 // range. Also adjust the expected value.
11671 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
11672 #define X86_VENDOR(ENUM, STRING) \
11673 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
11674 #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
11675 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11676 #define X86_CPU_TYPE(ENUM, STR) \
11677 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11678 #define X86_CPU_SUBTYPE(ENUM, STR) \
11679 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
11680 #include "llvm/Support/X86TargetParser.def"
11682 assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
11684 // Grab the appropriate field from __cpu_model.
11685 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
11686 ConstantInt::get(Int32Ty, Index)};
11687 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
11688 CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
11690 // Check the value of the field against the requested value.
11691 return Builder.CreateICmpEQ(CpuValue,
11692 llvm::ConstantInt::get(Int32Ty, Value));
11695 Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
11696 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
11697 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
11698 return EmitX86CpuSupports(FeatureStr);
11702 CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
11703 // Processor features and mapping to processor feature value.
11704 uint64_t FeaturesMask = 0;
11705 for (const StringRef &FeatureStr : FeatureStrs) {
11707 StringSwitch<unsigned>(FeatureStr)
11708 #define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
11709 #include "llvm/Support/X86TargetParser.def"
11711 FeaturesMask |= (1ULL << Feature);
11713 return FeaturesMask;
11716 Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
11717 return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
11720 llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
11721 uint32_t Features1 = Lo_32(FeaturesMask);
11722 uint32_t Features2 = Hi_32(FeaturesMask);
11724 Value *Result = Builder.getTrue();
11726 if (Features1 != 0) {
11727 // Matching the struct layout from the compiler-rt/libgcc structure that is
11729 // unsigned int __cpu_vendor;
11730 // unsigned int __cpu_type;
11731 // unsigned int __cpu_subtype;
11732 // unsigned int __cpu_features[1];
11733 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11734 llvm::ArrayType::get(Int32Ty, 1));
11736 // Grab the global __cpu_model.
11737 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11738 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11740 // Grab the first (0th) element from the field __cpu_features off of the
11741 // global in the struct STy.
11742 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
11743 Builder.getInt32(0)};
11744 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
11746 Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
11748 // Check the value of the bit corresponding to the feature requested.
11749 Value *Mask = Builder.getInt32(Features1);
11750 Value *Bitset = Builder.CreateAnd(Features, Mask);
11751 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
11752 Result = Builder.CreateAnd(Result, Cmp);
11755 if (Features2 != 0) {
11756 llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
11757 "__cpu_features2");
11758 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
11761 Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
11763 // Check the value of the bit corresponding to the feature requested.
11764 Value *Mask = Builder.getInt32(Features2);
11765 Value *Bitset = Builder.CreateAnd(Features, Mask);
11766 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
11767 Result = Builder.CreateAnd(Result, Cmp);
11773 Value *CodeGenFunction::EmitX86CpuInit() {
11774 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
11775 /*Variadic*/ false);
11776 llvm::FunctionCallee Func =
11777 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
11778 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
11779 cast<llvm::GlobalValue>(Func.getCallee())
11780 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
11781 return Builder.CreateCall(Func);
11784 Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
11785 const CallExpr *E) {
11786 if (BuiltinID == X86::BI__builtin_cpu_is)
11787 return EmitX86CpuIs(E);
11788 if (BuiltinID == X86::BI__builtin_cpu_supports)
11789 return EmitX86CpuSupports(E);
11790 if (BuiltinID == X86::BI__builtin_cpu_init)
11791 return EmitX86CpuInit();
11793 SmallVector<Value*, 4> Ops;
11795 // Find out if any arguments are required to be integer constant expressions.
11796 unsigned ICEArguments = 0;
11797 ASTContext::GetBuiltinTypeError Error;
11798 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
11799 assert(Error == ASTContext::GE_None && "Should not codegen an error");
11801 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
11802 // If this is a normal argument, just emit it as a scalar.
11803 if ((ICEArguments & (1 << i)) == 0) {
11804 Ops.push_back(EmitScalarExpr(E->getArg(i)));
11808 // If this is required to be a constant, constant fold it so that we know
11809 // that the generated intrinsic gets a ConstantInt.
11810 llvm::APSInt Result;
11811 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
11812 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
11813 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
11816 // These exist so that the builtin that takes an immediate can be bounds
11817 // checked by clang to avoid passing bad immediates to the backend. Since
11818 // AVX has a larger immediate than SSE we would need separate builtins to
11819 // do the different bounds checking. Rather than create a clang specific
11820 // SSE only builtin, this implements eight separate builtins to match gcc
11822 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
11823 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
11824 llvm::Function *F = CGM.getIntrinsic(ID);
11825 return Builder.CreateCall(F, Ops);
11828 // For the vector forms of FP comparisons, translate the builtins directly to
11830 // TODO: The builtins could be removed if the SSE header files used vector
11831 // extension comparisons directly (vector ordered/unordered may need
11832 // additional support via __builtin_isnan()).
11833 auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred,
11834 bool IsSignaling) {
11837 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
11839 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
11840 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
11841 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
11842 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
11843 return Builder.CreateBitCast(Sext, FPVecTy);
11846 switch (BuiltinID) {
11847 default: return nullptr;
11848 case X86::BI_mm_prefetch: {
11849 Value *Address = Ops[0];
11850 ConstantInt *C = cast<ConstantInt>(Ops[1]);
11851 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
11852 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
11853 Value *Data = ConstantInt::get(Int32Ty, 1);
11854 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
11855 return Builder.CreateCall(F, {Address, RW, Locality, Data});
11857 case X86::BI_mm_clflush: {
11858 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
11861 case X86::BI_mm_lfence: {
11862 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
11864 case X86::BI_mm_mfence: {
11865 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
11867 case X86::BI_mm_sfence: {
11868 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
11870 case X86::BI_mm_pause: {
11871 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
11873 case X86::BI__rdtsc: {
11874 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
11876 case X86::BI__builtin_ia32_rdtscp: {
11877 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
11878 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
11880 return Builder.CreateExtractValue(Call, 0);
11882 case X86::BI__builtin_ia32_lzcnt_u16:
11883 case X86::BI__builtin_ia32_lzcnt_u32:
11884 case X86::BI__builtin_ia32_lzcnt_u64: {
11885 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
11886 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
11888 case X86::BI__builtin_ia32_tzcnt_u16:
11889 case X86::BI__builtin_ia32_tzcnt_u32:
11890 case X86::BI__builtin_ia32_tzcnt_u64: {
11891 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
11892 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
11894 case X86::BI__builtin_ia32_undef128:
11895 case X86::BI__builtin_ia32_undef256:
11896 case X86::BI__builtin_ia32_undef512:
11897 // The x86 definition of "undef" is not the same as the LLVM definition
11898 // (PR32176). We leave optimizing away an unnecessary zero constant to the
11899 // IR optimizer and backend.
11900 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
11901 // value, we should use that here instead of a zero.
11902 return llvm::Constant::getNullValue(ConvertType(E->getType()));
11903 case X86::BI__builtin_ia32_vec_init_v8qi:
11904 case X86::BI__builtin_ia32_vec_init_v4hi:
11905 case X86::BI__builtin_ia32_vec_init_v2si:
11906 return Builder.CreateBitCast(BuildVector(Ops),
11907 llvm::Type::getX86_MMXTy(getLLVMContext()));
11908 case X86::BI__builtin_ia32_vec_ext_v2si:
11909 case X86::BI__builtin_ia32_vec_ext_v16qi:
11910 case X86::BI__builtin_ia32_vec_ext_v8hi:
11911 case X86::BI__builtin_ia32_vec_ext_v4si:
11912 case X86::BI__builtin_ia32_vec_ext_v4sf:
11913 case X86::BI__builtin_ia32_vec_ext_v2di:
11914 case X86::BI__builtin_ia32_vec_ext_v32qi:
11915 case X86::BI__builtin_ia32_vec_ext_v16hi:
11916 case X86::BI__builtin_ia32_vec_ext_v8si:
11917 case X86::BI__builtin_ia32_vec_ext_v4di: {
11919 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
11920 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
11921 Index &= NumElts - 1;
11922 // These builtins exist so we can ensure the index is an ICE and in range.
11923 // Otherwise we could just do this in the header file.
11924 return Builder.CreateExtractElement(Ops[0], Index);
11926 case X86::BI__builtin_ia32_vec_set_v16qi:
11927 case X86::BI__builtin_ia32_vec_set_v8hi:
11928 case X86::BI__builtin_ia32_vec_set_v4si:
11929 case X86::BI__builtin_ia32_vec_set_v2di:
11930 case X86::BI__builtin_ia32_vec_set_v32qi:
11931 case X86::BI__builtin_ia32_vec_set_v16hi:
11932 case X86::BI__builtin_ia32_vec_set_v8si:
11933 case X86::BI__builtin_ia32_vec_set_v4di: {
11935 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
11936 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
11937 Index &= NumElts - 1;
11938 // These builtins exist so we can ensure the index is an ICE and in range.
11939 // Otherwise we could just do this in the header file.
11940 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
11942 case X86::BI_mm_setcsr:
11943 case X86::BI__builtin_ia32_ldmxcsr: {
11944 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
11945 Builder.CreateStore(Ops[0], Tmp);
11946 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
11947 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
11949 case X86::BI_mm_getcsr:
11950 case X86::BI__builtin_ia32_stmxcsr: {
11951 Address Tmp = CreateMemTemp(E->getType());
11952 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
11953 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
11954 return Builder.CreateLoad(Tmp, "stmxcsr");
11956 case X86::BI__builtin_ia32_xsave:
11957 case X86::BI__builtin_ia32_xsave64:
11958 case X86::BI__builtin_ia32_xrstor:
11959 case X86::BI__builtin_ia32_xrstor64:
11960 case X86::BI__builtin_ia32_xsaveopt:
11961 case X86::BI__builtin_ia32_xsaveopt64:
11962 case X86::BI__builtin_ia32_xrstors:
11963 case X86::BI__builtin_ia32_xrstors64:
11964 case X86::BI__builtin_ia32_xsavec:
11965 case X86::BI__builtin_ia32_xsavec64:
11966 case X86::BI__builtin_ia32_xsaves:
11967 case X86::BI__builtin_ia32_xsaves64:
11968 case X86::BI__builtin_ia32_xsetbv:
11969 case X86::BI_xsetbv: {
11971 #define INTRINSIC_X86_XSAVE_ID(NAME) \
11972 case X86::BI__builtin_ia32_##NAME: \
11973 ID = Intrinsic::x86_##NAME; \
11975 switch (BuiltinID) {
11976 default: llvm_unreachable("Unsupported intrinsic!");
11977 INTRINSIC_X86_XSAVE_ID(xsave);
11978 INTRINSIC_X86_XSAVE_ID(xsave64);
11979 INTRINSIC_X86_XSAVE_ID(xrstor);
11980 INTRINSIC_X86_XSAVE_ID(xrstor64);
11981 INTRINSIC_X86_XSAVE_ID(xsaveopt);
11982 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
11983 INTRINSIC_X86_XSAVE_ID(xrstors);
11984 INTRINSIC_X86_XSAVE_ID(xrstors64);
11985 INTRINSIC_X86_XSAVE_ID(xsavec);
11986 INTRINSIC_X86_XSAVE_ID(xsavec64);
11987 INTRINSIC_X86_XSAVE_ID(xsaves);
11988 INTRINSIC_X86_XSAVE_ID(xsaves64);
11989 INTRINSIC_X86_XSAVE_ID(xsetbv);
11990 case X86::BI_xsetbv:
11991 ID = Intrinsic::x86_xsetbv;
11994 #undef INTRINSIC_X86_XSAVE_ID
11995 Value *Mhi = Builder.CreateTrunc(
11996 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
11997 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
11999 Ops.push_back(Mlo);
12000 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12002 case X86::BI__builtin_ia32_xgetbv:
12003 case X86::BI_xgetbv:
12004 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
12005 case X86::BI__builtin_ia32_storedqudi128_mask:
12006 case X86::BI__builtin_ia32_storedqusi128_mask:
12007 case X86::BI__builtin_ia32_storedquhi128_mask:
12008 case X86::BI__builtin_ia32_storedquqi128_mask:
12009 case X86::BI__builtin_ia32_storeupd128_mask:
12010 case X86::BI__builtin_ia32_storeups128_mask:
12011 case X86::BI__builtin_ia32_storedqudi256_mask:
12012 case X86::BI__builtin_ia32_storedqusi256_mask:
12013 case X86::BI__builtin_ia32_storedquhi256_mask:
12014 case X86::BI__builtin_ia32_storedquqi256_mask:
12015 case X86::BI__builtin_ia32_storeupd256_mask:
12016 case X86::BI__builtin_ia32_storeups256_mask:
12017 case X86::BI__builtin_ia32_storedqudi512_mask:
12018 case X86::BI__builtin_ia32_storedqusi512_mask:
12019 case X86::BI__builtin_ia32_storedquhi512_mask:
12020 case X86::BI__builtin_ia32_storedquqi512_mask:
12021 case X86::BI__builtin_ia32_storeupd512_mask:
12022 case X86::BI__builtin_ia32_storeups512_mask:
12023 return EmitX86MaskedStore(*this, Ops, Align(1));
12025 case X86::BI__builtin_ia32_storess128_mask:
12026 case X86::BI__builtin_ia32_storesd128_mask:
12027 return EmitX86MaskedStore(*this, Ops, Align(1));
12029 case X86::BI__builtin_ia32_vpopcntb_128:
12030 case X86::BI__builtin_ia32_vpopcntd_128:
12031 case X86::BI__builtin_ia32_vpopcntq_128:
12032 case X86::BI__builtin_ia32_vpopcntw_128:
12033 case X86::BI__builtin_ia32_vpopcntb_256:
12034 case X86::BI__builtin_ia32_vpopcntd_256:
12035 case X86::BI__builtin_ia32_vpopcntq_256:
12036 case X86::BI__builtin_ia32_vpopcntw_256:
12037 case X86::BI__builtin_ia32_vpopcntb_512:
12038 case X86::BI__builtin_ia32_vpopcntd_512:
12039 case X86::BI__builtin_ia32_vpopcntq_512:
12040 case X86::BI__builtin_ia32_vpopcntw_512: {
12041 llvm::Type *ResultType = ConvertType(E->getType());
12042 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
12043 return Builder.CreateCall(F, Ops);
12045 case X86::BI__builtin_ia32_cvtmask2b128:
12046 case X86::BI__builtin_ia32_cvtmask2b256:
12047 case X86::BI__builtin_ia32_cvtmask2b512:
12048 case X86::BI__builtin_ia32_cvtmask2w128:
12049 case X86::BI__builtin_ia32_cvtmask2w256:
12050 case X86::BI__builtin_ia32_cvtmask2w512:
12051 case X86::BI__builtin_ia32_cvtmask2d128:
12052 case X86::BI__builtin_ia32_cvtmask2d256:
12053 case X86::BI__builtin_ia32_cvtmask2d512:
12054 case X86::BI__builtin_ia32_cvtmask2q128:
12055 case X86::BI__builtin_ia32_cvtmask2q256:
12056 case X86::BI__builtin_ia32_cvtmask2q512:
12057 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
12059 case X86::BI__builtin_ia32_cvtb2mask128:
12060 case X86::BI__builtin_ia32_cvtb2mask256:
12061 case X86::BI__builtin_ia32_cvtb2mask512:
12062 case X86::BI__builtin_ia32_cvtw2mask128:
12063 case X86::BI__builtin_ia32_cvtw2mask256:
12064 case X86::BI__builtin_ia32_cvtw2mask512:
12065 case X86::BI__builtin_ia32_cvtd2mask128:
12066 case X86::BI__builtin_ia32_cvtd2mask256:
12067 case X86::BI__builtin_ia32_cvtd2mask512:
12068 case X86::BI__builtin_ia32_cvtq2mask128:
12069 case X86::BI__builtin_ia32_cvtq2mask256:
12070 case X86::BI__builtin_ia32_cvtq2mask512:
12071 return EmitX86ConvertToMask(*this, Ops[0]);
12073 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
12074 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
12075 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
12076 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
12077 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
12078 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
12079 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
12080 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
12082 case X86::BI__builtin_ia32_vfmaddss3:
12083 case X86::BI__builtin_ia32_vfmaddsd3:
12084 case X86::BI__builtin_ia32_vfmaddss3_mask:
12085 case X86::BI__builtin_ia32_vfmaddsd3_mask:
12086 return EmitScalarFMAExpr(*this, Ops, Ops[0]);
12087 case X86::BI__builtin_ia32_vfmaddss:
12088 case X86::BI__builtin_ia32_vfmaddsd:
12089 return EmitScalarFMAExpr(*this, Ops,
12090 Constant::getNullValue(Ops[0]->getType()));
12091 case X86::BI__builtin_ia32_vfmaddss3_maskz:
12092 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
12093 return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
12094 case X86::BI__builtin_ia32_vfmaddss3_mask3:
12095 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
12096 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
12097 case X86::BI__builtin_ia32_vfmsubss3_mask3:
12098 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
12099 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
12101 case X86::BI__builtin_ia32_vfmaddps:
12102 case X86::BI__builtin_ia32_vfmaddpd:
12103 case X86::BI__builtin_ia32_vfmaddps256:
12104 case X86::BI__builtin_ia32_vfmaddpd256:
12105 case X86::BI__builtin_ia32_vfmaddps512_mask:
12106 case X86::BI__builtin_ia32_vfmaddps512_maskz:
12107 case X86::BI__builtin_ia32_vfmaddps512_mask3:
12108 case X86::BI__builtin_ia32_vfmsubps512_mask3:
12109 case X86::BI__builtin_ia32_vfmaddpd512_mask:
12110 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
12111 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
12112 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
12113 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
12114 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
12115 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12116 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12117 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12118 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12119 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12120 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12121 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12122 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
12124 case X86::BI__builtin_ia32_movdqa32store128_mask:
12125 case X86::BI__builtin_ia32_movdqa64store128_mask:
12126 case X86::BI__builtin_ia32_storeaps128_mask:
12127 case X86::BI__builtin_ia32_storeapd128_mask:
12128 case X86::BI__builtin_ia32_movdqa32store256_mask:
12129 case X86::BI__builtin_ia32_movdqa64store256_mask:
12130 case X86::BI__builtin_ia32_storeaps256_mask:
12131 case X86::BI__builtin_ia32_storeapd256_mask:
12132 case X86::BI__builtin_ia32_movdqa32store512_mask:
12133 case X86::BI__builtin_ia32_movdqa64store512_mask:
12134 case X86::BI__builtin_ia32_storeaps512_mask:
12135 case X86::BI__builtin_ia32_storeapd512_mask:
12136 return EmitX86MaskedStore(
12138 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12140 case X86::BI__builtin_ia32_loadups128_mask:
12141 case X86::BI__builtin_ia32_loadups256_mask:
12142 case X86::BI__builtin_ia32_loadups512_mask:
12143 case X86::BI__builtin_ia32_loadupd128_mask:
12144 case X86::BI__builtin_ia32_loadupd256_mask:
12145 case X86::BI__builtin_ia32_loadupd512_mask:
12146 case X86::BI__builtin_ia32_loaddquqi128_mask:
12147 case X86::BI__builtin_ia32_loaddquqi256_mask:
12148 case X86::BI__builtin_ia32_loaddquqi512_mask:
12149 case X86::BI__builtin_ia32_loaddquhi128_mask:
12150 case X86::BI__builtin_ia32_loaddquhi256_mask:
12151 case X86::BI__builtin_ia32_loaddquhi512_mask:
12152 case X86::BI__builtin_ia32_loaddqusi128_mask:
12153 case X86::BI__builtin_ia32_loaddqusi256_mask:
12154 case X86::BI__builtin_ia32_loaddqusi512_mask:
12155 case X86::BI__builtin_ia32_loaddqudi128_mask:
12156 case X86::BI__builtin_ia32_loaddqudi256_mask:
12157 case X86::BI__builtin_ia32_loaddqudi512_mask:
12158 return EmitX86MaskedLoad(*this, Ops, Align(1));
12160 case X86::BI__builtin_ia32_loadss128_mask:
12161 case X86::BI__builtin_ia32_loadsd128_mask:
12162 return EmitX86MaskedLoad(*this, Ops, Align(1));
12164 case X86::BI__builtin_ia32_loadaps128_mask:
12165 case X86::BI__builtin_ia32_loadaps256_mask:
12166 case X86::BI__builtin_ia32_loadaps512_mask:
12167 case X86::BI__builtin_ia32_loadapd128_mask:
12168 case X86::BI__builtin_ia32_loadapd256_mask:
12169 case X86::BI__builtin_ia32_loadapd512_mask:
12170 case X86::BI__builtin_ia32_movdqa32load128_mask:
12171 case X86::BI__builtin_ia32_movdqa32load256_mask:
12172 case X86::BI__builtin_ia32_movdqa32load512_mask:
12173 case X86::BI__builtin_ia32_movdqa64load128_mask:
12174 case X86::BI__builtin_ia32_movdqa64load256_mask:
12175 case X86::BI__builtin_ia32_movdqa64load512_mask:
12176 return EmitX86MaskedLoad(
12178 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12180 case X86::BI__builtin_ia32_expandloaddf128_mask:
12181 case X86::BI__builtin_ia32_expandloaddf256_mask:
12182 case X86::BI__builtin_ia32_expandloaddf512_mask:
12183 case X86::BI__builtin_ia32_expandloadsf128_mask:
12184 case X86::BI__builtin_ia32_expandloadsf256_mask:
12185 case X86::BI__builtin_ia32_expandloadsf512_mask:
12186 case X86::BI__builtin_ia32_expandloaddi128_mask:
12187 case X86::BI__builtin_ia32_expandloaddi256_mask:
12188 case X86::BI__builtin_ia32_expandloaddi512_mask:
12189 case X86::BI__builtin_ia32_expandloadsi128_mask:
12190 case X86::BI__builtin_ia32_expandloadsi256_mask:
12191 case X86::BI__builtin_ia32_expandloadsi512_mask:
12192 case X86::BI__builtin_ia32_expandloadhi128_mask:
12193 case X86::BI__builtin_ia32_expandloadhi256_mask:
12194 case X86::BI__builtin_ia32_expandloadhi512_mask:
12195 case X86::BI__builtin_ia32_expandloadqi128_mask:
12196 case X86::BI__builtin_ia32_expandloadqi256_mask:
12197 case X86::BI__builtin_ia32_expandloadqi512_mask:
12198 return EmitX86ExpandLoad(*this, Ops);
12200 case X86::BI__builtin_ia32_compressstoredf128_mask:
12201 case X86::BI__builtin_ia32_compressstoredf256_mask:
12202 case X86::BI__builtin_ia32_compressstoredf512_mask:
12203 case X86::BI__builtin_ia32_compressstoresf128_mask:
12204 case X86::BI__builtin_ia32_compressstoresf256_mask:
12205 case X86::BI__builtin_ia32_compressstoresf512_mask:
12206 case X86::BI__builtin_ia32_compressstoredi128_mask:
12207 case X86::BI__builtin_ia32_compressstoredi256_mask:
12208 case X86::BI__builtin_ia32_compressstoredi512_mask:
12209 case X86::BI__builtin_ia32_compressstoresi128_mask:
12210 case X86::BI__builtin_ia32_compressstoresi256_mask:
12211 case X86::BI__builtin_ia32_compressstoresi512_mask:
12212 case X86::BI__builtin_ia32_compressstorehi128_mask:
12213 case X86::BI__builtin_ia32_compressstorehi256_mask:
12214 case X86::BI__builtin_ia32_compressstorehi512_mask:
12215 case X86::BI__builtin_ia32_compressstoreqi128_mask:
12216 case X86::BI__builtin_ia32_compressstoreqi256_mask:
12217 case X86::BI__builtin_ia32_compressstoreqi512_mask:
12218 return EmitX86CompressStore(*this, Ops);
12220 case X86::BI__builtin_ia32_expanddf128_mask:
12221 case X86::BI__builtin_ia32_expanddf256_mask:
12222 case X86::BI__builtin_ia32_expanddf512_mask:
12223 case X86::BI__builtin_ia32_expandsf128_mask:
12224 case X86::BI__builtin_ia32_expandsf256_mask:
12225 case X86::BI__builtin_ia32_expandsf512_mask:
12226 case X86::BI__builtin_ia32_expanddi128_mask:
12227 case X86::BI__builtin_ia32_expanddi256_mask:
12228 case X86::BI__builtin_ia32_expanddi512_mask:
12229 case X86::BI__builtin_ia32_expandsi128_mask:
12230 case X86::BI__builtin_ia32_expandsi256_mask:
12231 case X86::BI__builtin_ia32_expandsi512_mask:
12232 case X86::BI__builtin_ia32_expandhi128_mask:
12233 case X86::BI__builtin_ia32_expandhi256_mask:
12234 case X86::BI__builtin_ia32_expandhi512_mask:
12235 case X86::BI__builtin_ia32_expandqi128_mask:
12236 case X86::BI__builtin_ia32_expandqi256_mask:
12237 case X86::BI__builtin_ia32_expandqi512_mask:
12238 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
12240 case X86::BI__builtin_ia32_compressdf128_mask:
12241 case X86::BI__builtin_ia32_compressdf256_mask:
12242 case X86::BI__builtin_ia32_compressdf512_mask:
12243 case X86::BI__builtin_ia32_compresssf128_mask:
12244 case X86::BI__builtin_ia32_compresssf256_mask:
12245 case X86::BI__builtin_ia32_compresssf512_mask:
12246 case X86::BI__builtin_ia32_compressdi128_mask:
12247 case X86::BI__builtin_ia32_compressdi256_mask:
12248 case X86::BI__builtin_ia32_compressdi512_mask:
12249 case X86::BI__builtin_ia32_compresssi128_mask:
12250 case X86::BI__builtin_ia32_compresssi256_mask:
12251 case X86::BI__builtin_ia32_compresssi512_mask:
12252 case X86::BI__builtin_ia32_compresshi128_mask:
12253 case X86::BI__builtin_ia32_compresshi256_mask:
12254 case X86::BI__builtin_ia32_compresshi512_mask:
12255 case X86::BI__builtin_ia32_compressqi128_mask:
12256 case X86::BI__builtin_ia32_compressqi256_mask:
12257 case X86::BI__builtin_ia32_compressqi512_mask:
12258 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
12260 case X86::BI__builtin_ia32_gather3div2df:
12261 case X86::BI__builtin_ia32_gather3div2di:
12262 case X86::BI__builtin_ia32_gather3div4df:
12263 case X86::BI__builtin_ia32_gather3div4di:
12264 case X86::BI__builtin_ia32_gather3div4sf:
12265 case X86::BI__builtin_ia32_gather3div4si:
12266 case X86::BI__builtin_ia32_gather3div8sf:
12267 case X86::BI__builtin_ia32_gather3div8si:
12268 case X86::BI__builtin_ia32_gather3siv2df:
12269 case X86::BI__builtin_ia32_gather3siv2di:
12270 case X86::BI__builtin_ia32_gather3siv4df:
12271 case X86::BI__builtin_ia32_gather3siv4di:
12272 case X86::BI__builtin_ia32_gather3siv4sf:
12273 case X86::BI__builtin_ia32_gather3siv4si:
12274 case X86::BI__builtin_ia32_gather3siv8sf:
12275 case X86::BI__builtin_ia32_gather3siv8si:
12276 case X86::BI__builtin_ia32_gathersiv8df:
12277 case X86::BI__builtin_ia32_gathersiv16sf:
12278 case X86::BI__builtin_ia32_gatherdiv8df:
12279 case X86::BI__builtin_ia32_gatherdiv16sf:
12280 case X86::BI__builtin_ia32_gathersiv8di:
12281 case X86::BI__builtin_ia32_gathersiv16si:
12282 case X86::BI__builtin_ia32_gatherdiv8di:
12283 case X86::BI__builtin_ia32_gatherdiv16si: {
12285 switch (BuiltinID) {
12286 default: llvm_unreachable("Unexpected builtin");
12287 case X86::BI__builtin_ia32_gather3div2df:
12288 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
12290 case X86::BI__builtin_ia32_gather3div2di:
12291 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
12293 case X86::BI__builtin_ia32_gather3div4df:
12294 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
12296 case X86::BI__builtin_ia32_gather3div4di:
12297 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
12299 case X86::BI__builtin_ia32_gather3div4sf:
12300 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
12302 case X86::BI__builtin_ia32_gather3div4si:
12303 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
12305 case X86::BI__builtin_ia32_gather3div8sf:
12306 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
12308 case X86::BI__builtin_ia32_gather3div8si:
12309 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
12311 case X86::BI__builtin_ia32_gather3siv2df:
12312 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
12314 case X86::BI__builtin_ia32_gather3siv2di:
12315 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
12317 case X86::BI__builtin_ia32_gather3siv4df:
12318 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
12320 case X86::BI__builtin_ia32_gather3siv4di:
12321 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
12323 case X86::BI__builtin_ia32_gather3siv4sf:
12324 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
12326 case X86::BI__builtin_ia32_gather3siv4si:
12327 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
12329 case X86::BI__builtin_ia32_gather3siv8sf:
12330 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
12332 case X86::BI__builtin_ia32_gather3siv8si:
12333 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
12335 case X86::BI__builtin_ia32_gathersiv8df:
12336 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
12338 case X86::BI__builtin_ia32_gathersiv16sf:
12339 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
12341 case X86::BI__builtin_ia32_gatherdiv8df:
12342 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
12344 case X86::BI__builtin_ia32_gatherdiv16sf:
12345 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
12347 case X86::BI__builtin_ia32_gathersiv8di:
12348 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
12350 case X86::BI__builtin_ia32_gathersiv16si:
12351 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
12353 case X86::BI__builtin_ia32_gatherdiv8di:
12354 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
12356 case X86::BI__builtin_ia32_gatherdiv16si:
12357 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
12362 std::min(cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(),
12363 cast<llvm::VectorType>(Ops[2]->getType())->getNumElements());
12364 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
12365 Function *Intr = CGM.getIntrinsic(IID);
12366 return Builder.CreateCall(Intr, Ops);
12369 case X86::BI__builtin_ia32_scattersiv8df:
12370 case X86::BI__builtin_ia32_scattersiv16sf:
12371 case X86::BI__builtin_ia32_scatterdiv8df:
12372 case X86::BI__builtin_ia32_scatterdiv16sf:
12373 case X86::BI__builtin_ia32_scattersiv8di:
12374 case X86::BI__builtin_ia32_scattersiv16si:
12375 case X86::BI__builtin_ia32_scatterdiv8di:
12376 case X86::BI__builtin_ia32_scatterdiv16si:
12377 case X86::BI__builtin_ia32_scatterdiv2df:
12378 case X86::BI__builtin_ia32_scatterdiv2di:
12379 case X86::BI__builtin_ia32_scatterdiv4df:
12380 case X86::BI__builtin_ia32_scatterdiv4di:
12381 case X86::BI__builtin_ia32_scatterdiv4sf:
12382 case X86::BI__builtin_ia32_scatterdiv4si:
12383 case X86::BI__builtin_ia32_scatterdiv8sf:
12384 case X86::BI__builtin_ia32_scatterdiv8si:
12385 case X86::BI__builtin_ia32_scattersiv2df:
12386 case X86::BI__builtin_ia32_scattersiv2di:
12387 case X86::BI__builtin_ia32_scattersiv4df:
12388 case X86::BI__builtin_ia32_scattersiv4di:
12389 case X86::BI__builtin_ia32_scattersiv4sf:
12390 case X86::BI__builtin_ia32_scattersiv4si:
12391 case X86::BI__builtin_ia32_scattersiv8sf:
12392 case X86::BI__builtin_ia32_scattersiv8si: {
12394 switch (BuiltinID) {
12395 default: llvm_unreachable("Unexpected builtin");
12396 case X86::BI__builtin_ia32_scattersiv8df:
12397 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
12399 case X86::BI__builtin_ia32_scattersiv16sf:
12400 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
12402 case X86::BI__builtin_ia32_scatterdiv8df:
12403 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
12405 case X86::BI__builtin_ia32_scatterdiv16sf:
12406 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
12408 case X86::BI__builtin_ia32_scattersiv8di:
12409 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
12411 case X86::BI__builtin_ia32_scattersiv16si:
12412 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
12414 case X86::BI__builtin_ia32_scatterdiv8di:
12415 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
12417 case X86::BI__builtin_ia32_scatterdiv16si:
12418 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
12420 case X86::BI__builtin_ia32_scatterdiv2df:
12421 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
12423 case X86::BI__builtin_ia32_scatterdiv2di:
12424 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
12426 case X86::BI__builtin_ia32_scatterdiv4df:
12427 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
12429 case X86::BI__builtin_ia32_scatterdiv4di:
12430 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
12432 case X86::BI__builtin_ia32_scatterdiv4sf:
12433 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
12435 case X86::BI__builtin_ia32_scatterdiv4si:
12436 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
12438 case X86::BI__builtin_ia32_scatterdiv8sf:
12439 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
12441 case X86::BI__builtin_ia32_scatterdiv8si:
12442 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
12444 case X86::BI__builtin_ia32_scattersiv2df:
12445 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
12447 case X86::BI__builtin_ia32_scattersiv2di:
12448 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
12450 case X86::BI__builtin_ia32_scattersiv4df:
12451 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
12453 case X86::BI__builtin_ia32_scattersiv4di:
12454 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
12456 case X86::BI__builtin_ia32_scattersiv4sf:
12457 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
12459 case X86::BI__builtin_ia32_scattersiv4si:
12460 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
12462 case X86::BI__builtin_ia32_scattersiv8sf:
12463 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
12465 case X86::BI__builtin_ia32_scattersiv8si:
12466 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
12471 std::min(cast<llvm::VectorType>(Ops[2]->getType())->getNumElements(),
12472 cast<llvm::VectorType>(Ops[3]->getType())->getNumElements());
12473 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
12474 Function *Intr = CGM.getIntrinsic(IID);
12475 return Builder.CreateCall(Intr, Ops);
12478 case X86::BI__builtin_ia32_vextractf128_pd256:
12479 case X86::BI__builtin_ia32_vextractf128_ps256:
12480 case X86::BI__builtin_ia32_vextractf128_si256:
12481 case X86::BI__builtin_ia32_extract128i256:
12482 case X86::BI__builtin_ia32_extractf64x4_mask:
12483 case X86::BI__builtin_ia32_extractf32x4_mask:
12484 case X86::BI__builtin_ia32_extracti64x4_mask:
12485 case X86::BI__builtin_ia32_extracti32x4_mask:
12486 case X86::BI__builtin_ia32_extractf32x8_mask:
12487 case X86::BI__builtin_ia32_extracti32x8_mask:
12488 case X86::BI__builtin_ia32_extractf32x4_256_mask:
12489 case X86::BI__builtin_ia32_extracti32x4_256_mask:
12490 case X86::BI__builtin_ia32_extractf64x2_256_mask:
12491 case X86::BI__builtin_ia32_extracti64x2_256_mask:
12492 case X86::BI__builtin_ia32_extractf64x2_512_mask:
12493 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
12494 auto *DstTy = cast<llvm::VectorType>(ConvertType(E->getType()));
12495 unsigned NumElts = DstTy->getNumElements();
12496 unsigned SrcNumElts =
12497 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
12498 unsigned SubVectors = SrcNumElts / NumElts;
12499 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12500 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12501 Index &= SubVectors - 1; // Remove any extra bits.
12505 for (unsigned i = 0; i != NumElts; ++i)
12506 Indices[i] = i + Index;
12508 Value *Res = Builder.CreateShuffleVector(Ops[0],
12509 UndefValue::get(Ops[0]->getType()),
12510 makeArrayRef(Indices, NumElts),
12513 if (Ops.size() == 4)
12514 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
12518 case X86::BI__builtin_ia32_vinsertf128_pd256:
12519 case X86::BI__builtin_ia32_vinsertf128_ps256:
12520 case X86::BI__builtin_ia32_vinsertf128_si256:
12521 case X86::BI__builtin_ia32_insert128i256:
12522 case X86::BI__builtin_ia32_insertf64x4:
12523 case X86::BI__builtin_ia32_insertf32x4:
12524 case X86::BI__builtin_ia32_inserti64x4:
12525 case X86::BI__builtin_ia32_inserti32x4:
12526 case X86::BI__builtin_ia32_insertf32x8:
12527 case X86::BI__builtin_ia32_inserti32x8:
12528 case X86::BI__builtin_ia32_insertf32x4_256:
12529 case X86::BI__builtin_ia32_inserti32x4_256:
12530 case X86::BI__builtin_ia32_insertf64x2_256:
12531 case X86::BI__builtin_ia32_inserti64x2_256:
12532 case X86::BI__builtin_ia32_insertf64x2_512:
12533 case X86::BI__builtin_ia32_inserti64x2_512: {
12534 unsigned DstNumElts =
12535 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
12536 unsigned SrcNumElts =
12537 cast<llvm::VectorType>(Ops[1]->getType())->getNumElements();
12538 unsigned SubVectors = DstNumElts / SrcNumElts;
12539 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12540 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12541 Index &= SubVectors - 1; // Remove any extra bits.
12542 Index *= SrcNumElts;
12545 for (unsigned i = 0; i != DstNumElts; ++i)
12546 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
12548 Value *Op1 = Builder.CreateShuffleVector(Ops[1],
12549 UndefValue::get(Ops[1]->getType()),
12550 makeArrayRef(Indices, DstNumElts),
12553 for (unsigned i = 0; i != DstNumElts; ++i) {
12554 if (i >= Index && i < (Index + SrcNumElts))
12555 Indices[i] = (i - Index) + DstNumElts;
12560 return Builder.CreateShuffleVector(Ops[0], Op1,
12561 makeArrayRef(Indices, DstNumElts),
12564 case X86::BI__builtin_ia32_pmovqd512_mask:
12565 case X86::BI__builtin_ia32_pmovwb512_mask: {
12566 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12567 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
12569 case X86::BI__builtin_ia32_pmovdb512_mask:
12570 case X86::BI__builtin_ia32_pmovdw512_mask:
12571 case X86::BI__builtin_ia32_pmovqw512_mask: {
12572 if (const auto *C = dyn_cast<Constant>(Ops[2]))
12573 if (C->isAllOnesValue())
12574 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12577 switch (BuiltinID) {
12578 default: llvm_unreachable("Unsupported intrinsic!");
12579 case X86::BI__builtin_ia32_pmovdb512_mask:
12580 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
12582 case X86::BI__builtin_ia32_pmovdw512_mask:
12583 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
12585 case X86::BI__builtin_ia32_pmovqw512_mask:
12586 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
12590 Function *Intr = CGM.getIntrinsic(IID);
12591 return Builder.CreateCall(Intr, Ops);
12593 case X86::BI__builtin_ia32_pblendw128:
12594 case X86::BI__builtin_ia32_blendpd:
12595 case X86::BI__builtin_ia32_blendps:
12596 case X86::BI__builtin_ia32_blendpd256:
12597 case X86::BI__builtin_ia32_blendps256:
12598 case X86::BI__builtin_ia32_pblendw256:
12599 case X86::BI__builtin_ia32_pblendd128:
12600 case X86::BI__builtin_ia32_pblendd256: {
12602 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
12603 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12606 // If there are more than 8 elements, the immediate is used twice so make
12607 // sure we handle that.
12608 for (unsigned i = 0; i != NumElts; ++i)
12609 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
12611 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12612 makeArrayRef(Indices, NumElts),
12615 case X86::BI__builtin_ia32_pshuflw:
12616 case X86::BI__builtin_ia32_pshuflw256:
12617 case X86::BI__builtin_ia32_pshuflw512: {
12618 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12619 auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
12620 unsigned NumElts = Ty->getNumElements();
12622 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12623 Imm = (Imm & 0xff) * 0x01010101;
12626 for (unsigned l = 0; l != NumElts; l += 8) {
12627 for (unsigned i = 0; i != 4; ++i) {
12628 Indices[l + i] = l + (Imm & 3);
12631 for (unsigned i = 4; i != 8; ++i)
12632 Indices[l + i] = l + i;
12635 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12636 makeArrayRef(Indices, NumElts),
12639 case X86::BI__builtin_ia32_pshufhw:
12640 case X86::BI__builtin_ia32_pshufhw256:
12641 case X86::BI__builtin_ia32_pshufhw512: {
12642 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12643 auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
12644 unsigned NumElts = Ty->getNumElements();
12646 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12647 Imm = (Imm & 0xff) * 0x01010101;
12650 for (unsigned l = 0; l != NumElts; l += 8) {
12651 for (unsigned i = 0; i != 4; ++i)
12652 Indices[l + i] = l + i;
12653 for (unsigned i = 4; i != 8; ++i) {
12654 Indices[l + i] = l + 4 + (Imm & 3);
12659 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12660 makeArrayRef(Indices, NumElts),
12663 case X86::BI__builtin_ia32_pshufd:
12664 case X86::BI__builtin_ia32_pshufd256:
12665 case X86::BI__builtin_ia32_pshufd512:
12666 case X86::BI__builtin_ia32_vpermilpd:
12667 case X86::BI__builtin_ia32_vpermilps:
12668 case X86::BI__builtin_ia32_vpermilpd256:
12669 case X86::BI__builtin_ia32_vpermilps256:
12670 case X86::BI__builtin_ia32_vpermilpd512:
12671 case X86::BI__builtin_ia32_vpermilps512: {
12672 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12673 auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
12674 unsigned NumElts = Ty->getNumElements();
12675 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12676 unsigned NumLaneElts = NumElts / NumLanes;
12678 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12679 Imm = (Imm & 0xff) * 0x01010101;
12682 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12683 for (unsigned i = 0; i != NumLaneElts; ++i) {
12684 Indices[i + l] = (Imm % NumLaneElts) + l;
12685 Imm /= NumLaneElts;
12689 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12690 makeArrayRef(Indices, NumElts),
12693 case X86::BI__builtin_ia32_shufpd:
12694 case X86::BI__builtin_ia32_shufpd256:
12695 case X86::BI__builtin_ia32_shufpd512:
12696 case X86::BI__builtin_ia32_shufps:
12697 case X86::BI__builtin_ia32_shufps256:
12698 case X86::BI__builtin_ia32_shufps512: {
12699 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12700 auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
12701 unsigned NumElts = Ty->getNumElements();
12702 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12703 unsigned NumLaneElts = NumElts / NumLanes;
12705 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12706 Imm = (Imm & 0xff) * 0x01010101;
12709 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12710 for (unsigned i = 0; i != NumLaneElts; ++i) {
12711 unsigned Index = Imm % NumLaneElts;
12712 Imm /= NumLaneElts;
12713 if (i >= (NumLaneElts / 2))
12715 Indices[l + i] = l + Index;
12719 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12720 makeArrayRef(Indices, NumElts),
12723 case X86::BI__builtin_ia32_permdi256:
12724 case X86::BI__builtin_ia32_permdf256:
12725 case X86::BI__builtin_ia32_permdi512:
12726 case X86::BI__builtin_ia32_permdf512: {
12727 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12728 auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
12729 unsigned NumElts = Ty->getNumElements();
12731 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
12733 for (unsigned l = 0; l != NumElts; l += 4)
12734 for (unsigned i = 0; i != 4; ++i)
12735 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
12737 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12738 makeArrayRef(Indices, NumElts),
12741 case X86::BI__builtin_ia32_palignr128:
12742 case X86::BI__builtin_ia32_palignr256:
12743 case X86::BI__builtin_ia32_palignr512: {
12744 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
12747 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
12748 assert(NumElts % 16 == 0);
12750 // If palignr is shifting the pair of vectors more than the size of two
12751 // lanes, emit zero.
12752 if (ShiftVal >= 32)
12753 return llvm::Constant::getNullValue(ConvertType(E->getType()));
12755 // If palignr is shifting the pair of input vectors more than one lane,
12756 // but less than two lanes, convert to shifting in zeroes.
12757 if (ShiftVal > 16) {
12760 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
12764 // 256-bit palignr operates on 128-bit lanes so we need to handle that
12765 for (unsigned l = 0; l != NumElts; l += 16) {
12766 for (unsigned i = 0; i != 16; ++i) {
12767 unsigned Idx = ShiftVal + i;
12769 Idx += NumElts - 16; // End of lane, switch operand.
12770 Indices[l + i] = Idx + l;
12774 return Builder.CreateShuffleVector(Ops[1], Ops[0],
12775 makeArrayRef(Indices, NumElts),
12778 case X86::BI__builtin_ia32_alignd128:
12779 case X86::BI__builtin_ia32_alignd256:
12780 case X86::BI__builtin_ia32_alignd512:
12781 case X86::BI__builtin_ia32_alignq128:
12782 case X86::BI__builtin_ia32_alignq256:
12783 case X86::BI__builtin_ia32_alignq512: {
12785 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
12786 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
12788 // Mask the shift amount to width of two vectors.
12789 ShiftVal &= (2 * NumElts) - 1;
12792 for (unsigned i = 0; i != NumElts; ++i)
12793 Indices[i] = i + ShiftVal;
12795 return Builder.CreateShuffleVector(Ops[1], Ops[0],
12796 makeArrayRef(Indices, NumElts),
12799 case X86::BI__builtin_ia32_shuf_f32x4_256:
12800 case X86::BI__builtin_ia32_shuf_f64x2_256:
12801 case X86::BI__builtin_ia32_shuf_i32x4_256:
12802 case X86::BI__builtin_ia32_shuf_i64x2_256:
12803 case X86::BI__builtin_ia32_shuf_f32x4:
12804 case X86::BI__builtin_ia32_shuf_f64x2:
12805 case X86::BI__builtin_ia32_shuf_i32x4:
12806 case X86::BI__builtin_ia32_shuf_i64x2: {
12807 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12808 auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
12809 unsigned NumElts = Ty->getNumElements();
12810 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
12811 unsigned NumLaneElts = NumElts / NumLanes;
12814 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12815 unsigned Index = (Imm % NumLanes) * NumLaneElts;
12816 Imm /= NumLanes; // Discard the bits we just used.
12817 if (l >= (NumElts / 2))
12818 Index += NumElts; // Switch to other source.
12819 for (unsigned i = 0; i != NumLaneElts; ++i) {
12820 Indices[l + i] = Index + i;
12824 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12825 makeArrayRef(Indices, NumElts),
12829 case X86::BI__builtin_ia32_vperm2f128_pd256:
12830 case X86::BI__builtin_ia32_vperm2f128_ps256:
12831 case X86::BI__builtin_ia32_vperm2f128_si256:
12832 case X86::BI__builtin_ia32_permti256: {
12833 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12835 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
12837 // This takes a very simple approach since there are two lanes and a
12838 // shuffle can have 2 inputs. So we reserve the first input for the first
12839 // lane and the second input for the second lane. This may result in
12840 // duplicate sources, but this can be dealt with in the backend.
12844 for (unsigned l = 0; l != 2; ++l) {
12845 // Determine the source for this lane.
12846 if (Imm & (1 << ((l * 4) + 3)))
12847 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
12848 else if (Imm & (1 << ((l * 4) + 1)))
12849 OutOps[l] = Ops[1];
12851 OutOps[l] = Ops[0];
12853 for (unsigned i = 0; i != NumElts/2; ++i) {
12854 // Start with ith element of the source for this lane.
12855 unsigned Idx = (l * NumElts) + i;
12856 // If bit 0 of the immediate half is set, switch to the high half of
12858 if (Imm & (1 << (l * 4)))
12860 Indices[(l * (NumElts/2)) + i] = Idx;
12864 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
12865 makeArrayRef(Indices, NumElts),
12869 case X86::BI__builtin_ia32_pslldqi128_byteshift:
12870 case X86::BI__builtin_ia32_pslldqi256_byteshift:
12871 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
12872 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12873 auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
12874 // Builtin type is vXi64 so multiply by 8 to get bytes.
12875 unsigned NumElts = ResultType->getNumElements() * 8;
12877 // If pslldq is shifting the vector more than 15 bytes, emit zero.
12878 if (ShiftVal >= 16)
12879 return llvm::Constant::getNullValue(ResultType);
12882 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
12883 for (unsigned l = 0; l != NumElts; l += 16) {
12884 for (unsigned i = 0; i != 16; ++i) {
12885 unsigned Idx = NumElts + i - ShiftVal;
12886 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
12887 Indices[l + i] = Idx + l;
12891 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
12892 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
12893 Value *Zero = llvm::Constant::getNullValue(VecTy);
12894 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
12895 makeArrayRef(Indices, NumElts),
12897 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
12899 case X86::BI__builtin_ia32_psrldqi128_byteshift:
12900 case X86::BI__builtin_ia32_psrldqi256_byteshift:
12901 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
12902 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12903 auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
12904 // Builtin type is vXi64 so multiply by 8 to get bytes.
12905 unsigned NumElts = ResultType->getNumElements() * 8;
12907 // If psrldq is shifting the vector more than 15 bytes, emit zero.
12908 if (ShiftVal >= 16)
12909 return llvm::Constant::getNullValue(ResultType);
12912 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
12913 for (unsigned l = 0; l != NumElts; l += 16) {
12914 for (unsigned i = 0; i != 16; ++i) {
12915 unsigned Idx = i + ShiftVal;
12916 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
12917 Indices[l + i] = Idx + l;
12921 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
12922 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
12923 Value *Zero = llvm::Constant::getNullValue(VecTy);
12924 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
12925 makeArrayRef(Indices, NumElts),
12927 return Builder.CreateBitCast(SV, ResultType, "cast");
12929 case X86::BI__builtin_ia32_kshiftliqi:
12930 case X86::BI__builtin_ia32_kshiftlihi:
12931 case X86::BI__builtin_ia32_kshiftlisi:
12932 case X86::BI__builtin_ia32_kshiftlidi: {
12933 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12934 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
12936 if (ShiftVal >= NumElts)
12937 return llvm::Constant::getNullValue(Ops[0]->getType());
12939 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
12942 for (unsigned i = 0; i != NumElts; ++i)
12943 Indices[i] = NumElts + i - ShiftVal;
12945 Value *Zero = llvm::Constant::getNullValue(In->getType());
12946 Value *SV = Builder.CreateShuffleVector(Zero, In,
12947 makeArrayRef(Indices, NumElts),
12949 return Builder.CreateBitCast(SV, Ops[0]->getType());
12951 case X86::BI__builtin_ia32_kshiftriqi:
12952 case X86::BI__builtin_ia32_kshiftrihi:
12953 case X86::BI__builtin_ia32_kshiftrisi:
12954 case X86::BI__builtin_ia32_kshiftridi: {
12955 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12956 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
12958 if (ShiftVal >= NumElts)
12959 return llvm::Constant::getNullValue(Ops[0]->getType());
12961 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
12964 for (unsigned i = 0; i != NumElts; ++i)
12965 Indices[i] = i + ShiftVal;
12967 Value *Zero = llvm::Constant::getNullValue(In->getType());
12968 Value *SV = Builder.CreateShuffleVector(In, Zero,
12969 makeArrayRef(Indices, NumElts),
12971 return Builder.CreateBitCast(SV, Ops[0]->getType());
12973 case X86::BI__builtin_ia32_movnti:
12974 case X86::BI__builtin_ia32_movnti64:
12975 case X86::BI__builtin_ia32_movntsd:
12976 case X86::BI__builtin_ia32_movntss: {
12977 llvm::MDNode *Node = llvm::MDNode::get(
12978 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
12980 Value *Ptr = Ops[0];
12981 Value *Src = Ops[1];
12983 // Extract the 0'th element of the source vector.
12984 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
12985 BuiltinID == X86::BI__builtin_ia32_movntss)
12986 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
12988 // Convert the type of the pointer to a pointer to the stored type.
12989 Value *BC = Builder.CreateBitCast(
12990 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
12992 // Unaligned nontemporal store of the scalar value.
12993 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
12994 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
12995 SI->setAlignment(llvm::Align(1));
12998 // Rotate is a special case of funnel shift - 1st 2 args are the same.
12999 case X86::BI__builtin_ia32_vprotb:
13000 case X86::BI__builtin_ia32_vprotw:
13001 case X86::BI__builtin_ia32_vprotd:
13002 case X86::BI__builtin_ia32_vprotq:
13003 case X86::BI__builtin_ia32_vprotbi:
13004 case X86::BI__builtin_ia32_vprotwi:
13005 case X86::BI__builtin_ia32_vprotdi:
13006 case X86::BI__builtin_ia32_vprotqi:
13007 case X86::BI__builtin_ia32_prold128:
13008 case X86::BI__builtin_ia32_prold256:
13009 case X86::BI__builtin_ia32_prold512:
13010 case X86::BI__builtin_ia32_prolq128:
13011 case X86::BI__builtin_ia32_prolq256:
13012 case X86::BI__builtin_ia32_prolq512:
13013 case X86::BI__builtin_ia32_prolvd128:
13014 case X86::BI__builtin_ia32_prolvd256:
13015 case X86::BI__builtin_ia32_prolvd512:
13016 case X86::BI__builtin_ia32_prolvq128:
13017 case X86::BI__builtin_ia32_prolvq256:
13018 case X86::BI__builtin_ia32_prolvq512:
13019 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
13020 case X86::BI__builtin_ia32_prord128:
13021 case X86::BI__builtin_ia32_prord256:
13022 case X86::BI__builtin_ia32_prord512:
13023 case X86::BI__builtin_ia32_prorq128:
13024 case X86::BI__builtin_ia32_prorq256:
13025 case X86::BI__builtin_ia32_prorq512:
13026 case X86::BI__builtin_ia32_prorvd128:
13027 case X86::BI__builtin_ia32_prorvd256:
13028 case X86::BI__builtin_ia32_prorvd512:
13029 case X86::BI__builtin_ia32_prorvq128:
13030 case X86::BI__builtin_ia32_prorvq256:
13031 case X86::BI__builtin_ia32_prorvq512:
13032 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
13033 case X86::BI__builtin_ia32_selectb_128:
13034 case X86::BI__builtin_ia32_selectb_256:
13035 case X86::BI__builtin_ia32_selectb_512:
13036 case X86::BI__builtin_ia32_selectw_128:
13037 case X86::BI__builtin_ia32_selectw_256:
13038 case X86::BI__builtin_ia32_selectw_512:
13039 case X86::BI__builtin_ia32_selectd_128:
13040 case X86::BI__builtin_ia32_selectd_256:
13041 case X86::BI__builtin_ia32_selectd_512:
13042 case X86::BI__builtin_ia32_selectq_128:
13043 case X86::BI__builtin_ia32_selectq_256:
13044 case X86::BI__builtin_ia32_selectq_512:
13045 case X86::BI__builtin_ia32_selectps_128:
13046 case X86::BI__builtin_ia32_selectps_256:
13047 case X86::BI__builtin_ia32_selectps_512:
13048 case X86::BI__builtin_ia32_selectpd_128:
13049 case X86::BI__builtin_ia32_selectpd_256:
13050 case X86::BI__builtin_ia32_selectpd_512:
13051 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
13052 case X86::BI__builtin_ia32_selectss_128:
13053 case X86::BI__builtin_ia32_selectsd_128: {
13054 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13055 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13056 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
13057 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
13059 case X86::BI__builtin_ia32_cmpb128_mask:
13060 case X86::BI__builtin_ia32_cmpb256_mask:
13061 case X86::BI__builtin_ia32_cmpb512_mask:
13062 case X86::BI__builtin_ia32_cmpw128_mask:
13063 case X86::BI__builtin_ia32_cmpw256_mask:
13064 case X86::BI__builtin_ia32_cmpw512_mask:
13065 case X86::BI__builtin_ia32_cmpd128_mask:
13066 case X86::BI__builtin_ia32_cmpd256_mask:
13067 case X86::BI__builtin_ia32_cmpd512_mask:
13068 case X86::BI__builtin_ia32_cmpq128_mask:
13069 case X86::BI__builtin_ia32_cmpq256_mask:
13070 case X86::BI__builtin_ia32_cmpq512_mask: {
13071 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13072 return EmitX86MaskedCompare(*this, CC, true, Ops);
13074 case X86::BI__builtin_ia32_ucmpb128_mask:
13075 case X86::BI__builtin_ia32_ucmpb256_mask:
13076 case X86::BI__builtin_ia32_ucmpb512_mask:
13077 case X86::BI__builtin_ia32_ucmpw128_mask:
13078 case X86::BI__builtin_ia32_ucmpw256_mask:
13079 case X86::BI__builtin_ia32_ucmpw512_mask:
13080 case X86::BI__builtin_ia32_ucmpd128_mask:
13081 case X86::BI__builtin_ia32_ucmpd256_mask:
13082 case X86::BI__builtin_ia32_ucmpd512_mask:
13083 case X86::BI__builtin_ia32_ucmpq128_mask:
13084 case X86::BI__builtin_ia32_ucmpq256_mask:
13085 case X86::BI__builtin_ia32_ucmpq512_mask: {
13086 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13087 return EmitX86MaskedCompare(*this, CC, false, Ops);
13089 case X86::BI__builtin_ia32_vpcomb:
13090 case X86::BI__builtin_ia32_vpcomw:
13091 case X86::BI__builtin_ia32_vpcomd:
13092 case X86::BI__builtin_ia32_vpcomq:
13093 return EmitX86vpcom(*this, Ops, true);
13094 case X86::BI__builtin_ia32_vpcomub:
13095 case X86::BI__builtin_ia32_vpcomuw:
13096 case X86::BI__builtin_ia32_vpcomud:
13097 case X86::BI__builtin_ia32_vpcomuq:
13098 return EmitX86vpcom(*this, Ops, false);
13100 case X86::BI__builtin_ia32_kortestcqi:
13101 case X86::BI__builtin_ia32_kortestchi:
13102 case X86::BI__builtin_ia32_kortestcsi:
13103 case X86::BI__builtin_ia32_kortestcdi: {
13104 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13105 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
13106 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13107 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13109 case X86::BI__builtin_ia32_kortestzqi:
13110 case X86::BI__builtin_ia32_kortestzhi:
13111 case X86::BI__builtin_ia32_kortestzsi:
13112 case X86::BI__builtin_ia32_kortestzdi: {
13113 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13114 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
13115 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13116 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13119 case X86::BI__builtin_ia32_ktestcqi:
13120 case X86::BI__builtin_ia32_ktestzqi:
13121 case X86::BI__builtin_ia32_ktestchi:
13122 case X86::BI__builtin_ia32_ktestzhi:
13123 case X86::BI__builtin_ia32_ktestcsi:
13124 case X86::BI__builtin_ia32_ktestzsi:
13125 case X86::BI__builtin_ia32_ktestcdi:
13126 case X86::BI__builtin_ia32_ktestzdi: {
13128 switch (BuiltinID) {
13129 default: llvm_unreachable("Unsupported intrinsic!");
13130 case X86::BI__builtin_ia32_ktestcqi:
13131 IID = Intrinsic::x86_avx512_ktestc_b;
13133 case X86::BI__builtin_ia32_ktestzqi:
13134 IID = Intrinsic::x86_avx512_ktestz_b;
13136 case X86::BI__builtin_ia32_ktestchi:
13137 IID = Intrinsic::x86_avx512_ktestc_w;
13139 case X86::BI__builtin_ia32_ktestzhi:
13140 IID = Intrinsic::x86_avx512_ktestz_w;
13142 case X86::BI__builtin_ia32_ktestcsi:
13143 IID = Intrinsic::x86_avx512_ktestc_d;
13145 case X86::BI__builtin_ia32_ktestzsi:
13146 IID = Intrinsic::x86_avx512_ktestz_d;
13148 case X86::BI__builtin_ia32_ktestcdi:
13149 IID = Intrinsic::x86_avx512_ktestc_q;
13151 case X86::BI__builtin_ia32_ktestzdi:
13152 IID = Intrinsic::x86_avx512_ktestz_q;
13156 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13157 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13158 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13159 Function *Intr = CGM.getIntrinsic(IID);
13160 return Builder.CreateCall(Intr, {LHS, RHS});
13163 case X86::BI__builtin_ia32_kaddqi:
13164 case X86::BI__builtin_ia32_kaddhi:
13165 case X86::BI__builtin_ia32_kaddsi:
13166 case X86::BI__builtin_ia32_kadddi: {
13168 switch (BuiltinID) {
13169 default: llvm_unreachable("Unsupported intrinsic!");
13170 case X86::BI__builtin_ia32_kaddqi:
13171 IID = Intrinsic::x86_avx512_kadd_b;
13173 case X86::BI__builtin_ia32_kaddhi:
13174 IID = Intrinsic::x86_avx512_kadd_w;
13176 case X86::BI__builtin_ia32_kaddsi:
13177 IID = Intrinsic::x86_avx512_kadd_d;
13179 case X86::BI__builtin_ia32_kadddi:
13180 IID = Intrinsic::x86_avx512_kadd_q;
13184 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13185 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13186 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13187 Function *Intr = CGM.getIntrinsic(IID);
13188 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
13189 return Builder.CreateBitCast(Res, Ops[0]->getType());
13191 case X86::BI__builtin_ia32_kandqi:
13192 case X86::BI__builtin_ia32_kandhi:
13193 case X86::BI__builtin_ia32_kandsi:
13194 case X86::BI__builtin_ia32_kanddi:
13195 return EmitX86MaskLogic(*this, Instruction::And, Ops);
13196 case X86::BI__builtin_ia32_kandnqi:
13197 case X86::BI__builtin_ia32_kandnhi:
13198 case X86::BI__builtin_ia32_kandnsi:
13199 case X86::BI__builtin_ia32_kandndi:
13200 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
13201 case X86::BI__builtin_ia32_korqi:
13202 case X86::BI__builtin_ia32_korhi:
13203 case X86::BI__builtin_ia32_korsi:
13204 case X86::BI__builtin_ia32_kordi:
13205 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
13206 case X86::BI__builtin_ia32_kxnorqi:
13207 case X86::BI__builtin_ia32_kxnorhi:
13208 case X86::BI__builtin_ia32_kxnorsi:
13209 case X86::BI__builtin_ia32_kxnordi:
13210 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
13211 case X86::BI__builtin_ia32_kxorqi:
13212 case X86::BI__builtin_ia32_kxorhi:
13213 case X86::BI__builtin_ia32_kxorsi:
13214 case X86::BI__builtin_ia32_kxordi:
13215 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
13216 case X86::BI__builtin_ia32_knotqi:
13217 case X86::BI__builtin_ia32_knothi:
13218 case X86::BI__builtin_ia32_knotsi:
13219 case X86::BI__builtin_ia32_knotdi: {
13220 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13221 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13222 return Builder.CreateBitCast(Builder.CreateNot(Res),
13223 Ops[0]->getType());
13225 case X86::BI__builtin_ia32_kmovb:
13226 case X86::BI__builtin_ia32_kmovw:
13227 case X86::BI__builtin_ia32_kmovd:
13228 case X86::BI__builtin_ia32_kmovq: {
13229 // Bitcast to vXi1 type and then back to integer. This gets the mask
13230 // register type into the IR, but might be optimized out depending on
13231 // what's around it.
13232 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13233 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13234 return Builder.CreateBitCast(Res, Ops[0]->getType());
13237 case X86::BI__builtin_ia32_kunpckdi:
13238 case X86::BI__builtin_ia32_kunpcksi:
13239 case X86::BI__builtin_ia32_kunpckhi: {
13240 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13241 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13242 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13244 for (unsigned i = 0; i != NumElts; ++i)
13247 // First extract half of each vector. This gives better codegen than
13248 // doing it in a single shuffle.
13249 LHS = Builder.CreateShuffleVector(LHS, LHS,
13250 makeArrayRef(Indices, NumElts / 2));
13251 RHS = Builder.CreateShuffleVector(RHS, RHS,
13252 makeArrayRef(Indices, NumElts / 2));
13253 // Concat the vectors.
13254 // NOTE: Operands are swapped to match the intrinsic definition.
13255 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
13256 makeArrayRef(Indices, NumElts));
13257 return Builder.CreateBitCast(Res, Ops[0]->getType());
13260 case X86::BI__builtin_ia32_vplzcntd_128:
13261 case X86::BI__builtin_ia32_vplzcntd_256:
13262 case X86::BI__builtin_ia32_vplzcntd_512:
13263 case X86::BI__builtin_ia32_vplzcntq_128:
13264 case X86::BI__builtin_ia32_vplzcntq_256:
13265 case X86::BI__builtin_ia32_vplzcntq_512: {
13266 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
13267 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
13269 case X86::BI__builtin_ia32_sqrtss:
13270 case X86::BI__builtin_ia32_sqrtsd: {
13271 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
13273 if (Builder.getIsFPConstrained()) {
13274 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13276 A = Builder.CreateConstrainedFPCall(F, {A});
13278 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13279 A = Builder.CreateCall(F, {A});
13281 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13283 case X86::BI__builtin_ia32_sqrtsd_round_mask:
13284 case X86::BI__builtin_ia32_sqrtss_round_mask: {
13285 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
13286 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
13287 // otherwise keep the intrinsic.
13289 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
13290 Intrinsic::x86_avx512_mask_sqrt_sd :
13291 Intrinsic::x86_avx512_mask_sqrt_ss;
13292 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13294 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13296 if (Builder.getIsFPConstrained()) {
13297 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13299 A = Builder.CreateConstrainedFPCall(F, A);
13301 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13302 A = Builder.CreateCall(F, A);
13304 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13305 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
13306 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13308 case X86::BI__builtin_ia32_sqrtpd256:
13309 case X86::BI__builtin_ia32_sqrtpd:
13310 case X86::BI__builtin_ia32_sqrtps256:
13311 case X86::BI__builtin_ia32_sqrtps:
13312 case X86::BI__builtin_ia32_sqrtps512:
13313 case X86::BI__builtin_ia32_sqrtpd512: {
13314 if (Ops.size() == 2) {
13315 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13316 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
13317 // otherwise keep the intrinsic.
13319 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
13320 Intrinsic::x86_avx512_sqrt_ps_512 :
13321 Intrinsic::x86_avx512_sqrt_pd_512;
13322 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13325 if (Builder.getIsFPConstrained()) {
13326 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13327 Ops[0]->getType());
13328 return Builder.CreateConstrainedFPCall(F, Ops[0]);
13330 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
13331 return Builder.CreateCall(F, Ops[0]);
13334 case X86::BI__builtin_ia32_pabsb128:
13335 case X86::BI__builtin_ia32_pabsw128:
13336 case X86::BI__builtin_ia32_pabsd128:
13337 case X86::BI__builtin_ia32_pabsb256:
13338 case X86::BI__builtin_ia32_pabsw256:
13339 case X86::BI__builtin_ia32_pabsd256:
13340 case X86::BI__builtin_ia32_pabsq128:
13341 case X86::BI__builtin_ia32_pabsq256:
13342 case X86::BI__builtin_ia32_pabsb512:
13343 case X86::BI__builtin_ia32_pabsw512:
13344 case X86::BI__builtin_ia32_pabsd512:
13345 case X86::BI__builtin_ia32_pabsq512:
13346 return EmitX86Abs(*this, Ops);
13348 case X86::BI__builtin_ia32_pmaxsb128:
13349 case X86::BI__builtin_ia32_pmaxsw128:
13350 case X86::BI__builtin_ia32_pmaxsd128:
13351 case X86::BI__builtin_ia32_pmaxsq128:
13352 case X86::BI__builtin_ia32_pmaxsb256:
13353 case X86::BI__builtin_ia32_pmaxsw256:
13354 case X86::BI__builtin_ia32_pmaxsd256:
13355 case X86::BI__builtin_ia32_pmaxsq256:
13356 case X86::BI__builtin_ia32_pmaxsb512:
13357 case X86::BI__builtin_ia32_pmaxsw512:
13358 case X86::BI__builtin_ia32_pmaxsd512:
13359 case X86::BI__builtin_ia32_pmaxsq512:
13360 return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
13361 case X86::BI__builtin_ia32_pmaxub128:
13362 case X86::BI__builtin_ia32_pmaxuw128:
13363 case X86::BI__builtin_ia32_pmaxud128:
13364 case X86::BI__builtin_ia32_pmaxuq128:
13365 case X86::BI__builtin_ia32_pmaxub256:
13366 case X86::BI__builtin_ia32_pmaxuw256:
13367 case X86::BI__builtin_ia32_pmaxud256:
13368 case X86::BI__builtin_ia32_pmaxuq256:
13369 case X86::BI__builtin_ia32_pmaxub512:
13370 case X86::BI__builtin_ia32_pmaxuw512:
13371 case X86::BI__builtin_ia32_pmaxud512:
13372 case X86::BI__builtin_ia32_pmaxuq512:
13373 return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
13374 case X86::BI__builtin_ia32_pminsb128:
13375 case X86::BI__builtin_ia32_pminsw128:
13376 case X86::BI__builtin_ia32_pminsd128:
13377 case X86::BI__builtin_ia32_pminsq128:
13378 case X86::BI__builtin_ia32_pminsb256:
13379 case X86::BI__builtin_ia32_pminsw256:
13380 case X86::BI__builtin_ia32_pminsd256:
13381 case X86::BI__builtin_ia32_pminsq256:
13382 case X86::BI__builtin_ia32_pminsb512:
13383 case X86::BI__builtin_ia32_pminsw512:
13384 case X86::BI__builtin_ia32_pminsd512:
13385 case X86::BI__builtin_ia32_pminsq512:
13386 return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
13387 case X86::BI__builtin_ia32_pminub128:
13388 case X86::BI__builtin_ia32_pminuw128:
13389 case X86::BI__builtin_ia32_pminud128:
13390 case X86::BI__builtin_ia32_pminuq128:
13391 case X86::BI__builtin_ia32_pminub256:
13392 case X86::BI__builtin_ia32_pminuw256:
13393 case X86::BI__builtin_ia32_pminud256:
13394 case X86::BI__builtin_ia32_pminuq256:
13395 case X86::BI__builtin_ia32_pminub512:
13396 case X86::BI__builtin_ia32_pminuw512:
13397 case X86::BI__builtin_ia32_pminud512:
13398 case X86::BI__builtin_ia32_pminuq512:
13399 return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
13401 case X86::BI__builtin_ia32_pmuludq128:
13402 case X86::BI__builtin_ia32_pmuludq256:
13403 case X86::BI__builtin_ia32_pmuludq512:
13404 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
13406 case X86::BI__builtin_ia32_pmuldq128:
13407 case X86::BI__builtin_ia32_pmuldq256:
13408 case X86::BI__builtin_ia32_pmuldq512:
13409 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
13411 case X86::BI__builtin_ia32_pternlogd512_mask:
13412 case X86::BI__builtin_ia32_pternlogq512_mask:
13413 case X86::BI__builtin_ia32_pternlogd128_mask:
13414 case X86::BI__builtin_ia32_pternlogd256_mask:
13415 case X86::BI__builtin_ia32_pternlogq128_mask:
13416 case X86::BI__builtin_ia32_pternlogq256_mask:
13417 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
13419 case X86::BI__builtin_ia32_pternlogd512_maskz:
13420 case X86::BI__builtin_ia32_pternlogq512_maskz:
13421 case X86::BI__builtin_ia32_pternlogd128_maskz:
13422 case X86::BI__builtin_ia32_pternlogd256_maskz:
13423 case X86::BI__builtin_ia32_pternlogq128_maskz:
13424 case X86::BI__builtin_ia32_pternlogq256_maskz:
13425 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
13427 case X86::BI__builtin_ia32_vpshldd128:
13428 case X86::BI__builtin_ia32_vpshldd256:
13429 case X86::BI__builtin_ia32_vpshldd512:
13430 case X86::BI__builtin_ia32_vpshldq128:
13431 case X86::BI__builtin_ia32_vpshldq256:
13432 case X86::BI__builtin_ia32_vpshldq512:
13433 case X86::BI__builtin_ia32_vpshldw128:
13434 case X86::BI__builtin_ia32_vpshldw256:
13435 case X86::BI__builtin_ia32_vpshldw512:
13436 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13438 case X86::BI__builtin_ia32_vpshrdd128:
13439 case X86::BI__builtin_ia32_vpshrdd256:
13440 case X86::BI__builtin_ia32_vpshrdd512:
13441 case X86::BI__builtin_ia32_vpshrdq128:
13442 case X86::BI__builtin_ia32_vpshrdq256:
13443 case X86::BI__builtin_ia32_vpshrdq512:
13444 case X86::BI__builtin_ia32_vpshrdw128:
13445 case X86::BI__builtin_ia32_vpshrdw256:
13446 case X86::BI__builtin_ia32_vpshrdw512:
13447 // Ops 0 and 1 are swapped.
13448 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13450 case X86::BI__builtin_ia32_vpshldvd128:
13451 case X86::BI__builtin_ia32_vpshldvd256:
13452 case X86::BI__builtin_ia32_vpshldvd512:
13453 case X86::BI__builtin_ia32_vpshldvq128:
13454 case X86::BI__builtin_ia32_vpshldvq256:
13455 case X86::BI__builtin_ia32_vpshldvq512:
13456 case X86::BI__builtin_ia32_vpshldvw128:
13457 case X86::BI__builtin_ia32_vpshldvw256:
13458 case X86::BI__builtin_ia32_vpshldvw512:
13459 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13461 case X86::BI__builtin_ia32_vpshrdvd128:
13462 case X86::BI__builtin_ia32_vpshrdvd256:
13463 case X86::BI__builtin_ia32_vpshrdvd512:
13464 case X86::BI__builtin_ia32_vpshrdvq128:
13465 case X86::BI__builtin_ia32_vpshrdvq256:
13466 case X86::BI__builtin_ia32_vpshrdvq512:
13467 case X86::BI__builtin_ia32_vpshrdvw128:
13468 case X86::BI__builtin_ia32_vpshrdvw256:
13469 case X86::BI__builtin_ia32_vpshrdvw512:
13470 // Ops 0 and 1 are swapped.
13471 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13474 case X86::BI__builtin_ia32_pswapdsf:
13475 case X86::BI__builtin_ia32_pswapdsi: {
13476 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
13477 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
13478 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
13479 return Builder.CreateCall(F, Ops, "pswapd");
13481 case X86::BI__builtin_ia32_rdrand16_step:
13482 case X86::BI__builtin_ia32_rdrand32_step:
13483 case X86::BI__builtin_ia32_rdrand64_step:
13484 case X86::BI__builtin_ia32_rdseed16_step:
13485 case X86::BI__builtin_ia32_rdseed32_step:
13486 case X86::BI__builtin_ia32_rdseed64_step: {
13488 switch (BuiltinID) {
13489 default: llvm_unreachable("Unsupported intrinsic!");
13490 case X86::BI__builtin_ia32_rdrand16_step:
13491 ID = Intrinsic::x86_rdrand_16;
13493 case X86::BI__builtin_ia32_rdrand32_step:
13494 ID = Intrinsic::x86_rdrand_32;
13496 case X86::BI__builtin_ia32_rdrand64_step:
13497 ID = Intrinsic::x86_rdrand_64;
13499 case X86::BI__builtin_ia32_rdseed16_step:
13500 ID = Intrinsic::x86_rdseed_16;
13502 case X86::BI__builtin_ia32_rdseed32_step:
13503 ID = Intrinsic::x86_rdseed_32;
13505 case X86::BI__builtin_ia32_rdseed64_step:
13506 ID = Intrinsic::x86_rdseed_64;
13510 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
13511 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
13513 return Builder.CreateExtractValue(Call, 1);
13515 case X86::BI__builtin_ia32_addcarryx_u32:
13516 case X86::BI__builtin_ia32_addcarryx_u64:
13517 case X86::BI__builtin_ia32_subborrow_u32:
13518 case X86::BI__builtin_ia32_subborrow_u64: {
13520 switch (BuiltinID) {
13521 default: llvm_unreachable("Unsupported intrinsic!");
13522 case X86::BI__builtin_ia32_addcarryx_u32:
13523 IID = Intrinsic::x86_addcarry_32;
13525 case X86::BI__builtin_ia32_addcarryx_u64:
13526 IID = Intrinsic::x86_addcarry_64;
13528 case X86::BI__builtin_ia32_subborrow_u32:
13529 IID = Intrinsic::x86_subborrow_32;
13531 case X86::BI__builtin_ia32_subborrow_u64:
13532 IID = Intrinsic::x86_subborrow_64;
13536 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
13537 { Ops[0], Ops[1], Ops[2] });
13538 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
13540 return Builder.CreateExtractValue(Call, 0);
13543 case X86::BI__builtin_ia32_fpclassps128_mask:
13544 case X86::BI__builtin_ia32_fpclassps256_mask:
13545 case X86::BI__builtin_ia32_fpclassps512_mask:
13546 case X86::BI__builtin_ia32_fpclasspd128_mask:
13547 case X86::BI__builtin_ia32_fpclasspd256_mask:
13548 case X86::BI__builtin_ia32_fpclasspd512_mask: {
13550 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
13551 Value *MaskIn = Ops[2];
13552 Ops.erase(&Ops[2]);
13555 switch (BuiltinID) {
13556 default: llvm_unreachable("Unsupported intrinsic!");
13557 case X86::BI__builtin_ia32_fpclassps128_mask:
13558 ID = Intrinsic::x86_avx512_fpclass_ps_128;
13560 case X86::BI__builtin_ia32_fpclassps256_mask:
13561 ID = Intrinsic::x86_avx512_fpclass_ps_256;
13563 case X86::BI__builtin_ia32_fpclassps512_mask:
13564 ID = Intrinsic::x86_avx512_fpclass_ps_512;
13566 case X86::BI__builtin_ia32_fpclasspd128_mask:
13567 ID = Intrinsic::x86_avx512_fpclass_pd_128;
13569 case X86::BI__builtin_ia32_fpclasspd256_mask:
13570 ID = Intrinsic::x86_avx512_fpclass_pd_256;
13572 case X86::BI__builtin_ia32_fpclasspd512_mask:
13573 ID = Intrinsic::x86_avx512_fpclass_pd_512;
13577 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13578 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
13581 case X86::BI__builtin_ia32_vp2intersect_q_512:
13582 case X86::BI__builtin_ia32_vp2intersect_q_256:
13583 case X86::BI__builtin_ia32_vp2intersect_q_128:
13584 case X86::BI__builtin_ia32_vp2intersect_d_512:
13585 case X86::BI__builtin_ia32_vp2intersect_d_256:
13586 case X86::BI__builtin_ia32_vp2intersect_d_128: {
13588 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
13591 switch (BuiltinID) {
13592 default: llvm_unreachable("Unsupported intrinsic!");
13593 case X86::BI__builtin_ia32_vp2intersect_q_512:
13594 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
13596 case X86::BI__builtin_ia32_vp2intersect_q_256:
13597 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
13599 case X86::BI__builtin_ia32_vp2intersect_q_128:
13600 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
13602 case X86::BI__builtin_ia32_vp2intersect_d_512:
13603 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
13605 case X86::BI__builtin_ia32_vp2intersect_d_256:
13606 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
13608 case X86::BI__builtin_ia32_vp2intersect_d_128:
13609 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
13613 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
13614 Value *Result = Builder.CreateExtractValue(Call, 0);
13615 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13616 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
13618 Result = Builder.CreateExtractValue(Call, 1);
13619 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13620 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
13623 case X86::BI__builtin_ia32_vpmultishiftqb128:
13624 case X86::BI__builtin_ia32_vpmultishiftqb256:
13625 case X86::BI__builtin_ia32_vpmultishiftqb512: {
13627 switch (BuiltinID) {
13628 default: llvm_unreachable("Unsupported intrinsic!");
13629 case X86::BI__builtin_ia32_vpmultishiftqb128:
13630 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
13632 case X86::BI__builtin_ia32_vpmultishiftqb256:
13633 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
13635 case X86::BI__builtin_ia32_vpmultishiftqb512:
13636 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
13640 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13643 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
13644 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
13645 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
13647 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
13648 Value *MaskIn = Ops[2];
13649 Ops.erase(&Ops[2]);
13652 switch (BuiltinID) {
13653 default: llvm_unreachable("Unsupported intrinsic!");
13654 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
13655 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
13657 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
13658 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
13660 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
13661 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
13665 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13666 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
13669 // packed comparison intrinsics
13670 case X86::BI__builtin_ia32_cmpeqps:
13671 case X86::BI__builtin_ia32_cmpeqpd:
13672 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
13673 case X86::BI__builtin_ia32_cmpltps:
13674 case X86::BI__builtin_ia32_cmpltpd:
13675 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
13676 case X86::BI__builtin_ia32_cmpleps:
13677 case X86::BI__builtin_ia32_cmplepd:
13678 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
13679 case X86::BI__builtin_ia32_cmpunordps:
13680 case X86::BI__builtin_ia32_cmpunordpd:
13681 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
13682 case X86::BI__builtin_ia32_cmpneqps:
13683 case X86::BI__builtin_ia32_cmpneqpd:
13684 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
13685 case X86::BI__builtin_ia32_cmpnltps:
13686 case X86::BI__builtin_ia32_cmpnltpd:
13687 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
13688 case X86::BI__builtin_ia32_cmpnleps:
13689 case X86::BI__builtin_ia32_cmpnlepd:
13690 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
13691 case X86::BI__builtin_ia32_cmpordps:
13692 case X86::BI__builtin_ia32_cmpordpd:
13693 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
13694 case X86::BI__builtin_ia32_cmpps:
13695 case X86::BI__builtin_ia32_cmpps256:
13696 case X86::BI__builtin_ia32_cmppd:
13697 case X86::BI__builtin_ia32_cmppd256:
13698 case X86::BI__builtin_ia32_cmpps128_mask:
13699 case X86::BI__builtin_ia32_cmpps256_mask:
13700 case X86::BI__builtin_ia32_cmpps512_mask:
13701 case X86::BI__builtin_ia32_cmppd128_mask:
13702 case X86::BI__builtin_ia32_cmppd256_mask:
13703 case X86::BI__builtin_ia32_cmppd512_mask: {
13704 // Lowering vector comparisons to fcmp instructions, while
13705 // ignoring signalling behaviour requested
13706 // ignoring rounding mode requested
13707 // This is is only possible as long as FENV_ACCESS is not implemented.
13708 // See also: https://reviews.llvm.org/D45616
13710 // The third argument is the comparison condition, and integer in the
13712 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
13714 // Lowering to IR fcmp instruction.
13715 // Ignoring requested signaling behaviour,
13716 // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
13717 FCmpInst::Predicate Pred;
13719 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
13720 // behavior is inverted. We'll handle that after the switch.
13721 switch (CC & 0xf) {
13722 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
13723 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
13724 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
13725 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
13726 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
13727 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
13728 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
13729 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
13730 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
13731 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
13732 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
13733 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
13734 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
13735 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
13736 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
13737 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
13738 default: llvm_unreachable("Unhandled CC");
13741 // Invert the signalling behavior for 16-31.
13743 IsSignaling = !IsSignaling;
13745 // If the predicate is true or false and we're using constrained intrinsics,
13746 // we don't have a compare intrinsic we can use. Just use the legacy X86
13747 // specific intrinsic.
13748 if ((Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE) &&
13749 Builder.getIsFPConstrained()) {
13752 switch (BuiltinID) {
13753 default: llvm_unreachable("Unexpected builtin");
13754 case X86::BI__builtin_ia32_cmpps:
13755 IID = Intrinsic::x86_sse_cmp_ps;
13757 case X86::BI__builtin_ia32_cmpps256:
13758 IID = Intrinsic::x86_avx_cmp_ps_256;
13760 case X86::BI__builtin_ia32_cmppd:
13761 IID = Intrinsic::x86_sse2_cmp_pd;
13763 case X86::BI__builtin_ia32_cmppd256:
13764 IID = Intrinsic::x86_avx_cmp_pd_256;
13766 case X86::BI__builtin_ia32_cmpps512_mask:
13767 IID = Intrinsic::x86_avx512_cmp_ps_512;
13769 case X86::BI__builtin_ia32_cmppd512_mask:
13770 IID = Intrinsic::x86_avx512_cmp_pd_512;
13772 case X86::BI__builtin_ia32_cmpps128_mask:
13773 IID = Intrinsic::x86_avx512_cmp_ps_128;
13775 case X86::BI__builtin_ia32_cmpps256_mask:
13776 IID = Intrinsic::x86_avx512_cmp_ps_256;
13778 case X86::BI__builtin_ia32_cmppd128_mask:
13779 IID = Intrinsic::x86_avx512_cmp_pd_128;
13781 case X86::BI__builtin_ia32_cmppd256_mask:
13782 IID = Intrinsic::x86_avx512_cmp_pd_256;
13786 Function *Intr = CGM.getIntrinsic(IID);
13787 if (cast<llvm::VectorType>(Intr->getReturnType())
13789 ->isIntegerTy(1)) {
13791 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
13792 Value *MaskIn = Ops[3];
13793 Ops.erase(&Ops[3]);
13795 Value *Cmp = Builder.CreateCall(Intr, Ops);
13796 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, MaskIn);
13799 return Builder.CreateCall(Intr, Ops);
13802 // Builtins without the _mask suffix return a vector of integers
13803 // of the same width as the input vectors
13804 switch (BuiltinID) {
13805 case X86::BI__builtin_ia32_cmpps512_mask:
13806 case X86::BI__builtin_ia32_cmppd512_mask:
13807 case X86::BI__builtin_ia32_cmpps128_mask:
13808 case X86::BI__builtin_ia32_cmpps256_mask:
13809 case X86::BI__builtin_ia32_cmppd128_mask:
13810 case X86::BI__builtin_ia32_cmppd256_mask: {
13811 // FIXME: Support SAE.
13813 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
13816 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
13818 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
13819 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
13822 return getVectorFCmpIR(Pred, IsSignaling);
13826 // SSE scalar comparison intrinsics
13827 case X86::BI__builtin_ia32_cmpeqss:
13828 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
13829 case X86::BI__builtin_ia32_cmpltss:
13830 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
13831 case X86::BI__builtin_ia32_cmpless:
13832 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
13833 case X86::BI__builtin_ia32_cmpunordss:
13834 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
13835 case X86::BI__builtin_ia32_cmpneqss:
13836 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
13837 case X86::BI__builtin_ia32_cmpnltss:
13838 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
13839 case X86::BI__builtin_ia32_cmpnless:
13840 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
13841 case X86::BI__builtin_ia32_cmpordss:
13842 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
13843 case X86::BI__builtin_ia32_cmpeqsd:
13844 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
13845 case X86::BI__builtin_ia32_cmpltsd:
13846 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
13847 case X86::BI__builtin_ia32_cmplesd:
13848 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
13849 case X86::BI__builtin_ia32_cmpunordsd:
13850 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
13851 case X86::BI__builtin_ia32_cmpneqsd:
13852 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
13853 case X86::BI__builtin_ia32_cmpnltsd:
13854 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
13855 case X86::BI__builtin_ia32_cmpnlesd:
13856 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
13857 case X86::BI__builtin_ia32_cmpordsd:
13858 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
13860 // f16c half2float intrinsics
13861 case X86::BI__builtin_ia32_vcvtph2ps:
13862 case X86::BI__builtin_ia32_vcvtph2ps256:
13863 case X86::BI__builtin_ia32_vcvtph2ps_mask:
13864 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
13865 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
13866 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
13868 // AVX512 bf16 intrinsics
13869 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
13870 Ops[2] = getMaskVecValue(
13872 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements());
13873 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
13874 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13876 case X86::BI__builtin_ia32_cvtsbf162ss_32:
13877 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
13879 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
13880 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
13882 switch (BuiltinID) {
13883 default: llvm_unreachable("Unsupported intrinsic!");
13884 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
13885 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
13887 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
13888 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
13891 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
13892 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
13895 case X86::BI__emul:
13896 case X86::BI__emulu: {
13897 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
13898 bool isSigned = (BuiltinID == X86::BI__emul);
13899 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
13900 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
13901 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
13903 case X86::BI__mulh:
13904 case X86::BI__umulh:
13905 case X86::BI_mul128:
13906 case X86::BI_umul128: {
13907 llvm::Type *ResType = ConvertType(E->getType());
13908 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
13910 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
13911 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
13912 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
13914 Value *MulResult, *HigherBits;
13916 MulResult = Builder.CreateNSWMul(LHS, RHS);
13917 HigherBits = Builder.CreateAShr(MulResult, 64);
13919 MulResult = Builder.CreateNUWMul(LHS, RHS);
13920 HigherBits = Builder.CreateLShr(MulResult, 64);
13922 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
13924 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
13927 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
13928 Builder.CreateStore(HigherBits, HighBitsAddress);
13929 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
13932 case X86::BI__faststorefence: {
13933 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
13934 llvm::SyncScope::System);
13936 case X86::BI__shiftleft128:
13937 case X86::BI__shiftright128: {
13938 // FIXME: Once fshl/fshr no longer add an unneeded and and cmov, do this:
13939 // llvm::Function *F = CGM.getIntrinsic(
13940 // BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
13942 // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
13943 // return Builder.CreateCall(F, Ops);
13944 llvm::Type *Int128Ty = Builder.getInt128Ty();
13945 Value *HighPart128 =
13946 Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64);
13947 Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty);
13948 Value *Val = Builder.CreateOr(HighPart128, LowPart128);
13949 Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
13950 llvm::ConstantInt::get(Int128Ty, 0x3f));
13952 if (BuiltinID == X86::BI__shiftleft128)
13953 Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
13955 Res = Builder.CreateLShr(Val, Amt);
13956 return Builder.CreateTrunc(Res, Int64Ty);
13958 case X86::BI_ReadWriteBarrier:
13959 case X86::BI_ReadBarrier:
13960 case X86::BI_WriteBarrier: {
13961 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
13962 llvm::SyncScope::SingleThread);
13964 case X86::BI_BitScanForward:
13965 case X86::BI_BitScanForward64:
13966 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
13967 case X86::BI_BitScanReverse:
13968 case X86::BI_BitScanReverse64:
13969 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
13971 case X86::BI_InterlockedAnd64:
13972 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
13973 case X86::BI_InterlockedExchange64:
13974 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
13975 case X86::BI_InterlockedExchangeAdd64:
13976 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
13977 case X86::BI_InterlockedExchangeSub64:
13978 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
13979 case X86::BI_InterlockedOr64:
13980 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
13981 case X86::BI_InterlockedXor64:
13982 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
13983 case X86::BI_InterlockedDecrement64:
13984 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
13985 case X86::BI_InterlockedIncrement64:
13986 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
13987 case X86::BI_InterlockedCompareExchange128: {
13988 // InterlockedCompareExchange128 doesn't directly refer to 128bit ints,
13989 // instead it takes pointers to 64bit ints for Destination and
13990 // ComparandResult, and exchange is taken as two 64bit ints (high & low).
13991 // The previous value is written to ComparandResult, and success is
13994 llvm::Type *Int128Ty = Builder.getInt128Ty();
13995 llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
13997 Value *Destination =
13998 Builder.CreateBitCast(Ops[0], Int128PtrTy);
13999 Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
14000 Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
14001 Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
14002 getContext().toCharUnitsFromBits(128));
14004 Value *Exchange = Builder.CreateOr(
14005 Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
14008 Value *Comparand = Builder.CreateLoad(ComparandResult);
14010 AtomicCmpXchgInst *CXI =
14011 Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
14012 AtomicOrdering::SequentiallyConsistent,
14013 AtomicOrdering::SequentiallyConsistent);
14014 CXI->setVolatile(true);
14016 // Write the result back to the inout pointer.
14017 Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);
14019 // Get the success boolean and zero extend it to i8.
14020 Value *Success = Builder.CreateExtractValue(CXI, 1);
14021 return Builder.CreateZExt(Success, ConvertType(E->getType()));
14024 case X86::BI_AddressOfReturnAddress: {
14026 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
14027 return Builder.CreateCall(F);
14029 case X86::BI__stosb: {
14030 // We treat __stosb as a volatile memset - it may not generate "rep stosb"
14031 // instruction, but it will create a memset that won't be optimized away.
14032 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
14035 // llvm.trap makes a ud2a instruction on x86.
14036 return EmitTrapCall(Intrinsic::trap);
14037 case X86::BI__int2c: {
14038 // This syscall signals a driver assertion failure in x86 NT kernels.
14039 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
14040 llvm::InlineAsm *IA =
14041 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
14042 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
14043 getLLVMContext(), llvm::AttributeList::FunctionIndex,
14044 llvm::Attribute::NoReturn);
14045 llvm::CallInst *CI = Builder.CreateCall(IA);
14046 CI->setAttributes(NoReturnAttr);
14049 case X86::BI__readfsbyte:
14050 case X86::BI__readfsword:
14051 case X86::BI__readfsdword:
14052 case X86::BI__readfsqword: {
14053 llvm::Type *IntTy = ConvertType(E->getType());
14055 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
14056 LoadInst *Load = Builder.CreateAlignedLoad(
14057 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14058 Load->setVolatile(true);
14061 case X86::BI__readgsbyte:
14062 case X86::BI__readgsword:
14063 case X86::BI__readgsdword:
14064 case X86::BI__readgsqword: {
14065 llvm::Type *IntTy = ConvertType(E->getType());
14067 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
14068 LoadInst *Load = Builder.CreateAlignedLoad(
14069 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14070 Load->setVolatile(true);
14073 case X86::BI__builtin_ia32_paddsb512:
14074 case X86::BI__builtin_ia32_paddsw512:
14075 case X86::BI__builtin_ia32_paddsb256:
14076 case X86::BI__builtin_ia32_paddsw256:
14077 case X86::BI__builtin_ia32_paddsb128:
14078 case X86::BI__builtin_ia32_paddsw128:
14079 return EmitX86AddSubSatExpr(*this, Ops, true, true);
14080 case X86::BI__builtin_ia32_paddusb512:
14081 case X86::BI__builtin_ia32_paddusw512:
14082 case X86::BI__builtin_ia32_paddusb256:
14083 case X86::BI__builtin_ia32_paddusw256:
14084 case X86::BI__builtin_ia32_paddusb128:
14085 case X86::BI__builtin_ia32_paddusw128:
14086 return EmitX86AddSubSatExpr(*this, Ops, false, true);
14087 case X86::BI__builtin_ia32_psubsb512:
14088 case X86::BI__builtin_ia32_psubsw512:
14089 case X86::BI__builtin_ia32_psubsb256:
14090 case X86::BI__builtin_ia32_psubsw256:
14091 case X86::BI__builtin_ia32_psubsb128:
14092 case X86::BI__builtin_ia32_psubsw128:
14093 return EmitX86AddSubSatExpr(*this, Ops, true, false);
14094 case X86::BI__builtin_ia32_psubusb512:
14095 case X86::BI__builtin_ia32_psubusw512:
14096 case X86::BI__builtin_ia32_psubusb256:
14097 case X86::BI__builtin_ia32_psubusw256:
14098 case X86::BI__builtin_ia32_psubusb128:
14099 case X86::BI__builtin_ia32_psubusw128:
14100 return EmitX86AddSubSatExpr(*this, Ops, false, false);
14104 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
14105 const CallExpr *E) {
14106 SmallVector<Value*, 4> Ops;
14108 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
14109 Ops.push_back(EmitScalarExpr(E->getArg(i)));
14111 Intrinsic::ID ID = Intrinsic::not_intrinsic;
14113 switch (BuiltinID) {
14114 default: return nullptr;
14116 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
14117 // call __builtin_readcyclecounter.
14118 case PPC::BI__builtin_ppc_get_timebase:
14119 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
14121 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
14122 case PPC::BI__builtin_altivec_lvx:
14123 case PPC::BI__builtin_altivec_lvxl:
14124 case PPC::BI__builtin_altivec_lvebx:
14125 case PPC::BI__builtin_altivec_lvehx:
14126 case PPC::BI__builtin_altivec_lvewx:
14127 case PPC::BI__builtin_altivec_lvsl:
14128 case PPC::BI__builtin_altivec_lvsr:
14129 case PPC::BI__builtin_vsx_lxvd2x:
14130 case PPC::BI__builtin_vsx_lxvw4x:
14131 case PPC::BI__builtin_vsx_lxvd2x_be:
14132 case PPC::BI__builtin_vsx_lxvw4x_be:
14133 case PPC::BI__builtin_vsx_lxvl:
14134 case PPC::BI__builtin_vsx_lxvll:
14136 if(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
14137 BuiltinID == PPC::BI__builtin_vsx_lxvll){
14138 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
14140 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14141 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
14145 switch (BuiltinID) {
14146 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
14147 case PPC::BI__builtin_altivec_lvx:
14148 ID = Intrinsic::ppc_altivec_lvx;
14150 case PPC::BI__builtin_altivec_lvxl:
14151 ID = Intrinsic::ppc_altivec_lvxl;
14153 case PPC::BI__builtin_altivec_lvebx:
14154 ID = Intrinsic::ppc_altivec_lvebx;
14156 case PPC::BI__builtin_altivec_lvehx:
14157 ID = Intrinsic::ppc_altivec_lvehx;
14159 case PPC::BI__builtin_altivec_lvewx:
14160 ID = Intrinsic::ppc_altivec_lvewx;
14162 case PPC::BI__builtin_altivec_lvsl:
14163 ID = Intrinsic::ppc_altivec_lvsl;
14165 case PPC::BI__builtin_altivec_lvsr:
14166 ID = Intrinsic::ppc_altivec_lvsr;
14168 case PPC::BI__builtin_vsx_lxvd2x:
14169 ID = Intrinsic::ppc_vsx_lxvd2x;
14171 case PPC::BI__builtin_vsx_lxvw4x:
14172 ID = Intrinsic::ppc_vsx_lxvw4x;
14174 case PPC::BI__builtin_vsx_lxvd2x_be:
14175 ID = Intrinsic::ppc_vsx_lxvd2x_be;
14177 case PPC::BI__builtin_vsx_lxvw4x_be:
14178 ID = Intrinsic::ppc_vsx_lxvw4x_be;
14180 case PPC::BI__builtin_vsx_lxvl:
14181 ID = Intrinsic::ppc_vsx_lxvl;
14183 case PPC::BI__builtin_vsx_lxvll:
14184 ID = Intrinsic::ppc_vsx_lxvll;
14187 llvm::Function *F = CGM.getIntrinsic(ID);
14188 return Builder.CreateCall(F, Ops, "");
14191 // vec_st, vec_xst_be
14192 case PPC::BI__builtin_altivec_stvx:
14193 case PPC::BI__builtin_altivec_stvxl:
14194 case PPC::BI__builtin_altivec_stvebx:
14195 case PPC::BI__builtin_altivec_stvehx:
14196 case PPC::BI__builtin_altivec_stvewx:
14197 case PPC::BI__builtin_vsx_stxvd2x:
14198 case PPC::BI__builtin_vsx_stxvw4x:
14199 case PPC::BI__builtin_vsx_stxvd2x_be:
14200 case PPC::BI__builtin_vsx_stxvw4x_be:
14201 case PPC::BI__builtin_vsx_stxvl:
14202 case PPC::BI__builtin_vsx_stxvll:
14204 if(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
14205 BuiltinID == PPC::BI__builtin_vsx_stxvll ){
14206 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14208 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
14209 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
14213 switch (BuiltinID) {
14214 default: llvm_unreachable("Unsupported st intrinsic!");
14215 case PPC::BI__builtin_altivec_stvx:
14216 ID = Intrinsic::ppc_altivec_stvx;
14218 case PPC::BI__builtin_altivec_stvxl:
14219 ID = Intrinsic::ppc_altivec_stvxl;
14221 case PPC::BI__builtin_altivec_stvebx:
14222 ID = Intrinsic::ppc_altivec_stvebx;
14224 case PPC::BI__builtin_altivec_stvehx:
14225 ID = Intrinsic::ppc_altivec_stvehx;
14227 case PPC::BI__builtin_altivec_stvewx:
14228 ID = Intrinsic::ppc_altivec_stvewx;
14230 case PPC::BI__builtin_vsx_stxvd2x:
14231 ID = Intrinsic::ppc_vsx_stxvd2x;
14233 case PPC::BI__builtin_vsx_stxvw4x:
14234 ID = Intrinsic::ppc_vsx_stxvw4x;
14236 case PPC::BI__builtin_vsx_stxvd2x_be:
14237 ID = Intrinsic::ppc_vsx_stxvd2x_be;
14239 case PPC::BI__builtin_vsx_stxvw4x_be:
14240 ID = Intrinsic::ppc_vsx_stxvw4x_be;
14242 case PPC::BI__builtin_vsx_stxvl:
14243 ID = Intrinsic::ppc_vsx_stxvl;
14245 case PPC::BI__builtin_vsx_stxvll:
14246 ID = Intrinsic::ppc_vsx_stxvll;
14249 llvm::Function *F = CGM.getIntrinsic(ID);
14250 return Builder.CreateCall(F, Ops, "");
14253 case PPC::BI__builtin_vsx_xvsqrtsp:
14254 case PPC::BI__builtin_vsx_xvsqrtdp: {
14255 llvm::Type *ResultType = ConvertType(E->getType());
14256 Value *X = EmitScalarExpr(E->getArg(0));
14257 if (Builder.getIsFPConstrained()) {
14258 llvm::Function *F = CGM.getIntrinsic(
14259 Intrinsic::experimental_constrained_sqrt, ResultType);
14260 return Builder.CreateConstrainedFPCall(F, X);
14262 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
14263 return Builder.CreateCall(F, X);
14266 // Count leading zeros
14267 case PPC::BI__builtin_altivec_vclzb:
14268 case PPC::BI__builtin_altivec_vclzh:
14269 case PPC::BI__builtin_altivec_vclzw:
14270 case PPC::BI__builtin_altivec_vclzd: {
14271 llvm::Type *ResultType = ConvertType(E->getType());
14272 Value *X = EmitScalarExpr(E->getArg(0));
14273 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14274 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
14275 return Builder.CreateCall(F, {X, Undef});
14277 case PPC::BI__builtin_altivec_vctzb:
14278 case PPC::BI__builtin_altivec_vctzh:
14279 case PPC::BI__builtin_altivec_vctzw:
14280 case PPC::BI__builtin_altivec_vctzd: {
14281 llvm::Type *ResultType = ConvertType(E->getType());
14282 Value *X = EmitScalarExpr(E->getArg(0));
14283 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14284 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
14285 return Builder.CreateCall(F, {X, Undef});
14287 case PPC::BI__builtin_altivec_vpopcntb:
14288 case PPC::BI__builtin_altivec_vpopcnth:
14289 case PPC::BI__builtin_altivec_vpopcntw:
14290 case PPC::BI__builtin_altivec_vpopcntd: {
14291 llvm::Type *ResultType = ConvertType(E->getType());
14292 Value *X = EmitScalarExpr(E->getArg(0));
14293 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
14294 return Builder.CreateCall(F, X);
14297 case PPC::BI__builtin_vsx_xvcpsgnsp:
14298 case PPC::BI__builtin_vsx_xvcpsgndp: {
14299 llvm::Type *ResultType = ConvertType(E->getType());
14300 Value *X = EmitScalarExpr(E->getArg(0));
14301 Value *Y = EmitScalarExpr(E->getArg(1));
14302 ID = Intrinsic::copysign;
14303 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14304 return Builder.CreateCall(F, {X, Y});
14306 // Rounding/truncation
14307 case PPC::BI__builtin_vsx_xvrspip:
14308 case PPC::BI__builtin_vsx_xvrdpip:
14309 case PPC::BI__builtin_vsx_xvrdpim:
14310 case PPC::BI__builtin_vsx_xvrspim:
14311 case PPC::BI__builtin_vsx_xvrdpi:
14312 case PPC::BI__builtin_vsx_xvrspi:
14313 case PPC::BI__builtin_vsx_xvrdpic:
14314 case PPC::BI__builtin_vsx_xvrspic:
14315 case PPC::BI__builtin_vsx_xvrdpiz:
14316 case PPC::BI__builtin_vsx_xvrspiz: {
14317 llvm::Type *ResultType = ConvertType(E->getType());
14318 Value *X = EmitScalarExpr(E->getArg(0));
14319 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
14320 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
14321 ID = Builder.getIsFPConstrained()
14322 ? Intrinsic::experimental_constrained_floor
14323 : Intrinsic::floor;
14324 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
14325 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
14326 ID = Builder.getIsFPConstrained()
14327 ? Intrinsic::experimental_constrained_round
14328 : Intrinsic::round;
14329 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
14330 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
14331 ID = Builder.getIsFPConstrained()
14332 ? Intrinsic::experimental_constrained_nearbyint
14333 : Intrinsic::nearbyint;
14334 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
14335 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
14336 ID = Builder.getIsFPConstrained()
14337 ? Intrinsic::experimental_constrained_ceil
14339 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
14340 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
14341 ID = Builder.getIsFPConstrained()
14342 ? Intrinsic::experimental_constrained_trunc
14343 : Intrinsic::trunc;
14344 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14345 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
14346 : Builder.CreateCall(F, X);
14350 case PPC::BI__builtin_vsx_xvabsdp:
14351 case PPC::BI__builtin_vsx_xvabssp: {
14352 llvm::Type *ResultType = ConvertType(E->getType());
14353 Value *X = EmitScalarExpr(E->getArg(0));
14354 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
14355 return Builder.CreateCall(F, X);
14359 case PPC::BI__builtin_vsx_xvmaddadp:
14360 case PPC::BI__builtin_vsx_xvmaddasp:
14361 case PPC::BI__builtin_vsx_xvnmaddadp:
14362 case PPC::BI__builtin_vsx_xvnmaddasp:
14363 case PPC::BI__builtin_vsx_xvmsubadp:
14364 case PPC::BI__builtin_vsx_xvmsubasp:
14365 case PPC::BI__builtin_vsx_xvnmsubadp:
14366 case PPC::BI__builtin_vsx_xvnmsubasp: {
14367 llvm::Type *ResultType = ConvertType(E->getType());
14368 Value *X = EmitScalarExpr(E->getArg(0));
14369 Value *Y = EmitScalarExpr(E->getArg(1));
14370 Value *Z = EmitScalarExpr(E->getArg(2));
14372 if (Builder.getIsFPConstrained())
14373 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
14375 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
14376 switch (BuiltinID) {
14377 case PPC::BI__builtin_vsx_xvmaddadp:
14378 case PPC::BI__builtin_vsx_xvmaddasp:
14379 if (Builder.getIsFPConstrained())
14380 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
14382 return Builder.CreateCall(F, {X, Y, Z});
14383 case PPC::BI__builtin_vsx_xvnmaddadp:
14384 case PPC::BI__builtin_vsx_xvnmaddasp:
14385 if (Builder.getIsFPConstrained())
14386 return Builder.CreateFNeg(
14387 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
14389 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
14390 case PPC::BI__builtin_vsx_xvmsubadp:
14391 case PPC::BI__builtin_vsx_xvmsubasp:
14392 if (Builder.getIsFPConstrained())
14393 return Builder.CreateConstrainedFPCall(
14394 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14396 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14397 case PPC::BI__builtin_vsx_xvnmsubadp:
14398 case PPC::BI__builtin_vsx_xvnmsubasp:
14399 if (Builder.getIsFPConstrained())
14400 return Builder.CreateFNeg(
14401 Builder.CreateConstrainedFPCall(
14402 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14405 return Builder.CreateFNeg(
14406 Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14409 llvm_unreachable("Unknown FMA operation");
14410 return nullptr; // Suppress no-return warning
14413 case PPC::BI__builtin_vsx_insertword: {
14414 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
14416 // Third argument is a compile time constant int. It must be clamped to
14417 // to the range [0, 12].
14418 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14420 "Third arg to xxinsertw intrinsic must be constant integer");
14421 const int64_t MaxIndex = 12;
14422 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14424 // The builtin semantics don't exactly match the xxinsertw instructions
14425 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
14426 // word from the first argument, and inserts it in the second argument. The
14427 // instruction extracts the word from its second input register and inserts
14428 // it into its first input register, so swap the first and second arguments.
14429 std::swap(Ops[0], Ops[1]);
14431 // Need to cast the second argument from a vector of unsigned int to a
14432 // vector of long long.
14434 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14436 if (getTarget().isLittleEndian()) {
14437 // Reverse the double words in the vector we will extract from.
14439 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14440 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
14442 // Reverse the index.
14443 Index = MaxIndex - Index;
14446 // Intrinsic expects the first arg to be a vector of int.
14448 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14449 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
14450 return Builder.CreateCall(F, Ops);
14453 case PPC::BI__builtin_vsx_extractuword: {
14454 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
14456 // Intrinsic expects the first argument to be a vector of doublewords.
14458 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14460 // The second argument is a compile time constant int that needs to
14461 // be clamped to the range [0, 12].
14462 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
14464 "Second Arg to xxextractuw intrinsic must be a constant integer!");
14465 const int64_t MaxIndex = 12;
14466 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14468 if (getTarget().isLittleEndian()) {
14469 // Reverse the index.
14470 Index = MaxIndex - Index;
14471 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14473 // Emit the call, then reverse the double words of the results vector.
14474 Value *Call = Builder.CreateCall(F, Ops);
14476 Value *ShuffleCall =
14477 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
14478 return ShuffleCall;
14480 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14481 return Builder.CreateCall(F, Ops);
14485 case PPC::BI__builtin_vsx_xxpermdi: {
14486 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14487 assert(ArgCI && "Third arg must be constant integer!");
14489 unsigned Index = ArgCI->getZExtValue();
14491 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14493 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14495 // Account for endianness by treating this as just a shuffle. So we use the
14496 // same indices for both LE and BE in order to produce expected results in
14498 int ElemIdx0 = (Index & 2) >> 1;
14499 int ElemIdx1 = 2 + (Index & 1);
14501 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
14502 Value *ShuffleCall =
14503 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14504 QualType BIRetType = E->getType();
14505 auto RetTy = ConvertType(BIRetType);
14506 return Builder.CreateBitCast(ShuffleCall, RetTy);
14509 case PPC::BI__builtin_vsx_xxsldwi: {
14510 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14511 assert(ArgCI && "Third argument must be a compile time constant");
14512 unsigned Index = ArgCI->getZExtValue() & 0x3;
14514 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14516 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
14518 // Create a shuffle mask
14523 if (getTarget().isLittleEndian()) {
14524 // Little endian element N comes from element 8+N-Index of the
14525 // concatenated wide vector (of course, using modulo arithmetic on
14526 // the total number of elements).
14527 ElemIdx0 = (8 - Index) % 8;
14528 ElemIdx1 = (9 - Index) % 8;
14529 ElemIdx2 = (10 - Index) % 8;
14530 ElemIdx3 = (11 - Index) % 8;
14532 // Big endian ElemIdx<N> = Index + N
14534 ElemIdx1 = Index + 1;
14535 ElemIdx2 = Index + 2;
14536 ElemIdx3 = Index + 3;
14539 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
14540 Value *ShuffleCall =
14541 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14542 QualType BIRetType = E->getType();
14543 auto RetTy = ConvertType(BIRetType);
14544 return Builder.CreateBitCast(ShuffleCall, RetTy);
14547 case PPC::BI__builtin_pack_vector_int128: {
14548 bool isLittleEndian = getTarget().isLittleEndian();
14549 Value *UndefValue =
14550 llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
14551 Value *Res = Builder.CreateInsertElement(
14552 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
14553 Res = Builder.CreateInsertElement(Res, Ops[1],
14554 (uint64_t)(isLittleEndian ? 0 : 1));
14555 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
14558 case PPC::BI__builtin_unpack_vector_int128: {
14559 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
14560 Value *Unpacked = Builder.CreateBitCast(
14561 Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
14563 if (getTarget().isLittleEndian())
14564 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
14566 return Builder.CreateExtractElement(Unpacked, Index);
14572 // If \p E is not null pointer, insert address space cast to match return
14573 // type of \p E if necessary.
14574 Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
14575 const CallExpr *E = nullptr) {
14576 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
14577 auto *Call = CGF.Builder.CreateCall(F);
14578 Call->addAttribute(
14579 AttributeList::ReturnIndex,
14580 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
14581 Call->addAttribute(AttributeList::ReturnIndex,
14582 Attribute::getWithAlignment(Call->getContext(), Align(4)));
14585 QualType BuiltinRetType = E->getType();
14586 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
14587 if (RetTy == Call->getType())
14589 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
14592 // \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
14593 Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
14594 const unsigned XOffset = 4;
14595 auto *DP = EmitAMDGPUDispatchPtr(CGF);
14596 // Indexing the HSA kernel_dispatch_packet struct.
14597 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
14598 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
14600 CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
14601 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
14602 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
14603 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
14604 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
14605 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
14606 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
14607 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
14608 llvm::MDNode::get(CGF.getLLVMContext(), None));
14613 // For processing memory ordering and memory scope arguments of various
14614 // amdgcn builtins.
14615 // \p Order takes a C++11 comptabile memory-ordering specifier and converts
14616 // it into LLVM's memory ordering specifier using atomic C ABI, and writes
14617 // to \p AO. \p Scope takes a const char * and converts it into AMDGCN
14618 // specific SyncScopeID and writes it to \p SSID.
14619 bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
14620 llvm::AtomicOrdering &AO,
14621 llvm::SyncScope::ID &SSID) {
14622 if (isa<llvm::ConstantInt>(Order)) {
14623 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
14625 // Map C11/C++11 memory ordering to LLVM memory ordering
14626 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
14627 case llvm::AtomicOrderingCABI::acquire:
14628 AO = llvm::AtomicOrdering::Acquire;
14630 case llvm::AtomicOrderingCABI::release:
14631 AO = llvm::AtomicOrdering::Release;
14633 case llvm::AtomicOrderingCABI::acq_rel:
14634 AO = llvm::AtomicOrdering::AcquireRelease;
14636 case llvm::AtomicOrderingCABI::seq_cst:
14637 AO = llvm::AtomicOrdering::SequentiallyConsistent;
14639 case llvm::AtomicOrderingCABI::consume:
14640 case llvm::AtomicOrderingCABI::relaxed:
14645 llvm::getConstantStringInfo(Scope, scp);
14646 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
14652 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
14653 const CallExpr *E) {
14654 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
14655 llvm::SyncScope::ID SSID;
14656 switch (BuiltinID) {
14657 case AMDGPU::BI__builtin_amdgcn_div_scale:
14658 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
14659 // Translate from the intrinsics's struct return to the builtin's out
14662 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
14664 llvm::Value *X = EmitScalarExpr(E->getArg(0));
14665 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
14666 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
14668 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
14671 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
14673 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
14674 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
14676 llvm::Type *RealFlagType
14677 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
14679 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
14680 Builder.CreateStore(FlagExt, FlagOutPtr);
14683 case AMDGPU::BI__builtin_amdgcn_div_fmas:
14684 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
14685 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14686 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14687 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14688 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
14690 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
14692 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
14693 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
14696 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
14697 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
14698 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
14699 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
14700 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
14701 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
14702 llvm::SmallVector<llvm::Value *, 6> Args;
14703 for (unsigned I = 0; I != E->getNumArgs(); ++I)
14704 Args.push_back(EmitScalarExpr(E->getArg(I)));
14705 assert(Args.size() == 5 || Args.size() == 6);
14706 if (Args.size() == 5)
14707 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
14709 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
14710 return Builder.CreateCall(F, Args);
14712 case AMDGPU::BI__builtin_amdgcn_div_fixup:
14713 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
14714 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
14715 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
14716 case AMDGPU::BI__builtin_amdgcn_trig_preop:
14717 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
14718 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
14719 case AMDGPU::BI__builtin_amdgcn_rcp:
14720 case AMDGPU::BI__builtin_amdgcn_rcpf:
14721 case AMDGPU::BI__builtin_amdgcn_rcph:
14722 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
14723 case AMDGPU::BI__builtin_amdgcn_sqrt:
14724 case AMDGPU::BI__builtin_amdgcn_sqrtf:
14725 case AMDGPU::BI__builtin_amdgcn_sqrth:
14726 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
14727 case AMDGPU::BI__builtin_amdgcn_rsq:
14728 case AMDGPU::BI__builtin_amdgcn_rsqf:
14729 case AMDGPU::BI__builtin_amdgcn_rsqh:
14730 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
14731 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
14732 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
14733 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
14734 case AMDGPU::BI__builtin_amdgcn_sinf:
14735 case AMDGPU::BI__builtin_amdgcn_sinh:
14736 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
14737 case AMDGPU::BI__builtin_amdgcn_cosf:
14738 case AMDGPU::BI__builtin_amdgcn_cosh:
14739 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
14740 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
14741 return EmitAMDGPUDispatchPtr(*this, E);
14742 case AMDGPU::BI__builtin_amdgcn_log_clampf:
14743 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
14744 case AMDGPU::BI__builtin_amdgcn_ldexp:
14745 case AMDGPU::BI__builtin_amdgcn_ldexpf:
14746 case AMDGPU::BI__builtin_amdgcn_ldexph:
14747 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
14748 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
14749 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
14750 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
14751 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
14752 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
14753 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
14754 Value *Src0 = EmitScalarExpr(E->getArg(0));
14755 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
14756 { Builder.getInt32Ty(), Src0->getType() });
14757 return Builder.CreateCall(F, Src0);
14759 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
14760 Value *Src0 = EmitScalarExpr(E->getArg(0));
14761 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
14762 { Builder.getInt16Ty(), Src0->getType() });
14763 return Builder.CreateCall(F, Src0);
14765 case AMDGPU::BI__builtin_amdgcn_fract:
14766 case AMDGPU::BI__builtin_amdgcn_fractf:
14767 case AMDGPU::BI__builtin_amdgcn_fracth:
14768 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
14769 case AMDGPU::BI__builtin_amdgcn_lerp:
14770 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
14771 case AMDGPU::BI__builtin_amdgcn_ubfe:
14772 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
14773 case AMDGPU::BI__builtin_amdgcn_sbfe:
14774 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
14775 case AMDGPU::BI__builtin_amdgcn_uicmp:
14776 case AMDGPU::BI__builtin_amdgcn_uicmpl:
14777 case AMDGPU::BI__builtin_amdgcn_sicmp:
14778 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
14779 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14780 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14781 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14783 // FIXME-GFX10: How should 32 bit mask be handled?
14784 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
14785 { Builder.getInt64Ty(), Src0->getType() });
14786 return Builder.CreateCall(F, { Src0, Src1, Src2 });
14788 case AMDGPU::BI__builtin_amdgcn_fcmp:
14789 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
14790 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14791 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14792 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14794 // FIXME-GFX10: How should 32 bit mask be handled?
14795 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
14796 { Builder.getInt64Ty(), Src0->getType() });
14797 return Builder.CreateCall(F, { Src0, Src1, Src2 });
14799 case AMDGPU::BI__builtin_amdgcn_class:
14800 case AMDGPU::BI__builtin_amdgcn_classf:
14801 case AMDGPU::BI__builtin_amdgcn_classh:
14802 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
14803 case AMDGPU::BI__builtin_amdgcn_fmed3f:
14804 case AMDGPU::BI__builtin_amdgcn_fmed3h:
14805 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
14806 case AMDGPU::BI__builtin_amdgcn_ds_append:
14807 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
14808 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
14809 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
14810 Value *Src0 = EmitScalarExpr(E->getArg(0));
14811 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
14812 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
14814 case AMDGPU::BI__builtin_amdgcn_read_exec: {
14815 CallInst *CI = cast<CallInst>(
14816 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
14817 CI->setConvergent();
14820 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
14821 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
14822 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
14823 "exec_lo" : "exec_hi";
14824 CallInst *CI = cast<CallInst>(
14825 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
14826 CI->setConvergent();
14830 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
14831 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
14832 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
14833 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
14834 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
14835 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
14837 // amdgcn workgroup size
14838 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
14839 return EmitAMDGPUWorkGroupSize(*this, 0);
14840 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
14841 return EmitAMDGPUWorkGroupSize(*this, 1);
14842 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
14843 return EmitAMDGPUWorkGroupSize(*this, 2);
14846 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
14847 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
14848 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
14849 case AMDGPU::BI__builtin_r600_read_tidig_x:
14850 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
14851 case AMDGPU::BI__builtin_r600_read_tidig_y:
14852 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
14853 case AMDGPU::BI__builtin_r600_read_tidig_z:
14854 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
14855 case AMDGPU::BI__builtin_amdgcn_alignbit: {
14856 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14857 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14858 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14859 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
14860 return Builder.CreateCall(F, { Src0, Src1, Src2 });
14863 case AMDGPU::BI__builtin_amdgcn_fence: {
14864 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
14865 EmitScalarExpr(E->getArg(1)), AO, SSID))
14866 return Builder.CreateFence(AO, SSID);
14869 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
14870 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
14871 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
14872 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
14873 unsigned BuiltinAtomicOp;
14874 llvm::Type *ResultType = ConvertType(E->getType());
14876 switch (BuiltinID) {
14877 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
14878 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
14879 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
14881 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
14882 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
14883 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
14887 Value *Ptr = EmitScalarExpr(E->getArg(0));
14888 Value *Val = EmitScalarExpr(E->getArg(1));
14890 llvm::Function *F =
14891 CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
14893 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
14894 EmitScalarExpr(E->getArg(3)), AO, SSID)) {
14896 // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expects ordering and
14897 // scope as unsigned values
14898 Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
14899 Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
14901 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
14903 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
14904 Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
14906 return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
14915 /// Handle a SystemZ function in which the final argument is a pointer
14916 /// to an int that receives the post-instruction CC value. At the LLVM level
14917 /// this is represented as a function that returns a {result, cc} pair.
14918 static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
14919 unsigned IntrinsicID,
14920 const CallExpr *E) {
14921 unsigned NumArgs = E->getNumArgs() - 1;
14922 SmallVector<Value *, 8> Args(NumArgs);
14923 for (unsigned I = 0; I < NumArgs; ++I)
14924 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
14925 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
14926 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
14927 Value *Call = CGF.Builder.CreateCall(F, Args);
14928 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
14929 CGF.Builder.CreateStore(CC, CCPtr);
14930 return CGF.Builder.CreateExtractValue(Call, 0);
14933 Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
14934 const CallExpr *E) {
14935 switch (BuiltinID) {
14936 case SystemZ::BI__builtin_tbegin: {
14937 Value *TDB = EmitScalarExpr(E->getArg(0));
14938 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
14939 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
14940 return Builder.CreateCall(F, {TDB, Control});
14942 case SystemZ::BI__builtin_tbegin_nofloat: {
14943 Value *TDB = EmitScalarExpr(E->getArg(0));
14944 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
14945 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
14946 return Builder.CreateCall(F, {TDB, Control});
14948 case SystemZ::BI__builtin_tbeginc: {
14949 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
14950 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
14951 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
14952 return Builder.CreateCall(F, {TDB, Control});
14954 case SystemZ::BI__builtin_tabort: {
14955 Value *Data = EmitScalarExpr(E->getArg(0));
14956 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
14957 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
14959 case SystemZ::BI__builtin_non_tx_store: {
14960 Value *Address = EmitScalarExpr(E->getArg(0));
14961 Value *Data = EmitScalarExpr(E->getArg(1));
14962 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
14963 return Builder.CreateCall(F, {Data, Address});
14966 // Vector builtins. Note that most vector builtins are mapped automatically
14967 // to target-specific LLVM intrinsics. The ones handled specially here can
14968 // be represented via standard LLVM IR, which is preferable to enable common
14969 // LLVM optimizations.
14971 case SystemZ::BI__builtin_s390_vpopctb:
14972 case SystemZ::BI__builtin_s390_vpopcth:
14973 case SystemZ::BI__builtin_s390_vpopctf:
14974 case SystemZ::BI__builtin_s390_vpopctg: {
14975 llvm::Type *ResultType = ConvertType(E->getType());
14976 Value *X = EmitScalarExpr(E->getArg(0));
14977 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
14978 return Builder.CreateCall(F, X);
14981 case SystemZ::BI__builtin_s390_vclzb:
14982 case SystemZ::BI__builtin_s390_vclzh:
14983 case SystemZ::BI__builtin_s390_vclzf:
14984 case SystemZ::BI__builtin_s390_vclzg: {
14985 llvm::Type *ResultType = ConvertType(E->getType());
14986 Value *X = EmitScalarExpr(E->getArg(0));
14987 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14988 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
14989 return Builder.CreateCall(F, {X, Undef});
14992 case SystemZ::BI__builtin_s390_vctzb:
14993 case SystemZ::BI__builtin_s390_vctzh:
14994 case SystemZ::BI__builtin_s390_vctzf:
14995 case SystemZ::BI__builtin_s390_vctzg: {
14996 llvm::Type *ResultType = ConvertType(E->getType());
14997 Value *X = EmitScalarExpr(E->getArg(0));
14998 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14999 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
15000 return Builder.CreateCall(F, {X, Undef});
15003 case SystemZ::BI__builtin_s390_vfsqsb:
15004 case SystemZ::BI__builtin_s390_vfsqdb: {
15005 llvm::Type *ResultType = ConvertType(E->getType());
15006 Value *X = EmitScalarExpr(E->getArg(0));
15007 if (Builder.getIsFPConstrained()) {
15008 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
15009 return Builder.CreateConstrainedFPCall(F, { X });
15011 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15012 return Builder.CreateCall(F, X);
15015 case SystemZ::BI__builtin_s390_vfmasb:
15016 case SystemZ::BI__builtin_s390_vfmadb: {
15017 llvm::Type *ResultType = ConvertType(E->getType());
15018 Value *X = EmitScalarExpr(E->getArg(0));
15019 Value *Y = EmitScalarExpr(E->getArg(1));
15020 Value *Z = EmitScalarExpr(E->getArg(2));
15021 if (Builder.getIsFPConstrained()) {
15022 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15023 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
15025 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15026 return Builder.CreateCall(F, {X, Y, Z});
15029 case SystemZ::BI__builtin_s390_vfmssb:
15030 case SystemZ::BI__builtin_s390_vfmsdb: {
15031 llvm::Type *ResultType = ConvertType(E->getType());
15032 Value *X = EmitScalarExpr(E->getArg(0));
15033 Value *Y = EmitScalarExpr(E->getArg(1));
15034 Value *Z = EmitScalarExpr(E->getArg(2));
15035 if (Builder.getIsFPConstrained()) {
15036 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15037 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15039 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15040 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15043 case SystemZ::BI__builtin_s390_vfnmasb:
15044 case SystemZ::BI__builtin_s390_vfnmadb: {
15045 llvm::Type *ResultType = ConvertType(E->getType());
15046 Value *X = EmitScalarExpr(E->getArg(0));
15047 Value *Y = EmitScalarExpr(E->getArg(1));
15048 Value *Z = EmitScalarExpr(E->getArg(2));
15049 if (Builder.getIsFPConstrained()) {
15050 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15051 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
15053 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15054 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
15057 case SystemZ::BI__builtin_s390_vfnmssb:
15058 case SystemZ::BI__builtin_s390_vfnmsdb: {
15059 llvm::Type *ResultType = ConvertType(E->getType());
15060 Value *X = EmitScalarExpr(E->getArg(0));
15061 Value *Y = EmitScalarExpr(E->getArg(1));
15062 Value *Z = EmitScalarExpr(E->getArg(2));
15063 if (Builder.getIsFPConstrained()) {
15064 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15065 Value *NegZ = Builder.CreateFNeg(Z, "sub");
15066 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
15068 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15069 Value *NegZ = Builder.CreateFNeg(Z, "neg");
15070 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
15073 case SystemZ::BI__builtin_s390_vflpsb:
15074 case SystemZ::BI__builtin_s390_vflpdb: {
15075 llvm::Type *ResultType = ConvertType(E->getType());
15076 Value *X = EmitScalarExpr(E->getArg(0));
15077 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15078 return Builder.CreateCall(F, X);
15080 case SystemZ::BI__builtin_s390_vflnsb:
15081 case SystemZ::BI__builtin_s390_vflndb: {
15082 llvm::Type *ResultType = ConvertType(E->getType());
15083 Value *X = EmitScalarExpr(E->getArg(0));
15084 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15085 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
15087 case SystemZ::BI__builtin_s390_vfisb:
15088 case SystemZ::BI__builtin_s390_vfidb: {
15089 llvm::Type *ResultType = ConvertType(E->getType());
15090 Value *X = EmitScalarExpr(E->getArg(0));
15091 // Constant-fold the M4 and M5 mask arguments.
15092 llvm::APSInt M4, M5;
15093 bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
15094 bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
15095 assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
15096 (void)IsConstM4; (void)IsConstM5;
15097 // Check whether this instance can be represented via a LLVM standard
15098 // intrinsic. We only support some combinations of M4 and M5.
15099 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15101 switch (M4.getZExtValue()) {
15103 case 0: // IEEE-inexact exception allowed
15104 switch (M5.getZExtValue()) {
15106 case 0: ID = Intrinsic::rint;
15107 CI = Intrinsic::experimental_constrained_rint; break;
15110 case 4: // IEEE-inexact exception suppressed
15111 switch (M5.getZExtValue()) {
15113 case 0: ID = Intrinsic::nearbyint;
15114 CI = Intrinsic::experimental_constrained_nearbyint; break;
15115 case 1: ID = Intrinsic::round;
15116 CI = Intrinsic::experimental_constrained_round; break;
15117 case 5: ID = Intrinsic::trunc;
15118 CI = Intrinsic::experimental_constrained_trunc; break;
15119 case 6: ID = Intrinsic::ceil;
15120 CI = Intrinsic::experimental_constrained_ceil; break;
15121 case 7: ID = Intrinsic::floor;
15122 CI = Intrinsic::experimental_constrained_floor; break;
15126 if (ID != Intrinsic::not_intrinsic) {
15127 if (Builder.getIsFPConstrained()) {
15128 Function *F = CGM.getIntrinsic(CI, ResultType);
15129 return Builder.CreateConstrainedFPCall(F, X);
15131 Function *F = CGM.getIntrinsic(ID, ResultType);
15132 return Builder.CreateCall(F, X);
15135 switch (BuiltinID) { // FIXME: constrained version?
15136 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
15137 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
15138 default: llvm_unreachable("Unknown BuiltinID");
15140 Function *F = CGM.getIntrinsic(ID);
15141 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15142 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
15143 return Builder.CreateCall(F, {X, M4Value, M5Value});
15145 case SystemZ::BI__builtin_s390_vfmaxsb:
15146 case SystemZ::BI__builtin_s390_vfmaxdb: {
15147 llvm::Type *ResultType = ConvertType(E->getType());
15148 Value *X = EmitScalarExpr(E->getArg(0));
15149 Value *Y = EmitScalarExpr(E->getArg(1));
15150 // Constant-fold the M4 mask argument.
15152 bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
15153 assert(IsConstM4 && "Constant arg isn't actually constant?");
15155 // Check whether this instance can be represented via a LLVM standard
15156 // intrinsic. We only support some values of M4.
15157 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15159 switch (M4.getZExtValue()) {
15161 case 4: ID = Intrinsic::maxnum;
15162 CI = Intrinsic::experimental_constrained_maxnum; break;
15164 if (ID != Intrinsic::not_intrinsic) {
15165 if (Builder.getIsFPConstrained()) {
15166 Function *F = CGM.getIntrinsic(CI, ResultType);
15167 return Builder.CreateConstrainedFPCall(F, {X, Y});
15169 Function *F = CGM.getIntrinsic(ID, ResultType);
15170 return Builder.CreateCall(F, {X, Y});
15173 switch (BuiltinID) {
15174 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
15175 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
15176 default: llvm_unreachable("Unknown BuiltinID");
15178 Function *F = CGM.getIntrinsic(ID);
15179 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15180 return Builder.CreateCall(F, {X, Y, M4Value});
15182 case SystemZ::BI__builtin_s390_vfminsb:
15183 case SystemZ::BI__builtin_s390_vfmindb: {
15184 llvm::Type *ResultType = ConvertType(E->getType());
15185 Value *X = EmitScalarExpr(E->getArg(0));
15186 Value *Y = EmitScalarExpr(E->getArg(1));
15187 // Constant-fold the M4 mask argument.
15189 bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
15190 assert(IsConstM4 && "Constant arg isn't actually constant?");
15192 // Check whether this instance can be represented via a LLVM standard
15193 // intrinsic. We only support some values of M4.
15194 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15196 switch (M4.getZExtValue()) {
15198 case 4: ID = Intrinsic::minnum;
15199 CI = Intrinsic::experimental_constrained_minnum; break;
15201 if (ID != Intrinsic::not_intrinsic) {
15202 if (Builder.getIsFPConstrained()) {
15203 Function *F = CGM.getIntrinsic(CI, ResultType);
15204 return Builder.CreateConstrainedFPCall(F, {X, Y});
15206 Function *F = CGM.getIntrinsic(ID, ResultType);
15207 return Builder.CreateCall(F, {X, Y});
15210 switch (BuiltinID) {
15211 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
15212 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
15213 default: llvm_unreachable("Unknown BuiltinID");
15215 Function *F = CGM.getIntrinsic(ID);
15216 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15217 return Builder.CreateCall(F, {X, Y, M4Value});
15220 case SystemZ::BI__builtin_s390_vlbrh:
15221 case SystemZ::BI__builtin_s390_vlbrf:
15222 case SystemZ::BI__builtin_s390_vlbrg: {
15223 llvm::Type *ResultType = ConvertType(E->getType());
15224 Value *X = EmitScalarExpr(E->getArg(0));
15225 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
15226 return Builder.CreateCall(F, X);
15229 // Vector intrinsics that output the post-instruction CC value.
15231 #define INTRINSIC_WITH_CC(NAME) \
15232 case SystemZ::BI__builtin_##NAME: \
15233 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
15235 INTRINSIC_WITH_CC(s390_vpkshs);
15236 INTRINSIC_WITH_CC(s390_vpksfs);
15237 INTRINSIC_WITH_CC(s390_vpksgs);
15239 INTRINSIC_WITH_CC(s390_vpklshs);
15240 INTRINSIC_WITH_CC(s390_vpklsfs);
15241 INTRINSIC_WITH_CC(s390_vpklsgs);
15243 INTRINSIC_WITH_CC(s390_vceqbs);
15244 INTRINSIC_WITH_CC(s390_vceqhs);
15245 INTRINSIC_WITH_CC(s390_vceqfs);
15246 INTRINSIC_WITH_CC(s390_vceqgs);
15248 INTRINSIC_WITH_CC(s390_vchbs);
15249 INTRINSIC_WITH_CC(s390_vchhs);
15250 INTRINSIC_WITH_CC(s390_vchfs);
15251 INTRINSIC_WITH_CC(s390_vchgs);
15253 INTRINSIC_WITH_CC(s390_vchlbs);
15254 INTRINSIC_WITH_CC(s390_vchlhs);
15255 INTRINSIC_WITH_CC(s390_vchlfs);
15256 INTRINSIC_WITH_CC(s390_vchlgs);
15258 INTRINSIC_WITH_CC(s390_vfaebs);
15259 INTRINSIC_WITH_CC(s390_vfaehs);
15260 INTRINSIC_WITH_CC(s390_vfaefs);
15262 INTRINSIC_WITH_CC(s390_vfaezbs);
15263 INTRINSIC_WITH_CC(s390_vfaezhs);
15264 INTRINSIC_WITH_CC(s390_vfaezfs);
15266 INTRINSIC_WITH_CC(s390_vfeebs);
15267 INTRINSIC_WITH_CC(s390_vfeehs);
15268 INTRINSIC_WITH_CC(s390_vfeefs);
15270 INTRINSIC_WITH_CC(s390_vfeezbs);
15271 INTRINSIC_WITH_CC(s390_vfeezhs);
15272 INTRINSIC_WITH_CC(s390_vfeezfs);
15274 INTRINSIC_WITH_CC(s390_vfenebs);
15275 INTRINSIC_WITH_CC(s390_vfenehs);
15276 INTRINSIC_WITH_CC(s390_vfenefs);
15278 INTRINSIC_WITH_CC(s390_vfenezbs);
15279 INTRINSIC_WITH_CC(s390_vfenezhs);
15280 INTRINSIC_WITH_CC(s390_vfenezfs);
15282 INTRINSIC_WITH_CC(s390_vistrbs);
15283 INTRINSIC_WITH_CC(s390_vistrhs);
15284 INTRINSIC_WITH_CC(s390_vistrfs);
15286 INTRINSIC_WITH_CC(s390_vstrcbs);
15287 INTRINSIC_WITH_CC(s390_vstrchs);
15288 INTRINSIC_WITH_CC(s390_vstrcfs);
15290 INTRINSIC_WITH_CC(s390_vstrczbs);
15291 INTRINSIC_WITH_CC(s390_vstrczhs);
15292 INTRINSIC_WITH_CC(s390_vstrczfs);
15294 INTRINSIC_WITH_CC(s390_vfcesbs);
15295 INTRINSIC_WITH_CC(s390_vfcedbs);
15296 INTRINSIC_WITH_CC(s390_vfchsbs);
15297 INTRINSIC_WITH_CC(s390_vfchdbs);
15298 INTRINSIC_WITH_CC(s390_vfchesbs);
15299 INTRINSIC_WITH_CC(s390_vfchedbs);
15301 INTRINSIC_WITH_CC(s390_vftcisb);
15302 INTRINSIC_WITH_CC(s390_vftcidb);
15304 INTRINSIC_WITH_CC(s390_vstrsb);
15305 INTRINSIC_WITH_CC(s390_vstrsh);
15306 INTRINSIC_WITH_CC(s390_vstrsf);
15308 INTRINSIC_WITH_CC(s390_vstrszb);
15309 INTRINSIC_WITH_CC(s390_vstrszh);
15310 INTRINSIC_WITH_CC(s390_vstrszf);
15312 #undef INTRINSIC_WITH_CC
15320 // Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
15321 struct NVPTXMmaLdstInfo {
15322 unsigned NumResults; // Number of elements to load/store
15323 // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
15328 #define MMA_INTR(geom_op_type, layout) \
15329 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
15330 #define MMA_LDST(n, geom_op_type) \
15331 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
15333 static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
15334 switch (BuiltinID) {
15336 case NVPTX::BI__hmma_m16n16k16_ld_a:
15337 return MMA_LDST(8, m16n16k16_load_a_f16);
15338 case NVPTX::BI__hmma_m16n16k16_ld_b:
15339 return MMA_LDST(8, m16n16k16_load_b_f16);
15340 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
15341 return MMA_LDST(4, m16n16k16_load_c_f16);
15342 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
15343 return MMA_LDST(8, m16n16k16_load_c_f32);
15344 case NVPTX::BI__hmma_m32n8k16_ld_a:
15345 return MMA_LDST(8, m32n8k16_load_a_f16);
15346 case NVPTX::BI__hmma_m32n8k16_ld_b:
15347 return MMA_LDST(8, m32n8k16_load_b_f16);
15348 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
15349 return MMA_LDST(4, m32n8k16_load_c_f16);
15350 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
15351 return MMA_LDST(8, m32n8k16_load_c_f32);
15352 case NVPTX::BI__hmma_m8n32k16_ld_a:
15353 return MMA_LDST(8, m8n32k16_load_a_f16);
15354 case NVPTX::BI__hmma_m8n32k16_ld_b:
15355 return MMA_LDST(8, m8n32k16_load_b_f16);
15356 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
15357 return MMA_LDST(4, m8n32k16_load_c_f16);
15358 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
15359 return MMA_LDST(8, m8n32k16_load_c_f32);
15361 // Integer MMA loads
15362 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
15363 return MMA_LDST(2, m16n16k16_load_a_s8);
15364 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
15365 return MMA_LDST(2, m16n16k16_load_a_u8);
15366 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
15367 return MMA_LDST(2, m16n16k16_load_b_s8);
15368 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
15369 return MMA_LDST(2, m16n16k16_load_b_u8);
15370 case NVPTX::BI__imma_m16n16k16_ld_c:
15371 return MMA_LDST(8, m16n16k16_load_c_s32);
15372 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
15373 return MMA_LDST(4, m32n8k16_load_a_s8);
15374 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
15375 return MMA_LDST(4, m32n8k16_load_a_u8);
15376 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
15377 return MMA_LDST(1, m32n8k16_load_b_s8);
15378 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
15379 return MMA_LDST(1, m32n8k16_load_b_u8);
15380 case NVPTX::BI__imma_m32n8k16_ld_c:
15381 return MMA_LDST(8, m32n8k16_load_c_s32);
15382 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
15383 return MMA_LDST(1, m8n32k16_load_a_s8);
15384 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
15385 return MMA_LDST(1, m8n32k16_load_a_u8);
15386 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
15387 return MMA_LDST(4, m8n32k16_load_b_s8);
15388 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
15389 return MMA_LDST(4, m8n32k16_load_b_u8);
15390 case NVPTX::BI__imma_m8n32k16_ld_c:
15391 return MMA_LDST(8, m8n32k16_load_c_s32);
15393 // Sub-integer MMA loads.
15394 // Only row/col layout is supported by A/B fragments.
15395 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
15396 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
15397 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
15398 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
15399 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
15400 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
15401 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
15402 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
15403 case NVPTX::BI__imma_m8n8k32_ld_c:
15404 return MMA_LDST(2, m8n8k32_load_c_s32);
15405 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
15406 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
15407 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
15408 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
15409 case NVPTX::BI__bmma_m8n8k128_ld_c:
15410 return MMA_LDST(2, m8n8k128_load_c_s32);
15412 // NOTE: We need to follow inconsitent naming scheme used by NVCC. Unlike
15413 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
15414 // use fragment C for both loads and stores.
15416 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
15417 return MMA_LDST(4, m16n16k16_store_d_f16);
15418 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
15419 return MMA_LDST(8, m16n16k16_store_d_f32);
15420 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
15421 return MMA_LDST(4, m32n8k16_store_d_f16);
15422 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
15423 return MMA_LDST(8, m32n8k16_store_d_f32);
15424 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
15425 return MMA_LDST(4, m8n32k16_store_d_f16);
15426 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
15427 return MMA_LDST(8, m8n32k16_store_d_f32);
15429 // Integer and sub-integer MMA stores.
15430 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
15431 // name, integer loads/stores use LLVM's i32.
15432 case NVPTX::BI__imma_m16n16k16_st_c_i32:
15433 return MMA_LDST(8, m16n16k16_store_d_s32);
15434 case NVPTX::BI__imma_m32n8k16_st_c_i32:
15435 return MMA_LDST(8, m32n8k16_store_d_s32);
15436 case NVPTX::BI__imma_m8n32k16_st_c_i32:
15437 return MMA_LDST(8, m8n32k16_store_d_s32);
15438 case NVPTX::BI__imma_m8n8k32_st_c_i32:
15439 return MMA_LDST(2, m8n8k32_store_d_s32);
15440 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
15441 return MMA_LDST(2, m8n8k128_store_d_s32);
15444 llvm_unreachable("Unknown MMA builtin");
15451 struct NVPTXMmaInfo {
15456 std::array<unsigned, 8> Variants;
15458 unsigned getMMAIntrinsic(int Layout, bool Satf) {
15459 unsigned Index = Layout * 2 + Satf;
15460 if (Index >= Variants.size())
15462 return Variants[Index];
15466 // Returns an intrinsic that matches Layout and Satf for valid combinations of
15467 // Layout and Satf, 0 otherwise.
15468 static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
15469 // clang-format off
15470 #define MMA_VARIANTS(geom, type) {{ \
15471 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
15472 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
15473 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15474 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
15475 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
15476 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
15477 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
15478 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
15480 // Sub-integer MMA only supports row.col layout.
15481 #define MMA_VARIANTS_I4(geom, type) {{ \
15484 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15485 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
15491 // b1 MMA does not support .satfinite.
15492 #define MMA_VARIANTS_B1(geom, type) {{ \
15495 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15503 switch (BuiltinID) {
15505 // Note that 'type' argument of MMA_VARIANT uses D_C notation, while
15506 // NumEltsN of return value are ordered as A,B,C,D.
15507 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
15508 return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
15509 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
15510 return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
15511 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
15512 return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
15513 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
15514 return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
15515 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
15516 return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
15517 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
15518 return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
15519 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
15520 return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
15521 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
15522 return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
15523 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
15524 return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
15525 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
15526 return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
15527 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
15528 return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
15529 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
15530 return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
15533 case NVPTX::BI__imma_m16n16k16_mma_s8:
15534 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
15535 case NVPTX::BI__imma_m16n16k16_mma_u8:
15536 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
15537 case NVPTX::BI__imma_m32n8k16_mma_s8:
15538 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
15539 case NVPTX::BI__imma_m32n8k16_mma_u8:
15540 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
15541 case NVPTX::BI__imma_m8n32k16_mma_s8:
15542 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
15543 case NVPTX::BI__imma_m8n32k16_mma_u8:
15544 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
15547 case NVPTX::BI__imma_m8n8k32_mma_s4:
15548 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
15549 case NVPTX::BI__imma_m8n8k32_mma_u4:
15550 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
15551 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
15552 return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
15554 llvm_unreachable("Unexpected builtin ID.");
15556 #undef MMA_VARIANTS
15557 #undef MMA_VARIANTS_I4
15558 #undef MMA_VARIANTS_B1
15564 CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
15565 auto MakeLdg = [&](unsigned IntrinsicID) {
15566 Value *Ptr = EmitScalarExpr(E->getArg(0));
15567 clang::CharUnits Align =
15568 CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
15569 return Builder.CreateCall(
15570 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
15572 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
15574 auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
15575 Value *Ptr = EmitScalarExpr(E->getArg(0));
15576 return Builder.CreateCall(
15577 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
15579 {Ptr, EmitScalarExpr(E->getArg(1))});
15581 switch (BuiltinID) {
15582 case NVPTX::BI__nvvm_atom_add_gen_i:
15583 case NVPTX::BI__nvvm_atom_add_gen_l:
15584 case NVPTX::BI__nvvm_atom_add_gen_ll:
15585 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
15587 case NVPTX::BI__nvvm_atom_sub_gen_i:
15588 case NVPTX::BI__nvvm_atom_sub_gen_l:
15589 case NVPTX::BI__nvvm_atom_sub_gen_ll:
15590 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
15592 case NVPTX::BI__nvvm_atom_and_gen_i:
15593 case NVPTX::BI__nvvm_atom_and_gen_l:
15594 case NVPTX::BI__nvvm_atom_and_gen_ll:
15595 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
15597 case NVPTX::BI__nvvm_atom_or_gen_i:
15598 case NVPTX::BI__nvvm_atom_or_gen_l:
15599 case NVPTX::BI__nvvm_atom_or_gen_ll:
15600 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
15602 case NVPTX::BI__nvvm_atom_xor_gen_i:
15603 case NVPTX::BI__nvvm_atom_xor_gen_l:
15604 case NVPTX::BI__nvvm_atom_xor_gen_ll:
15605 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
15607 case NVPTX::BI__nvvm_atom_xchg_gen_i:
15608 case NVPTX::BI__nvvm_atom_xchg_gen_l:
15609 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
15610 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
15612 case NVPTX::BI__nvvm_atom_max_gen_i:
15613 case NVPTX::BI__nvvm_atom_max_gen_l:
15614 case NVPTX::BI__nvvm_atom_max_gen_ll:
15615 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
15617 case NVPTX::BI__nvvm_atom_max_gen_ui:
15618 case NVPTX::BI__nvvm_atom_max_gen_ul:
15619 case NVPTX::BI__nvvm_atom_max_gen_ull:
15620 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
15622 case NVPTX::BI__nvvm_atom_min_gen_i:
15623 case NVPTX::BI__nvvm_atom_min_gen_l:
15624 case NVPTX::BI__nvvm_atom_min_gen_ll:
15625 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
15627 case NVPTX::BI__nvvm_atom_min_gen_ui:
15628 case NVPTX::BI__nvvm_atom_min_gen_ul:
15629 case NVPTX::BI__nvvm_atom_min_gen_ull:
15630 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
15632 case NVPTX::BI__nvvm_atom_cas_gen_i:
15633 case NVPTX::BI__nvvm_atom_cas_gen_l:
15634 case NVPTX::BI__nvvm_atom_cas_gen_ll:
15635 // __nvvm_atom_cas_gen_* should return the old value rather than the
15637 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
15639 case NVPTX::BI__nvvm_atom_add_gen_f:
15640 case NVPTX::BI__nvvm_atom_add_gen_d: {
15641 Value *Ptr = EmitScalarExpr(E->getArg(0));
15642 Value *Val = EmitScalarExpr(E->getArg(1));
15643 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
15644 AtomicOrdering::SequentiallyConsistent);
15647 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
15648 Value *Ptr = EmitScalarExpr(E->getArg(0));
15649 Value *Val = EmitScalarExpr(E->getArg(1));
15650 Function *FnALI32 =
15651 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
15652 return Builder.CreateCall(FnALI32, {Ptr, Val});
15655 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
15656 Value *Ptr = EmitScalarExpr(E->getArg(0));
15657 Value *Val = EmitScalarExpr(E->getArg(1));
15658 Function *FnALD32 =
15659 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
15660 return Builder.CreateCall(FnALD32, {Ptr, Val});
15663 case NVPTX::BI__nvvm_ldg_c:
15664 case NVPTX::BI__nvvm_ldg_c2:
15665 case NVPTX::BI__nvvm_ldg_c4:
15666 case NVPTX::BI__nvvm_ldg_s:
15667 case NVPTX::BI__nvvm_ldg_s2:
15668 case NVPTX::BI__nvvm_ldg_s4:
15669 case NVPTX::BI__nvvm_ldg_i:
15670 case NVPTX::BI__nvvm_ldg_i2:
15671 case NVPTX::BI__nvvm_ldg_i4:
15672 case NVPTX::BI__nvvm_ldg_l:
15673 case NVPTX::BI__nvvm_ldg_ll:
15674 case NVPTX::BI__nvvm_ldg_ll2:
15675 case NVPTX::BI__nvvm_ldg_uc:
15676 case NVPTX::BI__nvvm_ldg_uc2:
15677 case NVPTX::BI__nvvm_ldg_uc4:
15678 case NVPTX::BI__nvvm_ldg_us:
15679 case NVPTX::BI__nvvm_ldg_us2:
15680 case NVPTX::BI__nvvm_ldg_us4:
15681 case NVPTX::BI__nvvm_ldg_ui:
15682 case NVPTX::BI__nvvm_ldg_ui2:
15683 case NVPTX::BI__nvvm_ldg_ui4:
15684 case NVPTX::BI__nvvm_ldg_ul:
15685 case NVPTX::BI__nvvm_ldg_ull:
15686 case NVPTX::BI__nvvm_ldg_ull2:
15687 // PTX Interoperability section 2.2: "For a vector with an even number of
15688 // elements, its alignment is set to number of elements times the alignment
15689 // of its member: n*alignof(t)."
15690 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
15691 case NVPTX::BI__nvvm_ldg_f:
15692 case NVPTX::BI__nvvm_ldg_f2:
15693 case NVPTX::BI__nvvm_ldg_f4:
15694 case NVPTX::BI__nvvm_ldg_d:
15695 case NVPTX::BI__nvvm_ldg_d2:
15696 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
15698 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
15699 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
15700 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
15701 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
15702 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
15703 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
15704 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
15705 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
15706 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
15707 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
15708 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
15709 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
15710 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
15711 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
15712 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
15713 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
15714 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
15715 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
15716 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
15717 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
15718 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
15719 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
15720 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
15721 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
15722 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
15723 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
15724 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
15725 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
15726 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
15727 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
15728 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
15729 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
15730 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
15731 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
15732 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
15733 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
15734 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
15735 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
15736 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
15737 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
15738 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
15739 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
15740 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
15741 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
15742 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
15743 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
15744 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
15745 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
15746 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
15747 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
15748 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
15749 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
15750 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
15751 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
15752 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
15753 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
15754 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
15755 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
15756 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
15757 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
15758 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
15759 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
15760 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
15761 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
15762 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
15763 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
15764 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
15765 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
15766 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
15767 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
15768 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
15769 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
15770 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
15771 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
15772 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
15773 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
15774 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
15775 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
15776 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
15777 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
15778 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
15779 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
15780 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
15781 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
15782 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
15783 Value *Ptr = EmitScalarExpr(E->getArg(0));
15784 return Builder.CreateCall(
15786 Intrinsic::nvvm_atomic_cas_gen_i_cta,
15787 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
15788 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
15790 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
15791 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
15792 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
15793 Value *Ptr = EmitScalarExpr(E->getArg(0));
15794 return Builder.CreateCall(
15796 Intrinsic::nvvm_atomic_cas_gen_i_sys,
15797 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
15798 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
15800 case NVPTX::BI__nvvm_match_all_sync_i32p:
15801 case NVPTX::BI__nvvm_match_all_sync_i64p: {
15802 Value *Mask = EmitScalarExpr(E->getArg(0));
15803 Value *Val = EmitScalarExpr(E->getArg(1));
15804 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
15805 Value *ResultPair = Builder.CreateCall(
15806 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
15807 ? Intrinsic::nvvm_match_all_sync_i32p
15808 : Intrinsic::nvvm_match_all_sync_i64p),
15810 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
15811 PredOutPtr.getElementType());
15812 Builder.CreateStore(Pred, PredOutPtr);
15813 return Builder.CreateExtractValue(ResultPair, 0);
15817 case NVPTX::BI__hmma_m16n16k16_ld_a:
15818 case NVPTX::BI__hmma_m16n16k16_ld_b:
15819 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
15820 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
15821 case NVPTX::BI__hmma_m32n8k16_ld_a:
15822 case NVPTX::BI__hmma_m32n8k16_ld_b:
15823 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
15824 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
15825 case NVPTX::BI__hmma_m8n32k16_ld_a:
15826 case NVPTX::BI__hmma_m8n32k16_ld_b:
15827 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
15828 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
15829 // Integer MMA loads.
15830 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
15831 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
15832 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
15833 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
15834 case NVPTX::BI__imma_m16n16k16_ld_c:
15835 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
15836 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
15837 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
15838 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
15839 case NVPTX::BI__imma_m32n8k16_ld_c:
15840 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
15841 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
15842 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
15843 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
15844 case NVPTX::BI__imma_m8n32k16_ld_c:
15845 // Sub-integer MMA loads.
15846 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
15847 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
15848 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
15849 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
15850 case NVPTX::BI__imma_m8n8k32_ld_c:
15851 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
15852 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
15853 case NVPTX::BI__bmma_m8n8k128_ld_c:
15855 Address Dst = EmitPointerWithAlignment(E->getArg(0));
15856 Value *Src = EmitScalarExpr(E->getArg(1));
15857 Value *Ldm = EmitScalarExpr(E->getArg(2));
15858 llvm::APSInt isColMajorArg;
15859 if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
15861 bool isColMajor = isColMajorArg.getSExtValue();
15862 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
15863 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
15868 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
15870 // Save returned values.
15871 assert(II.NumResults);
15872 if (II.NumResults == 1) {
15873 Builder.CreateAlignedStore(Result, Dst.getPointer(),
15874 CharUnits::fromQuantity(4));
15876 for (unsigned i = 0; i < II.NumResults; ++i) {
15877 Builder.CreateAlignedStore(
15878 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
15879 Dst.getElementType()),
15880 Builder.CreateGEP(Dst.getPointer(),
15881 llvm::ConstantInt::get(IntTy, i)),
15882 CharUnits::fromQuantity(4));
15888 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
15889 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
15890 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
15891 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
15892 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
15893 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
15894 case NVPTX::BI__imma_m16n16k16_st_c_i32:
15895 case NVPTX::BI__imma_m32n8k16_st_c_i32:
15896 case NVPTX::BI__imma_m8n32k16_st_c_i32:
15897 case NVPTX::BI__imma_m8n8k32_st_c_i32:
15898 case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
15899 Value *Dst = EmitScalarExpr(E->getArg(0));
15900 Address Src = EmitPointerWithAlignment(E->getArg(1));
15901 Value *Ldm = EmitScalarExpr(E->getArg(2));
15902 llvm::APSInt isColMajorArg;
15903 if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
15905 bool isColMajor = isColMajorArg.getSExtValue();
15906 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
15907 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
15910 Function *Intrinsic =
15911 CGM.getIntrinsic(IID, Dst->getType());
15912 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
15913 SmallVector<Value *, 10> Values = {Dst};
15914 for (unsigned i = 0; i < II.NumResults; ++i) {
15915 Value *V = Builder.CreateAlignedLoad(
15916 Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
15917 CharUnits::fromQuantity(4));
15918 Values.push_back(Builder.CreateBitCast(V, ParamType));
15920 Values.push_back(Ldm);
15921 Value *Result = Builder.CreateCall(Intrinsic, Values);
15925 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
15926 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
15927 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
15928 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
15929 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
15930 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
15931 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
15932 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
15933 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
15934 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
15935 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
15936 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
15937 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
15938 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
15939 case NVPTX::BI__imma_m16n16k16_mma_s8:
15940 case NVPTX::BI__imma_m16n16k16_mma_u8:
15941 case NVPTX::BI__imma_m32n8k16_mma_s8:
15942 case NVPTX::BI__imma_m32n8k16_mma_u8:
15943 case NVPTX::BI__imma_m8n32k16_mma_s8:
15944 case NVPTX::BI__imma_m8n32k16_mma_u8:
15945 case NVPTX::BI__imma_m8n8k32_mma_s4:
15946 case NVPTX::BI__imma_m8n8k32_mma_u4:
15947 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
15948 Address Dst = EmitPointerWithAlignment(E->getArg(0));
15949 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
15950 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
15951 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
15952 llvm::APSInt LayoutArg;
15953 if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext()))
15955 int Layout = LayoutArg.getSExtValue();
15956 if (Layout < 0 || Layout > 3)
15958 llvm::APSInt SatfArg;
15959 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
15960 SatfArg = 0; // .b1 does not have satf argument.
15961 else if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
15963 bool Satf = SatfArg.getSExtValue();
15964 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
15965 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
15966 if (IID == 0) // Unsupported combination of Layout/Satf.
15969 SmallVector<Value *, 24> Values;
15970 Function *Intrinsic = CGM.getIntrinsic(IID);
15971 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
15973 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
15974 Value *V = Builder.CreateAlignedLoad(
15975 Builder.CreateGEP(SrcA.getPointer(),
15976 llvm::ConstantInt::get(IntTy, i)),
15977 CharUnits::fromQuantity(4));
15978 Values.push_back(Builder.CreateBitCast(V, AType));
15981 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
15982 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
15983 Value *V = Builder.CreateAlignedLoad(
15984 Builder.CreateGEP(SrcB.getPointer(),
15985 llvm::ConstantInt::get(IntTy, i)),
15986 CharUnits::fromQuantity(4));
15987 Values.push_back(Builder.CreateBitCast(V, BType));
15990 llvm::Type *CType =
15991 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
15992 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
15993 Value *V = Builder.CreateAlignedLoad(
15994 Builder.CreateGEP(SrcC.getPointer(),
15995 llvm::ConstantInt::get(IntTy, i)),
15996 CharUnits::fromQuantity(4));
15997 Values.push_back(Builder.CreateBitCast(V, CType));
15999 Value *Result = Builder.CreateCall(Intrinsic, Values);
16000 llvm::Type *DType = Dst.getElementType();
16001 for (unsigned i = 0; i < MI.NumEltsD; ++i)
16002 Builder.CreateAlignedStore(
16003 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
16004 Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
16005 CharUnits::fromQuantity(4));
16014 struct BuiltinAlignArgs {
16015 llvm::Value *Src = nullptr;
16016 llvm::Type *SrcType = nullptr;
16017 llvm::Value *Alignment = nullptr;
16018 llvm::Value *Mask = nullptr;
16019 llvm::IntegerType *IntType = nullptr;
16021 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
16022 QualType AstType = E->getArg(0)->getType();
16023 if (AstType->isArrayType())
16024 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
16026 Src = CGF.EmitScalarExpr(E->getArg(0));
16027 SrcType = Src->getType();
16028 if (SrcType->isPointerTy()) {
16029 IntType = IntegerType::get(
16030 CGF.getLLVMContext(),
16031 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
16033 assert(SrcType->isIntegerTy());
16034 IntType = cast<llvm::IntegerType>(SrcType);
16036 Alignment = CGF.EmitScalarExpr(E->getArg(1));
16037 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
16038 auto *One = llvm::ConstantInt::get(IntType, 1);
16039 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
16044 /// Generate (x & (y-1)) == 0.
16045 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
16046 BuiltinAlignArgs Args(E, *this);
16047 llvm::Value *SrcAddress = Args.Src;
16048 if (Args.SrcType->isPointerTy())
16050 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
16051 return RValue::get(Builder.CreateICmpEQ(
16052 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
16053 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
16056 /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
16057 /// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
16058 /// llvm.ptrmask instrinsic (with a GEP before in the align_up case).
16059 /// TODO: actually use ptrmask once most optimization passes know about it.
16060 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
16061 BuiltinAlignArgs Args(E, *this);
16062 llvm::Value *SrcAddr = Args.Src;
16063 if (Args.Src->getType()->isPointerTy())
16064 SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
16065 llvm::Value *SrcForMask = SrcAddr;
16067 // When aligning up we have to first add the mask to ensure we go over the
16068 // next alignment value and then align down to the next valid multiple.
16069 // By adding the mask, we ensure that align_up on an already aligned
16070 // value will not change the value.
16071 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
16073 // Invert the mask to only clear the lower bits.
16074 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
16075 llvm::Value *Result =
16076 Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
16077 if (Args.Src->getType()->isPointerTy()) {
16078 /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
16079 // Result = Builder.CreateIntrinsic(
16080 // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
16081 // {SrcForMask, NegatedMask}, nullptr, "aligned_result");
16082 Result->setName("aligned_intptr");
16083 llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
16084 // The result must point to the same underlying allocation. This means we
16085 // can use an inbounds GEP to enable better optimization.
16086 Value *Base = EmitCastToVoidPtr(Args.Src);
16087 if (getLangOpts().isSignedOverflowDefined())
16088 Result = Builder.CreateGEP(Base, Difference, "aligned_result");
16090 Result = EmitCheckedInBoundsGEP(Base, Difference,
16091 /*SignedIndices=*/true,
16092 /*isSubtraction=*/!AlignUp,
16093 E->getExprLoc(), "aligned_result");
16094 Result = Builder.CreatePointerCast(Result, Args.SrcType);
16095 // Emit an alignment assumption to ensure that the new alignment is
16096 // propagated to loads/stores, etc.
16097 emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
16099 assert(Result->getType() == Args.SrcType);
16100 return RValue::get(Result);
16103 Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
16104 const CallExpr *E) {
16105 switch (BuiltinID) {
16106 case WebAssembly::BI__builtin_wasm_memory_size: {
16107 llvm::Type *ResultType = ConvertType(E->getType());
16108 Value *I = EmitScalarExpr(E->getArg(0));
16109 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
16110 return Builder.CreateCall(Callee, I);
16112 case WebAssembly::BI__builtin_wasm_memory_grow: {
16113 llvm::Type *ResultType = ConvertType(E->getType());
16115 EmitScalarExpr(E->getArg(0)),
16116 EmitScalarExpr(E->getArg(1))
16118 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
16119 return Builder.CreateCall(Callee, Args);
16121 case WebAssembly::BI__builtin_wasm_tls_size: {
16122 llvm::Type *ResultType = ConvertType(E->getType());
16123 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
16124 return Builder.CreateCall(Callee);
16126 case WebAssembly::BI__builtin_wasm_tls_align: {
16127 llvm::Type *ResultType = ConvertType(E->getType());
16128 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
16129 return Builder.CreateCall(Callee);
16131 case WebAssembly::BI__builtin_wasm_tls_base: {
16132 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
16133 return Builder.CreateCall(Callee);
16135 case WebAssembly::BI__builtin_wasm_throw: {
16136 Value *Tag = EmitScalarExpr(E->getArg(0));
16137 Value *Obj = EmitScalarExpr(E->getArg(1));
16138 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
16139 return Builder.CreateCall(Callee, {Tag, Obj});
16141 case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
16142 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
16143 return Builder.CreateCall(Callee);
16145 case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
16146 Value *Addr = EmitScalarExpr(E->getArg(0));
16147 Value *Expected = EmitScalarExpr(E->getArg(1));
16148 Value *Timeout = EmitScalarExpr(E->getArg(2));
16149 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
16150 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
16152 case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
16153 Value *Addr = EmitScalarExpr(E->getArg(0));
16154 Value *Expected = EmitScalarExpr(E->getArg(1));
16155 Value *Timeout = EmitScalarExpr(E->getArg(2));
16156 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
16157 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
16159 case WebAssembly::BI__builtin_wasm_atomic_notify: {
16160 Value *Addr = EmitScalarExpr(E->getArg(0));
16161 Value *Count = EmitScalarExpr(E->getArg(1));
16162 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
16163 return Builder.CreateCall(Callee, {Addr, Count});
16165 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
16166 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
16167 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
16168 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
16169 Value *Src = EmitScalarExpr(E->getArg(0));
16170 llvm::Type *ResT = ConvertType(E->getType());
16172 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
16173 return Builder.CreateCall(Callee, {Src});
16175 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
16176 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
16177 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
16178 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
16179 Value *Src = EmitScalarExpr(E->getArg(0));
16180 llvm::Type *ResT = ConvertType(E->getType());
16181 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
16182 {ResT, Src->getType()});
16183 return Builder.CreateCall(Callee, {Src});
16185 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
16186 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
16187 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
16188 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
16189 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
16190 Value *Src = EmitScalarExpr(E->getArg(0));
16191 llvm::Type *ResT = ConvertType(E->getType());
16192 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
16193 {ResT, Src->getType()});
16194 return Builder.CreateCall(Callee, {Src});
16196 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
16197 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
16198 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
16199 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
16200 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
16201 Value *Src = EmitScalarExpr(E->getArg(0));
16202 llvm::Type *ResT = ConvertType(E->getType());
16203 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
16204 {ResT, Src->getType()});
16205 return Builder.CreateCall(Callee, {Src});
16207 case WebAssembly::BI__builtin_wasm_min_f32:
16208 case WebAssembly::BI__builtin_wasm_min_f64:
16209 case WebAssembly::BI__builtin_wasm_min_f32x4:
16210 case WebAssembly::BI__builtin_wasm_min_f64x2: {
16211 Value *LHS = EmitScalarExpr(E->getArg(0));
16212 Value *RHS = EmitScalarExpr(E->getArg(1));
16213 Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
16214 ConvertType(E->getType()));
16215 return Builder.CreateCall(Callee, {LHS, RHS});
16217 case WebAssembly::BI__builtin_wasm_max_f32:
16218 case WebAssembly::BI__builtin_wasm_max_f64:
16219 case WebAssembly::BI__builtin_wasm_max_f32x4:
16220 case WebAssembly::BI__builtin_wasm_max_f64x2: {
16221 Value *LHS = EmitScalarExpr(E->getArg(0));
16222 Value *RHS = EmitScalarExpr(E->getArg(1));
16223 Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
16224 ConvertType(E->getType()));
16225 return Builder.CreateCall(Callee, {LHS, RHS});
16227 case WebAssembly::BI__builtin_wasm_pmin_f32x4:
16228 case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
16229 Value *LHS = EmitScalarExpr(E->getArg(0));
16230 Value *RHS = EmitScalarExpr(E->getArg(1));
16232 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
16233 return Builder.CreateCall(Callee, {LHS, RHS});
16235 case WebAssembly::BI__builtin_wasm_pmax_f32x4:
16236 case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
16237 Value *LHS = EmitScalarExpr(E->getArg(0));
16238 Value *RHS = EmitScalarExpr(E->getArg(1));
16240 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
16241 return Builder.CreateCall(Callee, {LHS, RHS});
16243 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
16244 case WebAssembly::BI__builtin_wasm_floor_f32x4:
16245 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
16246 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
16247 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
16248 case WebAssembly::BI__builtin_wasm_floor_f64x2:
16249 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
16250 case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
16252 switch (BuiltinID) {
16253 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
16254 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
16255 IntNo = Intrinsic::wasm_ceil;
16257 case WebAssembly::BI__builtin_wasm_floor_f32x4:
16258 case WebAssembly::BI__builtin_wasm_floor_f64x2:
16259 IntNo = Intrinsic::wasm_floor;
16261 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
16262 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
16263 IntNo = Intrinsic::wasm_trunc;
16265 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
16266 case WebAssembly::BI__builtin_wasm_nearest_f64x2:
16267 IntNo = Intrinsic::wasm_nearest;
16270 llvm_unreachable("unexpected builtin ID");
16272 Value *Value = EmitScalarExpr(E->getArg(0));
16273 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16274 return Builder.CreateCall(Callee, Value);
16276 case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
16277 Value *Src = EmitScalarExpr(E->getArg(0));
16278 Value *Indices = EmitScalarExpr(E->getArg(1));
16279 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
16280 return Builder.CreateCall(Callee, {Src, Indices});
16282 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
16283 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
16284 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
16285 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
16286 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
16287 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
16288 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
16289 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
16290 llvm::APSInt LaneConst;
16291 if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
16292 llvm_unreachable("Constant arg isn't actually constant?");
16293 Value *Vec = EmitScalarExpr(E->getArg(0));
16294 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16295 Value *Extract = Builder.CreateExtractElement(Vec, Lane);
16296 switch (BuiltinID) {
16297 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
16298 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
16299 return Builder.CreateSExt(Extract, ConvertType(E->getType()));
16300 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
16301 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
16302 return Builder.CreateZExt(Extract, ConvertType(E->getType()));
16303 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
16304 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
16305 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
16306 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
16309 llvm_unreachable("unexpected builtin ID");
16312 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
16313 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
16314 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
16315 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
16316 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
16317 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
16318 llvm::APSInt LaneConst;
16319 if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
16320 llvm_unreachable("Constant arg isn't actually constant?");
16321 Value *Vec = EmitScalarExpr(E->getArg(0));
16322 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16323 Value *Val = EmitScalarExpr(E->getArg(2));
16324 switch (BuiltinID) {
16325 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
16326 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
16327 llvm::Type *ElemType =
16328 cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
16329 Value *Trunc = Builder.CreateTrunc(Val, ElemType);
16330 return Builder.CreateInsertElement(Vec, Trunc, Lane);
16332 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
16333 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
16334 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
16335 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
16336 return Builder.CreateInsertElement(Vec, Val, Lane);
16338 llvm_unreachable("unexpected builtin ID");
16341 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
16342 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
16343 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
16344 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
16345 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
16346 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
16347 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
16348 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
16350 switch (BuiltinID) {
16351 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
16352 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
16353 IntNo = Intrinsic::sadd_sat;
16355 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
16356 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
16357 IntNo = Intrinsic::uadd_sat;
16359 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
16360 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
16361 IntNo = Intrinsic::wasm_sub_saturate_signed;
16363 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
16364 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
16365 IntNo = Intrinsic::wasm_sub_saturate_unsigned;
16368 llvm_unreachable("unexpected builtin ID");
16370 Value *LHS = EmitScalarExpr(E->getArg(0));
16371 Value *RHS = EmitScalarExpr(E->getArg(1));
16372 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16373 return Builder.CreateCall(Callee, {LHS, RHS});
16375 case WebAssembly::BI__builtin_wasm_abs_i8x16:
16376 case WebAssembly::BI__builtin_wasm_abs_i16x8:
16377 case WebAssembly::BI__builtin_wasm_abs_i32x4: {
16378 Value *Vec = EmitScalarExpr(E->getArg(0));
16379 Value *Neg = Builder.CreateNeg(Vec, "neg");
16380 Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
16381 Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
16382 return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
16384 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
16385 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
16386 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
16387 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
16388 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
16389 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
16390 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
16391 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
16392 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
16393 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
16394 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
16395 case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
16396 Value *LHS = EmitScalarExpr(E->getArg(0));
16397 Value *RHS = EmitScalarExpr(E->getArg(1));
16399 switch (BuiltinID) {
16400 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
16401 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
16402 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
16403 ICmp = Builder.CreateICmpSLT(LHS, RHS);
16405 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
16406 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
16407 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
16408 ICmp = Builder.CreateICmpULT(LHS, RHS);
16410 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
16411 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
16412 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
16413 ICmp = Builder.CreateICmpSGT(LHS, RHS);
16415 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
16416 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
16417 case WebAssembly::BI__builtin_wasm_max_u_i32x4:
16418 ICmp = Builder.CreateICmpUGT(LHS, RHS);
16421 llvm_unreachable("unexpected builtin ID");
16423 return Builder.CreateSelect(ICmp, LHS, RHS);
16425 case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
16426 case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
16427 Value *LHS = EmitScalarExpr(E->getArg(0));
16428 Value *RHS = EmitScalarExpr(E->getArg(1));
16429 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
16430 ConvertType(E->getType()));
16431 return Builder.CreateCall(Callee, {LHS, RHS});
16433 case WebAssembly::BI__builtin_wasm_bitselect: {
16434 Value *V1 = EmitScalarExpr(E->getArg(0));
16435 Value *V2 = EmitScalarExpr(E->getArg(1));
16436 Value *C = EmitScalarExpr(E->getArg(2));
16437 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
16438 ConvertType(E->getType()));
16439 return Builder.CreateCall(Callee, {V1, V2, C});
16441 case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
16442 Value *LHS = EmitScalarExpr(E->getArg(0));
16443 Value *RHS = EmitScalarExpr(E->getArg(1));
16444 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
16445 return Builder.CreateCall(Callee, {LHS, RHS});
16447 case WebAssembly::BI__builtin_wasm_any_true_i8x16:
16448 case WebAssembly::BI__builtin_wasm_any_true_i16x8:
16449 case WebAssembly::BI__builtin_wasm_any_true_i32x4:
16450 case WebAssembly::BI__builtin_wasm_any_true_i64x2:
16451 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
16452 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
16453 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
16454 case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
16456 switch (BuiltinID) {
16457 case WebAssembly::BI__builtin_wasm_any_true_i8x16:
16458 case WebAssembly::BI__builtin_wasm_any_true_i16x8:
16459 case WebAssembly::BI__builtin_wasm_any_true_i32x4:
16460 case WebAssembly::BI__builtin_wasm_any_true_i64x2:
16461 IntNo = Intrinsic::wasm_anytrue;
16463 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
16464 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
16465 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
16466 case WebAssembly::BI__builtin_wasm_all_true_i64x2:
16467 IntNo = Intrinsic::wasm_alltrue;
16470 llvm_unreachable("unexpected builtin ID");
16472 Value *Vec = EmitScalarExpr(E->getArg(0));
16473 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
16474 return Builder.CreateCall(Callee, {Vec});
16476 case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
16477 case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
16478 case WebAssembly::BI__builtin_wasm_bitmask_i32x4: {
16479 Value *Vec = EmitScalarExpr(E->getArg(0));
16481 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
16482 return Builder.CreateCall(Callee, {Vec});
16484 case WebAssembly::BI__builtin_wasm_abs_f32x4:
16485 case WebAssembly::BI__builtin_wasm_abs_f64x2: {
16486 Value *Vec = EmitScalarExpr(E->getArg(0));
16487 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
16488 return Builder.CreateCall(Callee, {Vec});
16490 case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
16491 case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
16492 Value *Vec = EmitScalarExpr(E->getArg(0));
16493 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
16494 return Builder.CreateCall(Callee, {Vec});
16496 case WebAssembly::BI__builtin_wasm_qfma_f32x4:
16497 case WebAssembly::BI__builtin_wasm_qfms_f32x4:
16498 case WebAssembly::BI__builtin_wasm_qfma_f64x2:
16499 case WebAssembly::BI__builtin_wasm_qfms_f64x2: {
16500 Value *A = EmitScalarExpr(E->getArg(0));
16501 Value *B = EmitScalarExpr(E->getArg(1));
16502 Value *C = EmitScalarExpr(E->getArg(2));
16504 switch (BuiltinID) {
16505 case WebAssembly::BI__builtin_wasm_qfma_f32x4:
16506 case WebAssembly::BI__builtin_wasm_qfma_f64x2:
16507 IntNo = Intrinsic::wasm_qfma;
16509 case WebAssembly::BI__builtin_wasm_qfms_f32x4:
16510 case WebAssembly::BI__builtin_wasm_qfms_f64x2:
16511 IntNo = Intrinsic::wasm_qfms;
16514 llvm_unreachable("unexpected builtin ID");
16516 Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
16517 return Builder.CreateCall(Callee, {A, B, C});
16519 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
16520 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
16521 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
16522 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
16523 Value *Low = EmitScalarExpr(E->getArg(0));
16524 Value *High = EmitScalarExpr(E->getArg(1));
16526 switch (BuiltinID) {
16527 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
16528 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
16529 IntNo = Intrinsic::wasm_narrow_signed;
16531 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
16532 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
16533 IntNo = Intrinsic::wasm_narrow_unsigned;
16536 llvm_unreachable("unexpected builtin ID");
16539 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
16540 return Builder.CreateCall(Callee, {Low, High});
16542 case WebAssembly::BI__builtin_wasm_widen_low_s_i16x8_i8x16:
16543 case WebAssembly::BI__builtin_wasm_widen_high_s_i16x8_i8x16:
16544 case WebAssembly::BI__builtin_wasm_widen_low_u_i16x8_i8x16:
16545 case WebAssembly::BI__builtin_wasm_widen_high_u_i16x8_i8x16:
16546 case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i16x8:
16547 case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i16x8:
16548 case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i16x8:
16549 case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i16x8: {
16550 Value *Vec = EmitScalarExpr(E->getArg(0));
16552 switch (BuiltinID) {
16553 case WebAssembly::BI__builtin_wasm_widen_low_s_i16x8_i8x16:
16554 case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i16x8:
16555 IntNo = Intrinsic::wasm_widen_low_signed;
16557 case WebAssembly::BI__builtin_wasm_widen_high_s_i16x8_i8x16:
16558 case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i16x8:
16559 IntNo = Intrinsic::wasm_widen_high_signed;
16561 case WebAssembly::BI__builtin_wasm_widen_low_u_i16x8_i8x16:
16562 case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i16x8:
16563 IntNo = Intrinsic::wasm_widen_low_unsigned;
16565 case WebAssembly::BI__builtin_wasm_widen_high_u_i16x8_i8x16:
16566 case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i16x8:
16567 IntNo = Intrinsic::wasm_widen_high_unsigned;
16570 llvm_unreachable("unexpected builtin ID");
16573 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Vec->getType()});
16574 return Builder.CreateCall(Callee, Vec);
16576 case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
16579 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
16580 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
16581 while (OpIdx < 18) {
16582 llvm::APSInt LaneConst;
16583 if (!E->getArg(OpIdx)->isIntegerConstantExpr(LaneConst, getContext()))
16584 llvm_unreachable("Constant arg isn't actually constant?");
16585 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16587 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
16588 return Builder.CreateCall(Callee, Ops);
16595 static std::pair<Intrinsic::ID, unsigned>
16596 getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
16598 unsigned BuiltinID;
16599 Intrinsic::ID IntrinsicID;
16603 #define CUSTOM_BUILTIN_MAPPING(x,s) \
16604 { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
16605 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
16606 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
16607 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
16608 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
16609 CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
16610 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
16611 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
16612 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
16613 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
16614 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
16615 CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
16616 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
16617 CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
16618 CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
16619 CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
16620 CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
16621 CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
16622 CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
16623 CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
16624 CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
16625 CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
16626 CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
16627 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
16628 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
16629 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
16630 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
16631 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
16632 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
16633 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
16634 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
16635 #include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
16636 #undef CUSTOM_BUILTIN_MAPPING
16639 auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
16640 static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
16643 const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
16644 Info{BuiltinID, 0, 0}, CmpInfo);
16645 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
16646 return {Intrinsic::not_intrinsic, 0};
16648 return {F->IntrinsicID, F->VecLen};
16651 Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
16652 const CallExpr *E) {
16655 std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
16657 auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
16658 // The base pointer is passed by address, so it needs to be loaded.
16659 Address A = EmitPointerWithAlignment(E->getArg(0));
16660 Address BP = Address(
16661 Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
16662 llvm::Value *Base = Builder.CreateLoad(BP);
16663 // The treatment of both loads and stores is the same: the arguments for
16664 // the builtin are the same as the arguments for the intrinsic.
16666 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
16667 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
16669 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
16670 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
16671 SmallVector<llvm::Value*,5> Ops = { Base };
16672 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
16673 Ops.push_back(EmitScalarExpr(E->getArg(i)));
16675 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
16676 // The load intrinsics generate two results (Value, NewBase), stores
16677 // generate one (NewBase). The new base address needs to be stored.
16678 llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
16680 llvm::Value *LV = Builder.CreateBitCast(
16681 EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
16682 Address Dest = EmitPointerWithAlignment(E->getArg(0));
16683 llvm::Value *RetVal =
16684 Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
16686 RetVal = Builder.CreateExtractValue(Result, 0);
16690 // Handle the conversion of bit-reverse load intrinsics to bit code.
16691 // The intrinsic call after this function only reads from memory and the
16692 // write to memory is dealt by the store instruction.
16693 auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
16694 // The intrinsic generates one result, which is the new value for the base
16695 // pointer. It needs to be returned. The result of the load instruction is
16696 // passed to intrinsic by address, so the value needs to be stored.
16697 llvm::Value *BaseAddress =
16698 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
16700 // Expressions like &(*pt++) will be incremented per evaluation.
16701 // EmitPointerWithAlignment and EmitScalarExpr evaluates the expression
16703 Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
16704 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
16705 DestAddr.getAlignment());
16706 llvm::Value *DestAddress = DestAddr.getPointer();
16708 // Operands are Base, Dest, Modifier.
16709 // The intrinsic format in LLVM IR is defined as
16710 // { ValueType, i8* } (i8*, i32).
16711 llvm::Value *Result = Builder.CreateCall(
16712 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
16714 // The value needs to be stored as the variable is passed by reference.
16715 llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
16717 // The store needs to be truncated to fit the destination type.
16718 // While i32 and i64 are natively supported on Hexagon, i8 and i16 needs
16719 // to be handled with stores of respective destination type.
16720 DestVal = Builder.CreateTrunc(DestVal, DestTy);
16722 llvm::Value *DestForStore =
16723 Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
16724 Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
16725 // The updated value of the base pointer is returned.
16726 return Builder.CreateExtractValue(Result, 1);
16729 auto V2Q = [this, VecLen] (llvm::Value *Vec) {
16730 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
16731 : Intrinsic::hexagon_V6_vandvrt;
16732 return Builder.CreateCall(CGM.getIntrinsic(ID),
16733 {Vec, Builder.getInt32(-1)});
16735 auto Q2V = [this, VecLen] (llvm::Value *Pred) {
16736 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
16737 : Intrinsic::hexagon_V6_vandqrt;
16738 return Builder.CreateCall(CGM.getIntrinsic(ID),
16739 {Pred, Builder.getInt32(-1)});
16742 switch (BuiltinID) {
16743 // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
16744 // and the corresponding C/C++ builtins use loads/stores to update
16746 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
16747 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
16748 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
16749 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
16750 // Get the type from the 0-th argument.
16751 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
16752 Address PredAddr = Builder.CreateBitCast(
16753 EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
16754 llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
16755 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
16756 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
16758 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
16759 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
16760 PredAddr.getAlignment());
16761 return Builder.CreateExtractValue(Result, 0);
16764 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
16765 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
16766 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
16767 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
16768 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
16769 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
16770 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
16771 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
16772 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
16773 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
16774 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
16775 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
16776 return MakeCircOp(ID, /*IsLoad=*/true);
16777 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
16778 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
16779 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
16780 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
16781 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
16782 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
16783 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
16784 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
16785 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
16786 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
16787 return MakeCircOp(ID, /*IsLoad=*/false);
16788 case Hexagon::BI__builtin_brev_ldub:
16789 return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
16790 case Hexagon::BI__builtin_brev_ldb:
16791 return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
16792 case Hexagon::BI__builtin_brev_lduh:
16793 return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
16794 case Hexagon::BI__builtin_brev_ldh:
16795 return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
16796 case Hexagon::BI__builtin_brev_ldw:
16797 return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
16798 case Hexagon::BI__builtin_brev_ldd:
16799 return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
16802 if (ID == Intrinsic::not_intrinsic)
16805 auto IsVectorPredTy = [](llvm::Type *T) {
16806 return T->isVectorTy() &&
16807 cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
16810 llvm::Function *IntrFn = CGM.getIntrinsic(ID);
16811 llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
16812 SmallVector<llvm::Value*,4> Ops;
16813 for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
16814 llvm::Type *T = IntrTy->getParamType(i);
16815 const Expr *A = E->getArg(i);
16816 if (IsVectorPredTy(T)) {
16817 // There will be an implicit cast to a boolean vector. Strip it.
16818 if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
16819 if (Cast->getCastKind() == CK_BitCast)
16820 A = Cast->getSubExpr();
16822 Ops.push_back(V2Q(EmitScalarExpr(A)));
16824 Ops.push_back(EmitScalarExpr(A));
16828 llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
16829 if (IsVectorPredTy(IntrTy->getReturnType()))