//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
#include <sstream>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

static
int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
  return std::min(High, std::max(Low, Value));
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
}
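
// For reference, a sketch of what initializeAlloca produces: for
//   char *p = (char *)__builtin_alloca(n);
// compiled with -ftrivial-auto-var-init=zero, the alloca is followed by
// roughly
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i1 false)
// with a repeating pattern byte (e.g. 0xAA) instead under =pattern.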

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.getName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
104 /// Emit the conversions required to turn the given value into an
105 /// integer of the given size.
106 static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
107 QualType T, llvm::IntegerType *IntType) {
108 V = CGF.EmitToMemory(V, T);
110 if (V->getType()->isPointerTy())
111 return CGF.Builder.CreatePtrToInt(V, IntType);
113 assert(V->getType() == IntType);

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
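///
/// For example (a sketch; the exact IR depends on type and target), a call
///   __sync_fetch_and_add(&i, 5)
/// on an int lowers to roughly
///   %old = atomicrmw add i32* %i, i32 5 seq_cst
/// with %old converted back to the source type via EmitFromInt.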
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
      llvm::IntegerType::get(CGF.getLLVMContext(),
                             CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Value *Address = CGF.EmitScalarExpr(E->getArg(1));

  // Convert the type of the pointer to a pointer to the stored type.
  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  Value *BC = CGF.Builder.CreateBitCast(
      Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
  LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Address = CGF.EmitScalarExpr(E->getArg(0));

  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
      llvm::IntegerType::get(CGF.getLLVMContext(),
                             CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                     llvm::ConstantInt::get(IntType, -1));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
/// invoke the function EmitAtomicCmpXchgForMSIntrin.
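///
/// As a sketch, with ReturnBool=false a call like
///   __sync_val_compare_and_swap(&i, old, new)
/// becomes roughly
///   %pair = cmpxchg i32* %i, i32 %old, i32 %new seq_cst seq_cst
///   %val  = extractvalue { i32, i1 } %pair, 0
/// while ReturnBool=true extracts and zero-extends field 1 instead.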
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  Value *Args[3];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.
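///
/// For example (a sketch), _InterlockedCompareExchange(&v, Exch, Comp)
/// is emitted as roughly
///   %pair = cmpxchg volatile i32* %v, i32 %Comp, i32 %Exch seq_cst seq_cst
///   %old  = extractvalue { i32, i1 } %pair, 0
/// i.e. the Comparand/Exchange order is swapped relative to the C signature.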
static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  auto *Result = CGF.Builder.CreateAtomicCmpXchg(
                   Destination, Comparand, Exchange,
                   SuccessOrdering, FailureOrdering);
  Result->setVolatile(true);
  return CGF.Builder.CreateExtractValue(Result, 0);
}

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Add,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}
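
// A sketch of the result: _InterlockedIncrement(&v) becomes roughly
//   %old = atomicrmw add i32* %v, i32 1 seq_cst
//   %new = add i32 %old, 1
// because the MSVC intrinsic returns the *new* value, while atomicrmw
// returns the old one.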

static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Sub,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
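//
// For example (a sketch), __builtin_sqrt(x) normally lowers to
//   %r = call double @llvm.sqrt.f64(double %x)
// but in a strict FP environment it becomes something like
//   %r = call double @llvm.experimental.constrained.sqrt.f64(
//            double %x, metadata !"round.dynamic", metadata !"fpexcept.strict")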
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0);
}

// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
                                unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1 });
}

// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}

// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has overloaded integer result and fp operand.
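// A sketch: __builtin_lround(x) on a typical 64-bit target lowers to
//   %r = call i64 @llvm.lround.i64.f64(double %x)
// (or its llvm.experimental.constrained.lround counterpart when the
// builder is in FP-constrained mode).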
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
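///
/// A sketch for a plain double: the value is bitcast to i64 and compared
/// signed-less-than zero, so
///   signbit(-0.0) -> (i64 0x8000000000000000 < 0) -> true
/// PPC double-double needs the extra shuffling below to pick the sign of
/// the higher-order double first.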
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
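///
/// A sketch of a typical use, for __builtin_sadd_overflow(a, b, &r):
///   %pair  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
///   %sum   = extractvalue { i32, i1 } %pair, 0   ; returned Value
///   %carry = extractvalue { i32, i1 } %pair, 1   ; stored through Carry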
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}

static Value *emitRangedBuiltin(CodeGenFunction &CGF,
                                unsigned IntrinsicID,
                                int low, int high) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
  llvm::Instruction *Call = CGF.Builder.CreateCall(F);
  Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
  return Call;
}

namespace {
struct WidthAndSignedness {
  unsigned Width;
  bool Signed;
};
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
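//
// For example (informally): {unsigned 32, unsigned 16} is encompassed by an
// unsigned 32-bit type, while {signed 32, unsigned 32} needs a signed 33-bit
// type, since a signed type must be strictly wider than any unsigned input.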
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  llvm::Type *DestType = Int8PtrTy;
  if (ArgValue->getType() != DestType)
    ArgValue =
        Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());

  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
///   - A llvm::Argument (if E is a param with the pass_object_size attribute on
///     it)
///   - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
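///
/// A sketch of the common case: __builtin_object_size(p, 0) becomes roughly
///   %size = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false,
///                                              i1 true, i1 false)
/// where the i1 flags are Min, NullIsUnknown and Dynamic, as set up below.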
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate E for side-effects. In either case, we shouldn't lower to
  // @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}

namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace

BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
    // Main portable variants.
  case Builtin::BI_bittest:
    return {TestOnly, Unlocked, false};
  case Builtin::BI_bittestandcomplement:
    return {Complement, Unlocked, false};
  case Builtin::BI_bittestandreset:
    return {Reset, Unlocked, false};
  case Builtin::BI_bittestandset:
    return {Set, Unlocked, false};
  case Builtin::BI_interlockedbittestandreset:
    return {Reset, Sequential, false};
  case Builtin::BI_interlockedbittestandset:
    return {Set, Sequential, false};

    // X86-specific 64-bit variants.
  case Builtin::BI_bittest64:
    return {TestOnly, Unlocked, true};
  case Builtin::BI_bittestandcomplement64:
    return {Complement, Unlocked, true};
  case Builtin::BI_bittestandreset64:
    return {Reset, Unlocked, true};
  case Builtin::BI_bittestandset64:
    return {Set, Unlocked, true};
  case Builtin::BI_interlockedbittestandreset64:
    return {Reset, Sequential, true};
  case Builtin::BI_interlockedbittestandset64:
    return {Set, Sequential, true};

    // ARM/AArch64-specific ordering variants.
  case Builtin::BI_interlockedbittestandset_acq:
    return {Set, Acquire, false};
  case Builtin::BI_interlockedbittestandset_rel:
    return {Set, Release, false};
  case Builtin::BI_interlockedbittestandset_nf:
    return {Set, NoFence, false};
  case Builtin::BI_interlockedbittestandreset_acq:
    return {Reset, Acquire, false};
  case Builtin::BI_interlockedbittestandreset_rel:
    return {Reset, Release, false};
  case Builtin::BI_interlockedbittestandreset_nf:
    return {Reset, NoFence, false};
  }
  llvm_unreachable("expected only bittest intrinsics");
}

static char bitActionToX86BTCode(BitTest::ActionKind A) {
  switch (A) {
  case BitTest::TestOnly:   return '\0';
  case BitTest::Complement: return 'c';
  case BitTest::Reset:      return 'r';
  case BitTest::Set:        return 's';
  }
  llvm_unreachable("invalid action");
}

static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {
  char Action = bitActionToX86BTCode(BT.Action);
  char SizeSuffix = BT.Is64Bit ? 'q' : 'l';

  // Build the assembly.
  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  if (BT.Interlocking != BitTest::Unlocked)
    AsmOS << "lock ";
  AsmOS << "bt";
  if (Action)
    AsmOS << Action;
  AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";

  // Build the constraints. FIXME: We should support immediates when possible.
  std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(),
      CGF.getContext().getTypeSize(E->getArg(1)->getType()));
  llvm::Type *IntPtrType = IntType->getPointerTo();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}

static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
  switch (I) {
  case BitTest::Unlocked:   return llvm::AtomicOrdering::NotAtomic;
  case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
  case BitTest::Acquire:    return llvm::AtomicOrdering::Acquire;
  case BitTest::Release:    return llvm::AtomicOrdering::Release;
  case BitTest::NoFence:    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("invalid interlocking");
}

/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
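///
/// For example (a sketch of the generic, non-x86 path):
///   unsigned char r = _bittestandset(bits, 40);
/// loads the byte at bits[40 >> 3] == bits[5], ORs in the mask
/// 1 << (40 & 7) == 1, stores the byte back, and returns the previously
/// loaded bit.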
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  if (CGF.getTarget().getTriple().isX86())
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to form
  // a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
                                                 ByteIndex, "bittest.byteaddr"),
                   CharUnits::One());
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics.
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
                                          Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}

namespace {
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}

/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {
  llvm::Value *Arg1 = nullptr;
  llvm::Type *Arg1Ty = nullptr;
  StringRef Name;
  bool IsVarArg = false;
  if (SJKind == MSVCSetJmpKind::_setjmp3) {
    Name = "_setjmp3";
    Arg1Ty = CGF.Int32Ty;
    Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
    IsVarArg = true;
  } else {
    Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
    Arg1Ty = CGF.Int8PtrTy;
    if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
    } else
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
          llvm::ConstantInt::get(CGF.Int32Ty, 0));
  }

  // Mark the call site and declaration with ReturnsTwice.
  llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
  llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
      llvm::Attribute::ReturnsTwice);
  llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
      ReturnsTwiceAttr, /*Local=*/true);

  llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
      CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
  llvm::Value *Args[] = {Buf, Arg1};
  llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
  CB->setAttributes(ReturnsTwiceAttr);
  return RValue::get(CB);
}

// Many of the MSVC builtins are on x64, ARM and AArch64; to avoid repeating
// code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
  _BitScanForward,
  _BitScanReverse,
  _InterlockedAnd,
  _InterlockedDecrement,
  _InterlockedExchange,
  _InterlockedExchangeAdd,
  _InterlockedExchangeSub,
  _InterlockedIncrement,
  _InterlockedOr,
  _InterlockedXor,
  _InterlockedExchangeAdd_acq,
  _InterlockedExchangeAdd_rel,
  _InterlockedExchangeAdd_nf,
  _InterlockedExchange_acq,
  _InterlockedExchange_rel,
  _InterlockedExchange_nf,
  _InterlockedCompareExchange_acq,
  _InterlockedCompareExchange_rel,
  _InterlockedCompareExchange_nf,
  _InterlockedOr_acq,
  _InterlockedOr_rel,
  _InterlockedOr_nf,
  _InterlockedXor_acq,
  _InterlockedXor_rel,
  _InterlockedXor_nf,
  _InterlockedAnd_acq,
  _InterlockedAnd_rel,
  _InterlockedAnd_nf,
  _InterlockedIncrement_acq,
  _InterlockedIncrement_rel,
  _InterlockedIncrement_nf,
  _InterlockedDecrement_acq,
  _InterlockedDecrement_rel,
  _InterlockedDecrement_nf,
  __fastfail,
};

Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
                                            const CallExpr *E) {
  switch (BuiltinID) {
  case MSVCIntrin::_BitScanForward:
  case MSVCIntrin::_BitScanReverse: {
    Value *ArgValue = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = ArgValue->getType();
    llvm::Type *IndexType =
        EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
    llvm::Type *ResultType = ConvertType(E->getType());

    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
    Value *ResZero = llvm::Constant::getNullValue(ResultType);
    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);

    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");

    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ResZero, Begin);

    Builder.SetInsertPoint(NotZero);
    Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));

    if (BuiltinID == MSVCIntrin::_BitScanForward) {
      Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Builder.CreateStore(ZeroCount, IndexAddress, false);
    } else {
      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);

      Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
      Builder.CreateStore(Index, IndexAddress, false);
    }
    Builder.CreateBr(End);
    Result->addIncoming(ResOne, NotZero);

    Builder.SetInsertPoint(End);
    return Result;
  }
  case MSVCIntrin::_InterlockedAnd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
  case MSVCIntrin::_InterlockedExchange:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
  case MSVCIntrin::_InterlockedExchangeAdd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
  case MSVCIntrin::_InterlockedExchangeSub:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
  case MSVCIntrin::_InterlockedOr:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
  case MSVCIntrin::_InterlockedXor:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
  case MSVCIntrin::_InterlockedExchangeAdd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchangeAdd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchangeAdd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedExchange_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchange_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchange_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange_acq:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange_rel:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange_nf:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedOr_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedOr_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedOr_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedXor_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedXor_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedXor_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedAnd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedAnd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedAnd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedIncrement_acq:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedIncrement_rel:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedIncrement_nf:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedDecrement_acq:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedDecrement_rel:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedDecrement_nf:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);

  case MSVCIntrin::_InterlockedDecrement:
    return EmitAtomicDecrementValue(*this, E);
  case MSVCIntrin::_InterlockedIncrement:
    return EmitAtomicIncrementValue(*this, E);

  case MSVCIntrin::__fastfail: {
    // Request immediate process termination from the kernel. The instruction
    // sequences to do this are documented on MSDN:
    // https://msdn.microsoft.com/en-us/library/dn774154.aspx
    llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
    StringRef Asm, Constraints;
    switch (ISA) {
    default:
      ErrorUnsupported(E, "__fastfail call for this architecture");
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      Asm = "int $$0x29";
      Constraints = "{cx}";
      break;
    case llvm::Triple::thumb:
      Asm = "udf #251";
      Constraints = "{r0}";
      break;
    case llvm::Triple::aarch64:
      Asm = "brk #0xF003";
      Constraints = "{w0}";
    }
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
    llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
        getLLVMContext(), llvm::AttributeList::FunctionIndex,
        llvm::Attribute::NoReturn);
    llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
    CI->setAttributes(NoReturnAttr);
    return CI;
  }
  }
  llvm_unreachable("Incorrect MSVC intrinsic!");
}

namespace {
// ARC cleanup for __builtin_os_log_format
struct CallObjCArcUse final : EHScopeStack::Cleanup {
  CallObjCArcUse(llvm::Value *object) : object(object) {}
  llvm::Value *object;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitARCIntrinsicUse(object);
  }
};
}

Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
                                                 BuiltinCheckKind Kind) {
  assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
          && "Unsupported builtin check kind");

  Value *ArgValue = EmitScalarExpr(E);
  if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
    return ArgValue;

  SanitizerScope SanScope(this);
  Value *Cond = Builder.CreateICmpNE(
      ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
  EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
            SanitizerHandler::InvalidBuiltin,
            {EmitCheckSourceLocation(E->getExprLoc()),
             llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
            None);
  return ArgValue;
}

/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
  QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
  return C.getCanonicalType(UnsignedTy);
}

llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
    const analyze_os_log::OSLogBufferLayout &Layout,
    CharUnits BufferAlignment) {
  ASTContext &Ctx = getContext();

  llvm::SmallString<64> Name;
  {
    raw_svector_ostream OS(Name);
    OS << "__os_log_helper";
    OS << "_" << BufferAlignment.getQuantity();
    OS << "_" << int(Layout.getSummaryByte());
    OS << "_" << int(Layout.getNumArgsByte());
    for (const auto &Item : Layout.Items)
      OS << "_" << int(Item.getSizeByte()) << "_"
         << int(Item.getDescriptorByte());
  }

  if (llvm::Function *F = CGM.getModule().getFunction(Name))
    return F;

  llvm::SmallVector<QualType, 4> ArgTys;
  FunctionArgList Args;
  Args.push_back(ImplicitParamDecl::Create(
      Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
      ImplicitParamDecl::Other));
  ArgTys.emplace_back(Ctx.VoidPtrTy);

  for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
    char Size = Layout.Items[I].getSizeByte();
    if (!Size)
      continue;

    QualType ArgTy = getOSLogArgType(Ctx, Size);
    Args.push_back(ImplicitParamDecl::Create(
        Ctx, nullptr, SourceLocation(),
        &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
        ImplicitParamDecl::Other));
    ArgTys.emplace_back(ArgTy);
  }

  QualType ReturnTy = Ctx.VoidTy;
  QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});

  // The helper function has linkonce_odr linkage to enable the linker to merge
  // identical functions. To ensure the merging always happens, 'noinline' is
  // attached to the function when compiling with -Oz.
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
  llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Fn = llvm::Function::Create(
      FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
  Fn->setDoesNotThrow();

  // Attach 'noinline' at -Oz.
  if (CGM.getCodeGenOpts().OptimizeSize == 2)
    Fn->addFnAttr(llvm::Attribute::NoInline);

  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  IdentifierInfo *II = &Ctx.Idents.get(Name);
  FunctionDecl *FD = FunctionDecl::Create(
      Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_PrivateExtern, false, false);

  StartFunction(FD, ReturnTy, Fn, FI, Args);

  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  CharUnits Offset;
  Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
                  BufferAlignment);
  Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
  Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));

  unsigned I = 1;
  for (const auto &Item : Layout.Items) {
    Builder.CreateStore(
        Builder.getInt8(Item.getDescriptorByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
    Builder.CreateStore(
        Builder.getInt8(Item.getSizeByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));

    CharUnits Size = Item.size();
    if (!Size.getQuantity())
      continue;

    Address Arg = GetAddrOfLocalVar(Args[I]);
    Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
    Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
                                 "argDataCast");
    Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
    Offset += Size;
    ++I;
  }

  FinishFunction();

  return Fn;
}

RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
  assert(E.getNumArgs() >= 2 &&
         "__builtin_os_log_format takes at least 2 arguments");
  ASTContext &Ctx = getContext();
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
  Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
  llvm::SmallVector<llvm::Value *, 4> RetainableOperands;

  // Ignore argument 1, the format string. It is not currently used.
  CallArgList Args;
  Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);

  for (const auto &Item : Layout.Items) {
    int Size = Item.getSizeByte();
    if (!Size)
      continue;

    llvm::Value *ArgVal;

    if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
      uint64_t Val = 0;
      for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
        Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
      ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
    } else if (const Expr *TheExpr = Item.getExpr()) {
      ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);

      // Check if this is a retainable type.
      if (TheExpr->getType()->isObjCRetainableType()) {
        assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
               "Only scalar can be a ObjC retainable type");
        // Check if the object is constant, if not, save it in
        // RetainableOperands.
        if (!isa<Constant>(ArgVal))
          RetainableOperands.push_back(ArgVal);
      }
    } else {
      ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
    }

    unsigned ArgValSize =
        CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
    llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
                                                     ArgValSize);
    ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
    CanQualType ArgTy = getOSLogArgType(Ctx, Size);
    // If ArgVal has type x86_fp80, zero-extend ArgVal.
    ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
    Args.add(RValue::get(ArgVal), ArgTy);
  }

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
  llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
      Layout, BufAddr.getAlignment());
  EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);

  // Push a clang.arc.use cleanup for each object in RetainableOperands. The
  // cleanup will cause the use to appear after the final log call, keeping
  // the object valid while it's held in the log buffer. Note that if there's
  // a release cleanup on the object, it will already be active; since
  // cleanups are emitted in reverse order, the use will occur before the
  // object is released.
  if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
      CGM.getCodeGenOpts().OptimizationLevel != 0)
    for (llvm::Value *Object : RetainableOperands)
      pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object);

  return RValue::get(BufAddr.getPointer());
}

/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
                                       WidthAndSignedness Op1Info,
                                       WidthAndSignedness Op2Info,
                                       WidthAndSignedness ResultInfo) {
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
         Op1Info.Signed != Op2Info.Signed;
}

/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
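///
/// A sketch of the idea for int * unsigned -> int: take |Signed| with a
/// select, do llvm.umul.with.overflow on the magnitudes, then negate the
/// product and tighten the overflow check when the signed operand was
/// negative (|Result| may be at most INT_MAX + 1 in that case).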
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
                             WidthAndSignedness Op1Info, const clang::Expr *Op2,
                             WidthAndSignedness Op2Info,
                             const clang::Expr *ResultArg, QualType ResultQTy,
                             WidthAndSignedness ResultInfo) {
  assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
                                    Op2Info, ResultInfo) &&
         "Not a mixed-sign multiplication we can specialize");

  // Emit the signed and unsigned operands.
  const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
  const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
  llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
  llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
  unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
  unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;

  // One of the operands may be smaller than the other. If so, [s|z]ext it.
  if (SignedOpWidth < UnsignedOpWidth)
    Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
  if (UnsignedOpWidth < SignedOpWidth)
    Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");

  llvm::Type *OpTy = Signed->getType();
  llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  llvm::Type *ResTy = ResultPtr.getElementType();
  unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);

  // Take the absolute value of the signed operand.
  llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
  llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
  llvm::Value *AbsSigned =
      CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);

  // Perform a checked unsigned multiplication.
  llvm::Value *UnsignedOverflow;
  llvm::Value *UnsignedResult =
      EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
                            Unsigned, UnsignedOverflow);

  llvm::Value *Overflow, *Result;
  if (ResultInfo.Signed) {
    // Signed overflow occurs if the result is greater than INT_MAX or less
    // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
    auto IntMax =
        llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
    llvm::Value *MaxResult =
        CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
                              CGF.Builder.CreateZExt(IsNegative, OpTy));
    llvm::Value *SignedOverflow =
        CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);

    // Prepare the signed result (possibly by negating it).
    llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
    llvm::Value *SignedResult =
        CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
    Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
  } else {
    // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
    llvm::Value *Underflow = CGF.Builder.CreateAnd(
        IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
    if (ResultInfo.Width < OpWidth) {
      auto IntMax =
          llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
      llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
          UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
      Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
    }

    // Negate the product if it would be negative in infinite precision.
    Result = CGF.Builder.CreateSelect(
        IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);

    Result = CGF.Builder.CreateTrunc(Result, ResTy);
  }
  assert(Overflow && Result && "Missing overflow or result");

  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(Overflow);
}
static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
                               Value *&RecordPtr, CharUnits Align,
                               llvm::FunctionCallee Func, int Lvl) {
  ASTContext &Context = CGF.getContext();
  RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
  std::string Pad = std::string(Lvl * 4, ' ');

  Value *GString =
      CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
  Value *Res = CGF.Builder.CreateCall(Func, {GString});

  static llvm::DenseMap<QualType, const char *> Types;
  if (Types.empty()) {
    Types[Context.CharTy] = "%c";
    Types[Context.BoolTy] = "%d";
    Types[Context.SignedCharTy] = "%hhd";
    Types[Context.UnsignedCharTy] = "%hhu";
    Types[Context.IntTy] = "%d";
    Types[Context.UnsignedIntTy] = "%u";
    Types[Context.LongTy] = "%ld";
    Types[Context.UnsignedLongTy] = "%lu";
    Types[Context.LongLongTy] = "%lld";
    Types[Context.UnsignedLongLongTy] = "%llu";
    Types[Context.ShortTy] = "%hd";
    Types[Context.UnsignedShortTy] = "%hu";
    Types[Context.VoidPtrTy] = "%p";
    Types[Context.FloatTy] = "%f";
    Types[Context.DoubleTy] = "%f";
    Types[Context.LongDoubleTy] = "%Lf";
    Types[Context.getPointerType(Context.CharTy)] = "%s";
    Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
  }

  for (const auto *FD : RD->fields()) {
    Value *FieldPtr = RecordPtr;
    if (RD->isUnion())
      FieldPtr = CGF.Builder.CreatePointerCast(
          FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
    else
      FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
                                             FD->getFieldIndex());

    GString = CGF.Builder.CreateGlobalStringPtr(
        llvm::Twine(Pad)
            .concat(FD->getType().getAsString())
            .concat(llvm::Twine(' '))
            .concat(FD->getNameAsString())
            .concat(" : ")
            .str());
    Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
    Res = CGF.Builder.CreateAdd(Res, TmpRes);

    QualType CanonicalType =
        FD->getType().getUnqualifiedType().getCanonicalType();

    // Check whether the field itself is a record type and recurse if so.
    if (CanonicalType->isRecordType()) {
      TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
      Res = CGF.Builder.CreateAdd(TmpRes, Res);
      continue;
    }

    // We try to determine the best format to print the current field.
    llvm::Twine Format = Types.find(CanonicalType) == Types.end()
                             ? Types[Context.VoidPtrTy]
                             : Types[CanonicalType];

    Address FieldAddress = Address(FieldPtr, Align);
    FieldPtr = CGF.Builder.CreateLoad(FieldAddress);

    // FIXME: Need to handle bitfields here.
    GString = CGF.Builder.CreateGlobalStringPtr(
        Format.concat(llvm::Twine('\n')).str());
    TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
    Res = CGF.Builder.CreateAdd(Res, TmpRes);
  }

  GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
  Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
  Res = CGF.Builder.CreateAdd(Res, TmpRes);
  return Res;
}
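// Illustrative driver for dumpRecord (not emitted code): given
//   struct P { int x; float y; } p = {1, 2.0f};
//   __builtin_dump_struct(&p, &printf);
// the emitted calls print the record name, then one "type name : value" line
// per field using the format table above, and finally the closing brace.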
static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
                              llvm::SmallPtrSetImpl<const Decl *> &Seen) {
  if (const auto *Arr = Ctx.getAsArrayType(Ty))
    Ty = Ctx.getBaseElementType(Arr);

  const auto *Record = Ty->getAsCXXRecordDecl();
  if (!Record)
    return false;

  // We've already checked this type, or are in the process of checking it.
  if (!Seen.insert(Record).second)
    return false;

  assert(Record->hasDefinition() &&
         "Incomplete types should already be diagnosed");

  if (Record->isDynamicClass())
    return true;

  for (FieldDecl *F : Record->fields()) {
    if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
      return true;
  }
  return false;
}

/// Determine if the specified type requires laundering by checking if it is a
/// dynamic class type or contains a subobject which is a dynamic class type.
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
  if (!CGM.getCodeGenOpts().StrictVTablePointers)
    return false;
  llvm::SmallPtrSet<const Decl *, 16> Seen;
  return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
}
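// Example (illustrative): under -fstrict-vtable-pointers, a polymorphic type
// such as "struct B { virtual void f(); };" requires laundering, so
// __builtin_launder(p) emits llvm.launder.invariant.group; for a plain POD
// struct the pointer is returned unchanged (see BI__builtin_launder below).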
RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
  llvm::Value *Src = EmitScalarExpr(E->getArg(0));
  llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));

  // The builtin's shift arg may have a different type than the source arg and
  // result, but the LLVM intrinsic uses the same type for all values.
  llvm::Type *Ty = Src->getType();
  ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);

  // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
  unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGM.getIntrinsic(IID, Ty);
  return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
}
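// e.g. __builtin_rotateleft32(x, n) lowers to @llvm.fshl.i32(x, x, n): with
// both funnel-shift inputs equal, the bits shifted out one end re-enter at
// the other end, which is exactly a rotate.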
RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                                        const CallExpr *E,
                                        ReturnValueSlot ReturnValue) {
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  // See if we can constant fold this builtin. If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  // There are LLVM math intrinsics/instructions corresponding to math library
  // functions except the LLVM op will never set errno while the math library
  // might. Also, math builtins have the same semantics as their math library
  // twins. Thus, we can transform math library and builtin calls to their
  // LLVM counterparts if the call is marked 'const' (known to never set
  // errno).
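  // For example, when FP is constrained (e.g. strict FP exception behavior),
  // a 'const' call to ceil(x) is emitted as
  // @llvm.experimental.constrained.ceil rather than @llvm.ceil; the
  // emit*MaybeConstrainedFPBuiltin helpers below choose between the two
  // intrinsic IDs passed to them.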
  if (FD->hasAttr<ConstAttr>()) {
    switch (BuiltinID) {
    case Builtin::BIceil:
    case Builtin::BIceilf:
    case Builtin::BIceill:
    case Builtin::BI__builtin_ceil:
    case Builtin::BI__builtin_ceilf:
    case Builtin::BI__builtin_ceilf16:
    case Builtin::BI__builtin_ceill:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                    Intrinsic::ceil,
                                    Intrinsic::experimental_constrained_ceil));

    case Builtin::BIcopysign:
    case Builtin::BIcopysignf:
    case Builtin::BIcopysignl:
    case Builtin::BI__builtin_copysign:
    case Builtin::BI__builtin_copysignf:
    case Builtin::BI__builtin_copysignf16:
    case Builtin::BI__builtin_copysignl:
    case Builtin::BI__builtin_copysignf128:
      return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));

    case Builtin::BIcos:
    case Builtin::BIcosf:
    case Builtin::BIcosl:
    case Builtin::BI__builtin_cos:
    case Builtin::BI__builtin_cosf:
    case Builtin::BI__builtin_cosf16:
    case Builtin::BI__builtin_cosl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                     Intrinsic::cos,
                                     Intrinsic::experimental_constrained_cos));

    case Builtin::BIexp:
    case Builtin::BIexpf:
    case Builtin::BIexpl:
    case Builtin::BI__builtin_exp:
    case Builtin::BI__builtin_expf:
    case Builtin::BI__builtin_expf16:
    case Builtin::BI__builtin_expl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                     Intrinsic::exp,
                                     Intrinsic::experimental_constrained_exp));

    case Builtin::BIexp2:
    case Builtin::BIexp2f:
    case Builtin::BIexp2l:
    case Builtin::BI__builtin_exp2:
    case Builtin::BI__builtin_exp2f:
    case Builtin::BI__builtin_exp2f16:
    case Builtin::BI__builtin_exp2l:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                    Intrinsic::exp2,
                                    Intrinsic::experimental_constrained_exp2));

    case Builtin::BIfabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabsl:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabsf16:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_fabsf128:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));

    case Builtin::BIfloor:
    case Builtin::BIfloorf:
    case Builtin::BIfloorl:
    case Builtin::BI__builtin_floor:
    case Builtin::BI__builtin_floorf:
    case Builtin::BI__builtin_floorf16:
    case Builtin::BI__builtin_floorl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::floor,
                                   Intrinsic::experimental_constrained_floor));

    case Builtin::BIfma:
    case Builtin::BIfmaf:
    case Builtin::BIfmal:
    case Builtin::BI__builtin_fma:
    case Builtin::BI__builtin_fmaf:
    case Builtin::BI__builtin_fmaf16:
    case Builtin::BI__builtin_fmal:
      return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
                                     Intrinsic::fma,
                                     Intrinsic::experimental_constrained_fma));

    case Builtin::BIfmax:
    case Builtin::BIfmaxf:
    case Builtin::BIfmaxl:
    case Builtin::BI__builtin_fmax:
    case Builtin::BI__builtin_fmaxf:
    case Builtin::BI__builtin_fmaxf16:
    case Builtin::BI__builtin_fmaxl:
      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
                                  Intrinsic::maxnum,
                                  Intrinsic::experimental_constrained_maxnum));

    case Builtin::BIfmin:
    case Builtin::BIfminf:
    case Builtin::BIfminl:
    case Builtin::BI__builtin_fmin:
    case Builtin::BI__builtin_fminf:
    case Builtin::BI__builtin_fminf16:
    case Builtin::BI__builtin_fminl:
      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
                                  Intrinsic::minnum,
                                  Intrinsic::experimental_constrained_minnum));

    // fmod() is a special-case. It maps to the frem instruction rather than an
    // LLVM intrinsic.
    case Builtin::BIfmod:
    case Builtin::BIfmodf:
    case Builtin::BIfmodl:
    case Builtin::BI__builtin_fmod:
    case Builtin::BI__builtin_fmodf:
    case Builtin::BI__builtin_fmodf16:
    case Builtin::BI__builtin_fmodl: {
      Value *Arg1 = EmitScalarExpr(E->getArg(0));
      Value *Arg2 = EmitScalarExpr(E->getArg(1));
      return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
    }
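    // Note on the case above: like fmod, frem keeps the sign of the dividend,
    // e.g. fmod(5.5, 2.0) == 1.5 and fmod(-5.5, 2.0) == -1.5.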
    case Builtin::BIlog:
    case Builtin::BIlogf:
    case Builtin::BIlogl:
    case Builtin::BI__builtin_log:
    case Builtin::BI__builtin_logf:
    case Builtin::BI__builtin_logf16:
    case Builtin::BI__builtin_logl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                     Intrinsic::log,
                                     Intrinsic::experimental_constrained_log));

    case Builtin::BIlog10:
    case Builtin::BIlog10f:
    case Builtin::BIlog10l:
    case Builtin::BI__builtin_log10:
    case Builtin::BI__builtin_log10f:
    case Builtin::BI__builtin_log10f16:
    case Builtin::BI__builtin_log10l:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::log10,
                                   Intrinsic::experimental_constrained_log10));

    case Builtin::BIlog2:
    case Builtin::BIlog2f:
    case Builtin::BIlog2l:
    case Builtin::BI__builtin_log2:
    case Builtin::BI__builtin_log2f:
    case Builtin::BI__builtin_log2f16:
    case Builtin::BI__builtin_log2l:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                    Intrinsic::log2,
                                    Intrinsic::experimental_constrained_log2));

    case Builtin::BInearbyint:
    case Builtin::BInearbyintf:
    case Builtin::BInearbyintl:
    case Builtin::BI__builtin_nearbyint:
    case Builtin::BI__builtin_nearbyintf:
    case Builtin::BI__builtin_nearbyintl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                               Intrinsic::nearbyint,
                               Intrinsic::experimental_constrained_nearbyint));

    case Builtin::BIpow:
    case Builtin::BIpowf:
    case Builtin::BIpowl:
    case Builtin::BI__builtin_pow:
    case Builtin::BI__builtin_powf:
    case Builtin::BI__builtin_powf16:
    case Builtin::BI__builtin_powl:
      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
                                     Intrinsic::pow,
                                     Intrinsic::experimental_constrained_pow));

    case Builtin::BIrint:
    case Builtin::BIrintf:
    case Builtin::BIrintl:
    case Builtin::BI__builtin_rint:
    case Builtin::BI__builtin_rintf:
    case Builtin::BI__builtin_rintf16:
    case Builtin::BI__builtin_rintl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                    Intrinsic::rint,
                                    Intrinsic::experimental_constrained_rint));

    case Builtin::BIround:
    case Builtin::BIroundf:
    case Builtin::BIroundl:
    case Builtin::BI__builtin_round:
    case Builtin::BI__builtin_roundf:
    case Builtin::BI__builtin_roundf16:
    case Builtin::BI__builtin_roundl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::round,
                                   Intrinsic::experimental_constrained_round));

    case Builtin::BIsin:
    case Builtin::BIsinf:
    case Builtin::BIsinl:
    case Builtin::BI__builtin_sin:
    case Builtin::BI__builtin_sinf:
    case Builtin::BI__builtin_sinf16:
    case Builtin::BI__builtin_sinl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                     Intrinsic::sin,
                                     Intrinsic::experimental_constrained_sin));

    case Builtin::BIsqrt:
    case Builtin::BIsqrtf:
    case Builtin::BIsqrtl:
    case Builtin::BI__builtin_sqrt:
    case Builtin::BI__builtin_sqrtf:
    case Builtin::BI__builtin_sqrtf16:
    case Builtin::BI__builtin_sqrtl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                    Intrinsic::sqrt,
                                    Intrinsic::experimental_constrained_sqrt));

    case Builtin::BItrunc:
    case Builtin::BItruncf:
    case Builtin::BItruncl:
    case Builtin::BI__builtin_trunc:
    case Builtin::BI__builtin_truncf:
    case Builtin::BI__builtin_truncf16:
    case Builtin::BI__builtin_truncl:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::trunc,
                                   Intrinsic::experimental_constrained_trunc));

    case Builtin::BIlround:
    case Builtin::BIlroundf:
    case Builtin::BIlroundl:
    case Builtin::BI__builtin_lround:
    case Builtin::BI__builtin_lroundf:
    case Builtin::BI__builtin_lroundl:
      return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
          *this, E, Intrinsic::lround,
          Intrinsic::experimental_constrained_lround));

    case Builtin::BIllround:
    case Builtin::BIllroundf:
    case Builtin::BIllroundl:
    case Builtin::BI__builtin_llround:
    case Builtin::BI__builtin_llroundf:
    case Builtin::BI__builtin_llroundl:
      return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
          *this, E, Intrinsic::llround,
          Intrinsic::experimental_constrained_llround));
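    // lround/llround (above) round halfway cases away from zero regardless of
    // the current rounding mode; lrint/llrint (below) honor the current mode,
    // which is why they map to distinct intrinsics.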
    case Builtin::BIlrint:
    case Builtin::BIlrintf:
    case Builtin::BIlrintl:
    case Builtin::BI__builtin_lrint:
    case Builtin::BI__builtin_lrintf:
    case Builtin::BI__builtin_lrintl:
      return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
          *this, E, Intrinsic::lrint,
          Intrinsic::experimental_constrained_lrint));

    case Builtin::BIllrint:
    case Builtin::BIllrintf:
    case Builtin::BIllrintl:
    case Builtin::BI__builtin_llrint:
    case Builtin::BI__builtin_llrintf:
    case Builtin::BI__builtin_llrintl:
      return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
          *this, E, Intrinsic::llrint,
          Intrinsic::experimental_constrained_llrint));

    default:
      break;
    }
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__va_start:
  case Builtin::BI__builtin_va_end:
    return RValue::get(
        EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
                           ? EmitScalarExpr(E->getArg(0))
                           : EmitVAListRef(E->getArg(0)).getPointer(),
                       BuiltinID != Builtin::BI__builtin_va_end));
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
    Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
                                          {DstPtr, SrcPtr}));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    // X < 0 ? -X : X.
    // The negation has 'nsw' because abs of INT_MIN is undefined.
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
    Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
    Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
    Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl:
  case Builtin::BIconj:
  case Builtin::BIconjf:
  case Builtin::BIconjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Imag = Builder.CreateFNeg(Imag, "neg");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }
  case Builtin::BI__builtin_dump_struct: {
    llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
    llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
        LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);

    Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
    CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();

    const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
    QualType Arg0Type = Arg0->getType()->getPointeeType();

    Value *RecordPtr = EmitScalarExpr(Arg0);
    Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
                            {LLVMFuncType, Func}, 0);
    return RValue::get(Res);
  }

  case Builtin::BI__builtin_preserve_access_index: {
    // Only enable the preserved access index region when debuginfo is
    // available, as debuginfo is needed to preserve the user-level access
    // pattern.
    if (!getDebugInfo()) {
      CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
      return RValue::get(EmitScalarExpr(E->getArg(0)));
    }

    // Nested builtin_preserve_access_index() is not supported.
    if (IsInPreservedAIRegion) {
      CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
      return RValue::get(EmitScalarExpr(E->getArg(0)));
    }

    IsInPreservedAIRegion = true;
    Value *Res = EmitScalarExpr(E->getArg(0));
    IsInPreservedAIRegion = false;
    return RValue::get(Res);
  }
  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll: {
    // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
    Value *Inverse = Builder.CreateNot(ArgValue, "not");
    Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
    Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
    Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
    Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                   "cast");
    return RValue::get(Result);
  }
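  // For the clrsb lowering above, with 32-bit int (illustrative values):
  //   __builtin_clrsb(0) == 31, __builtin_clrsb(-1) == 31,
  //   __builtin_clrsb(1) == 30 -- the count of redundant sign bits.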
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp =
        Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
                          llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
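  // e.g. __builtin_ffs(0x18) == 4: cttz reports three trailing zeros and the
  // +1 converts that to a 1-based bit index; the select above makes
  // ffs(0) == 0.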
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__lzcnt16:
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__popcnt16:
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_unpredictable: {
    // Always return the argument of __builtin_unpredictable. LLVM does not
    // handle this builtin. Metadata for this builtin should be added directly
    // to instructions such as branches or switches that use it.
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
    // Don't generate llvm.expect on -O0 as the backend won't use it for
    // anything.
    // Note, we still IRGen ExpectedValue because it could have side-effects.
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      return RValue::get(ArgValue);

    Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *Result =
        Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
    return RValue::get(Result);
  }
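  // Typical source pattern (illustrative): "if (__builtin_expect(err, 0))"
  // biases block placement so the error path is treated as unlikely.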
  case Builtin::BI__builtin_assume_aligned: {
    const Expr *Ptr = E->getArg(0);
    Value *PtrValue = EmitScalarExpr(Ptr);
    Value *OffsetValue =
        (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;

    Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
    ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
    if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
      AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
                                     llvm::Value::MaximumAlignment);

    EmitAlignmentAssumption(PtrValue, Ptr,
                            /*The expr loc is sufficient.*/ SourceLocation(),
                            AlignmentCI, OffsetValue);
    return RValue::get(PtrValue);
  }
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume: {
    if (E->getArg(0)->HasSideEffects(getContext()))
      return RValue::get(nullptr);

    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
    return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
  }
  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
  }
  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    return emitRotate(E, false);

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    return emitRotate(E, true);

  case Builtin::BI__builtin_constant_p: {
    llvm::Type *ResultType = ConvertType(E->getType());

    const Expr *Arg = E->getArg(0);
    QualType ArgType = Arg->getType();
    // FIXME: The allowance for Obj-C pointers and block pointers is historical
    // and likely a mistake.
    if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
        !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
      // Per the GCC documentation, only numeric constants are recognized after
      // inlining.
      return RValue::get(ConstantInt::get(ResultType, 0));

    if (Arg->HasSideEffects(getContext()))
      // The argument is unevaluated, so be conservative if it might have
      // side-effects.
      return RValue::get(ConstantInt::get(ResultType, 0));

    Value *ArgValue = EmitScalarExpr(Arg);
    if (ArgType->isObjCObjectPointerType()) {
      // Convert Objective-C objects to id because we cannot distinguish between
      // LLVM types for Obj-C classes as they are opaque.
      ArgType = CGM.getContext().getObjCIdType();
      ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
    }
    Function *F =
        CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size: {
    unsigned Type =
        E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
    auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));

    // We pass this builtin onto the optimizer so that it can figure out the
    // object size in more complex cases.
    bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
    return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
                                             /*EmittedE=*/nullptr, IsDynamic));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
        llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
        llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
  }
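  // llvm.prefetch operands, for reference: rw is 0 (read) or 1 (write),
  // locality ranges from 0 (no temporal locality) to 3 (keep in all caches),
  // and the final 1 marks this as a data rather than instruction prefetch.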
  case Builtin::BI__builtin_readcyclecounter: {
    Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin___clear_cache: {
    Value *Begin = EmitScalarExpr(E->getArg(0));
    Value *End = EmitScalarExpr(E->getArg(1));
    Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
    return RValue::get(Builder.CreateCall(F, {Begin, End}));
  }
  case Builtin::BI__builtin_trap:
    return RValue::get(EmitTrapCall(Intrinsic::trap));
  case Builtin::BI__debugbreak:
    return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
  case Builtin::BI__builtin_unreachable: {
    EmitUnreachable(E->getExprLoc());

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil:
    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
        *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
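  // All of the compares above are "ordered" except isunordered itself, so
  // they evaluate to false when either operand is NaN; e.g.
  // __builtin_isless(NAN, 1.0) is 0, and unlike "NAN < 1.0" it does not raise
  // the invalid exception.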
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BIfinite:
  case Builtin::BI__finite:
  case Builtin::BIfinitef:
  case Builtin::BI__finitef:
  case Builtin::BIfinitel:
  case Builtin::BI__finitel:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isfinite: {
    // isinf(x)    --> fabs(x) == infinity
    // isfinite(x) --> fabs(x) != infinity
    // x != NaN via the ordered compare in either case.
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Fabs = EmitFAbs(*this, V);
    Constant *Infinity = ConstantFP::getInfinity(V->getType());
    CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
                                  ? CmpInst::FCMP_OEQ
                                  : CmpInst::FCMP_ONE;
    Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
    return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf_sign: {
    // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
    Value *Arg = EmitScalarExpr(E->getArg(0));
    Value *AbsArg = EmitFAbs(*this, Arg);
    Value *IsInf = Builder.CreateFCmpOEQ(
        AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
    Value *IsNeg = EmitSignBit(*this, Arg);

    llvm::Type *IntTy = ConvertType(E->getType());
    Value *Zero = Constant::getNullValue(IntTy);
    Value *One = ConstantInt::get(IntTy, 1);
    Value *NegativeOne = ConstantInt::get(IntTy, -1);
    Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
    Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V);
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_flt_rounds: {
    Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
        Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                          "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V);
    Value *IsInf =
        Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                              "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
    Value *NormalResult =
        Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                             EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }
  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    const TargetInfo &TI = getContext().getTargetInfo();
    // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
    const Align SuitableAlignmentInBytes =
        CGM.getContext()
            .toCharUnitsFromBits(TI.getSuitableAlign())
            .getAsAlign();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
    AI->setAlignment(SuitableAlignmentInBytes);
    initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
    return RValue::get(AI);
  }

  case Builtin::BI__builtin_alloca_with_align: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
    auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
    unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
    const Align AlignmentInBytes =
        CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
    AI->setAlignment(AlignmentInBytes);
    initializeAlloca(*this, AI, Size, AlignmentInBytes);
    return RValue::get(AI);
  }

  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
    return RValue::get(nullptr);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
                        E->getArg(1)->getExprLoc(), FD, 1);
    Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    if (BuiltinID == Builtin::BImempcpy ||
        BuiltinID == Builtin::BI__builtin_mempcpy)
      return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal));
    else
      return RValue::get(Dest.getPointer());
  }
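  // mempcpy differs from memcpy only in its return value: the GEP above
  // yields dest + size (one past the last byte written) instead of dest.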
  case Builtin::BI__builtin_char_memchr:
    BuiltinID = Builtin::BI__builtin_memchr;
    break;

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
    Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  DestAddr, SrcAddr, SizeVal);
    return RValue::get(DestAddr.getPointer());
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemMove(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
                        E->getArg(1)->getExprLoc(), FD, 1);
    Builder.CreateMemMove(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
  case Builtin::BI__builtin_wmemcmp: {
    // The MSVC runtime library does not provide a definition of wmemcmp, so we
    // need an inline implementation.
    if (!getTarget().getTriple().isOSMSVCRT())
      break;

    llvm::Type *WCharTy = ConvertType(getContext().WCharTy);

    Value *Dst = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *Size = EmitScalarExpr(E->getArg(2));

    BasicBlock *Entry = Builder.GetInsertBlock();
    BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
    BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
    BasicBlock *Next = createBasicBlock("wmemcmp.next");
    BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
    Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(SizeEq0, Exit, CmpGT);

    EmitBlock(CmpGT);
    PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
    DstPhi->addIncoming(Dst, Entry);
    PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
    SrcPhi->addIncoming(Src, Entry);
    PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
    SizePhi->addIncoming(Size, Entry);
    CharUnits WCharAlign =
        getContext().getTypeAlignInChars(getContext().WCharTy);
    Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
    Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
    Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
    Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);

    EmitBlock(CmpLT);
    Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
    Builder.CreateCondBr(DstLtSrc, Exit, Next);

    EmitBlock(Next);
    Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
    Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
    Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
    Value *NextSizeEq0 =
        Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
    DstPhi->addIncoming(NextDst, Next);
    SrcPhi->addIncoming(NextSrc, Next);
    SizePhi->addIncoming(NextSize, Next);

    EmitBlock(Exit);
    PHINode *Ret = Builder.CreatePHI(IntTy, 4);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
    Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
    Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
    return RValue::get(Ret);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
                                                   getContext().UnsignedIntTy);
    Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI_ReturnAddress: {
    Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
                                                   getContext().UnsignedIntTy);
    Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Function *F =
        CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
                                                    : Intrinsic::eh_return_i64);
    Builder.CreateCall(F, {Int, Ptr});
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_unwind_init: {
    Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms. Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Address Buf = EmitPointerWithAlignment(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr = Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
        ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
        Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
  }
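  // Buffer layout used above (LLVM's SJLJ convention, for reference): slot 0
  // holds the frame pointer and slot 2 the saved stack pointer; slot 1 is
  // left for the lowering of eh.sjlj.setjmp to fill in with the resume
  // address.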
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_launder: {
    const Expr *Arg = E->getArg(0);
    QualType ArgTy = Arg->getType()->getPointeeType();
    Value *Ptr = EmitScalarExpr(Arg);
    if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
      Ptr = Builder.CreateLaunderInvariantGroup(Ptr);

    return RValue::get(Ptr);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
                                llvm::Instruction::And, true);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
                                 StoreSize);
    Store->setAtomic(llvm::AtomicOrdering::Release);
    return RValue::get(nullptr);
  }
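  // Note: __sync_lock_release is simply an atomic store of zero with release
  // ordering, matching its documented use as the unlock half of a spinlock.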
2968 case Builtin::BI__sync_synchronize: {
2969 // We assume this is supposed to correspond to a C++0x-style
2970 // sequentially-consistent fence (i.e. this is only usable for
2971 // synchronization, not device I/O or anything like that). This intrinsic
2972 // is really badly designed in the sense that in theory, there isn't
2973 // any way to safely use it... but in practice, it mostly works
2974 // to use it with non-atomic loads and stores to get acquire/release
2975 // semantics.
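// Illustrative example (not from the original file): a call to
//   __sync_synchronize();
// therefore lowers to the single IR instruction
//   fence seq_cst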
2976 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
2977 return RValue::get(nullptr);
2978 }
2980 case Builtin::BI__builtin_nontemporal_load:
2981 return RValue::get(EmitNontemporalLoad(*this, E));
2982 case Builtin::BI__builtin_nontemporal_store:
2983 return RValue::get(EmitNontemporalStore(*this, E));
2984 case Builtin::BI__c11_atomic_is_lock_free:
2985 case Builtin::BI__atomic_is_lock_free: {
2986 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
2987 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
2988 // _Atomic(T) is always properly-aligned.
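// Illustrative example (not from the original file): on a 64-bit target,
//   __c11_atomic_is_lock_free(8)
// becomes roughly
//   call zeroext i1 @__atomic_is_lock_free(i64 8, i8* null)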
2989 const char *LibCallName = "__atomic_is_lock_free";
2990 CallArgList Args;
2991 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
2992 getContext().getSizeType());
2993 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
2994 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
2995 getContext().VoidPtrTy);
2996 else
2997 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
2998 getContext().VoidPtrTy);
2999 const CGFunctionInfo &FuncInfo =
3000 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3001 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3002 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3003 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3004 ReturnValueSlot(), Args);
3005 }
3007 case Builtin::BI__atomic_test_and_set: {
3008 // Look at the argument type to determine whether this is a volatile
3009 // operation. The parameter type is always volatile.
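// Illustrative example (not from the original file): with a constant
// memory_order_seq_cst argument this whole builtin boils down to
//   atomicrmw volatile xchg i8* %p, i8 1 seq_cst
// (volatile only if the pointee is volatile-qualified); the old value is
// then compared against zero below.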
3010 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3011 bool Volatile =
3012 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3014 Value *Ptr = EmitScalarExpr(E->getArg(0));
3015 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
3016 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3017 Value *NewVal = Builder.getInt8(1);
3018 Value *Order = EmitScalarExpr(E->getArg(1));
3019 if (isa<llvm::ConstantInt>(Order)) {
3020 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3021 AtomicRMWInst *Result = nullptr;
3022 switch (ord) {
3023 case 0: // memory_order_relaxed
3024 default: // invalid order
3025 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3026 llvm::AtomicOrdering::Monotonic);
3027 break;
3028 case 1: // memory_order_consume
3029 case 2: // memory_order_acquire
3030 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3031 llvm::AtomicOrdering::Acquire);
3032 break;
3033 case 3: // memory_order_release
3034 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3035 llvm::AtomicOrdering::Release);
3036 break;
3037 case 4: // memory_order_acq_rel
3039 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3040 llvm::AtomicOrdering::AcquireRelease);
3041 break;
3042 case 5: // memory_order_seq_cst
3043 Result = Builder.CreateAtomicRMW(
3044 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3045 llvm::AtomicOrdering::SequentiallyConsistent);
3046 break;
3047 }
3048 Result->setVolatile(Volatile);
3049 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3050 }
3052 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3054 llvm::BasicBlock *BBs[5] = {
3055 createBasicBlock("monotonic", CurFn),
3056 createBasicBlock("acquire", CurFn),
3057 createBasicBlock("release", CurFn),
3058 createBasicBlock("acqrel", CurFn),
3059 createBasicBlock("seqcst", CurFn)
3060 };
3061 llvm::AtomicOrdering Orders[5] = {
3062 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
3063 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
3064 llvm::AtomicOrdering::SequentiallyConsistent};
3066 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3067 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3069 Builder.SetInsertPoint(ContBB);
3070 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
3072 for (unsigned i = 0; i < 5; ++i) {
3073 Builder.SetInsertPoint(BBs[i]);
3074 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
3075 Ptr, NewVal, Orders[i]);
3076 RMW->setVolatile(Volatile);
3077 Result->addIncoming(RMW, BBs[i]);
3078 Builder.CreateBr(ContBB);
3079 }
3081 SI->addCase(Builder.getInt32(0), BBs[0]);
3082 SI->addCase(Builder.getInt32(1), BBs[1]);
3083 SI->addCase(Builder.getInt32(2), BBs[1]);
3084 SI->addCase(Builder.getInt32(3), BBs[2]);
3085 SI->addCase(Builder.getInt32(4), BBs[3]);
3086 SI->addCase(Builder.getInt32(5), BBs[4]);
3088 Builder.SetInsertPoint(ContBB);
3089 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3090 }
3092 case Builtin::BI__atomic_clear: {
3093 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3094 bool Volatile =
3095 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3097 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
3098 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
3099 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3100 Value *NewVal = Builder.getInt8(0);
3101 Value *Order = EmitScalarExpr(E->getArg(1));
3102 if (isa<llvm::ConstantInt>(Order)) {
3103 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3104 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3105 switch (ord) {
3106 case 0: // memory_order_relaxed
3107 default: // invalid order
3108 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
3109 break;
3110 case 3: // memory_order_release
3111 Store->setOrdering(llvm::AtomicOrdering::Release);
3112 break;
3113 case 5: // memory_order_seq_cst
3114 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
3115 break;
3116 }
3117 return RValue::get(nullptr);
3118 }
3120 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3122 llvm::BasicBlock *BBs[3] = {
3123 createBasicBlock("monotonic", CurFn),
3124 createBasicBlock("release", CurFn),
3125 createBasicBlock("seqcst", CurFn)
3126 };
3127 llvm::AtomicOrdering Orders[3] = {
3128 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
3129 llvm::AtomicOrdering::SequentiallyConsistent};
3131 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3132 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3134 for (unsigned i = 0; i < 3; ++i) {
3135 Builder.SetInsertPoint(BBs[i]);
3136 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3137 Store->setOrdering(Orders[i]);
3138 Builder.CreateBr(ContBB);
3139 }
3141 SI->addCase(Builder.getInt32(0), BBs[0]);
3142 SI->addCase(Builder.getInt32(3), BBs[1]);
3143 SI->addCase(Builder.getInt32(5), BBs[2]);
3145 Builder.SetInsertPoint(ContBB);
3146 return RValue::get(nullptr);
3147 }
3149 case Builtin::BI__atomic_thread_fence:
3150 case Builtin::BI__atomic_signal_fence:
3151 case Builtin::BI__c11_atomic_thread_fence:
3152 case Builtin::BI__c11_atomic_signal_fence: {
3153 llvm::SyncScope::ID SSID;
3154 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
3155 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
3156 SSID = llvm::SyncScope::SingleThread;
3157 else
3158 SSID = llvm::SyncScope::System;
3159 Value *Order = EmitScalarExpr(E->getArg(0));
3160 if (isa<llvm::ConstantInt>(Order)) {
3161 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3162 switch (ord) {
3163 case 0: // memory_order_relaxed
3164 default: // invalid order
3165 break;
3166 case 1: // memory_order_consume
3167 case 2: // memory_order_acquire
3168 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3169 break;
3170 case 3: // memory_order_release
3171 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3172 break;
3173 case 4: // memory_order_acq_rel
3174 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3175 break;
3176 case 5: // memory_order_seq_cst
3177 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3178 break;
3179 }
3180 return RValue::get(nullptr);
3181 }
3183 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
3184 AcquireBB = createBasicBlock("acquire", CurFn);
3185 ReleaseBB = createBasicBlock("release", CurFn);
3186 AcqRelBB = createBasicBlock("acqrel", CurFn);
3187 SeqCstBB = createBasicBlock("seqcst", CurFn);
3188 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3190 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3191 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
3193 Builder.SetInsertPoint(AcquireBB);
3194 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3195 Builder.CreateBr(ContBB);
3196 SI->addCase(Builder.getInt32(1), AcquireBB);
3197 SI->addCase(Builder.getInt32(2), AcquireBB);
3199 Builder.SetInsertPoint(ReleaseBB);
3200 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3201 Builder.CreateBr(ContBB);
3202 SI->addCase(Builder.getInt32(3), ReleaseBB);
3204 Builder.SetInsertPoint(AcqRelBB);
3205 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3206 Builder.CreateBr(ContBB);
3207 SI->addCase(Builder.getInt32(4), AcqRelBB);
3209 Builder.SetInsertPoint(SeqCstBB);
3210 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3211 Builder.CreateBr(ContBB);
3212 SI->addCase(Builder.getInt32(5), SeqCstBB);
3214 Builder.SetInsertPoint(ContBB);
3215 return RValue::get(nullptr);
3216 }
3218 case Builtin::BI__builtin_signbit:
3219 case Builtin::BI__builtin_signbitf:
3220 case Builtin::BI__builtin_signbitl: {
3221 return RValue::get(
3222 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
3223 ConvertType(E->getType())));
3224 }
3225 case Builtin::BI__warn_memset_zero_len:
3226 return RValue::getIgnored();
3227 case Builtin::BI__annotation: {
3228 // Re-encode each wide string to UTF8 and make an MDString.
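// Illustrative example (not from the original file):
//   __annotation(L"tag", L"value");
// emits roughly
//   call void @llvm.codeview.annotation(metadata !{!"tag", !"value"})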
3229 SmallVector<Metadata *, 1> Strings;
3230 for (const Expr *Arg : E->arguments()) {
3231 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
3232 assert(Str->getCharByteWidth() == 2);
3233 StringRef WideBytes = Str->getBytes();
3234 std::string StrUtf8;
3235 if (!convertUTF16ToUTF8String(
3236 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
3237 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
3238 continue;
3239 }
3240 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
3241 }
3243 // Build an MDTuple of MDStrings and emit the intrinsic call.
3244 llvm::Function *F =
3245 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
3246 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
3247 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
3248 return RValue::getIgnored();
3250 case Builtin::BI__builtin_annotation: {
3251 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
3252 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
3253 AnnVal->getType());
3255 // Get the annotation string, go through casts. Sema requires this to be a
3256 // non-wide string literal, potentially cast, so the cast<> is safe.
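// Illustrative example (not from the original file):
//   int y = __builtin_annotation(x, "my_note");
// emits roughly (string/file/line operands abbreviated)
//   %y = call i32 @llvm.annotation.i32(i32 %x, i8* <"my_note">, ...)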
3257 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
3258 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
3259 return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
3261 case Builtin::BI__builtin_addcb:
3262 case Builtin::BI__builtin_addcs:
3263 case Builtin::BI__builtin_addc:
3264 case Builtin::BI__builtin_addcl:
3265 case Builtin::BI__builtin_addcll:
3266 case Builtin::BI__builtin_subcb:
3267 case Builtin::BI__builtin_subcs:
3268 case Builtin::BI__builtin_subc:
3269 case Builtin::BI__builtin_subcl:
3270 case Builtin::BI__builtin_subcll: {
3272 // We translate all of these builtins from expressions of the form:
3273 // int x = ..., y = ..., carryin = ..., carryout, result;
3274 // result = __builtin_addc(x, y, carryin, &carryout);
3276 // to LLVM IR of the form:
3278 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
3279 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
3280 // %carry1 = extractvalue {i32, i1} %tmp1, 1
3281 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
3282 // i32 %carryin)
3283 // %result = extractvalue {i32, i1} %tmp2, 0
3284 // %carry2 = extractvalue {i32, i1} %tmp2, 1
3285 // %tmp3 = or i1 %carry1, %carry2
3286 // %tmp4 = zext i1 %tmp3 to i32
3287 // store i32 %tmp4, i32* %carryout
3289 // Scalarize our inputs.
3290 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3291 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3292 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
3293 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
3295 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
3296 llvm::Intrinsic::ID IntrinsicId;
3297 switch (BuiltinID) {
3298 default: llvm_unreachable("Unknown multiprecision builtin id.");
3299 case Builtin::BI__builtin_addcb:
3300 case Builtin::BI__builtin_addcs:
3301 case Builtin::BI__builtin_addc:
3302 case Builtin::BI__builtin_addcl:
3303 case Builtin::BI__builtin_addcll:
3304 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3305 break;
3306 case Builtin::BI__builtin_subcb:
3307 case Builtin::BI__builtin_subcs:
3308 case Builtin::BI__builtin_subc:
3309 case Builtin::BI__builtin_subcl:
3310 case Builtin::BI__builtin_subcll:
3311 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3312 break;
3313 }
3315 // Construct our resulting LLVM IR expression.
3316 llvm::Value *Carry1;
3317 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
3318 X, Y, Carry1);
3319 llvm::Value *Carry2;
3320 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
3321 Sum1, Carryin, Carry2);
3322 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
3323 X->getType());
3324 Builder.CreateStore(CarryOut, CarryOutPtr);
3325 return RValue::get(Sum2);
3326 }
3328 case Builtin::BI__builtin_add_overflow:
3329 case Builtin::BI__builtin_sub_overflow:
3330 case Builtin::BI__builtin_mul_overflow: {
3331 const clang::Expr *LeftArg = E->getArg(0);
3332 const clang::Expr *RightArg = E->getArg(1);
3333 const clang::Expr *ResultArg = E->getArg(2);
3335 clang::QualType ResultQTy =
3336 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
3338 WidthAndSignedness LeftInfo =
3339 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
3340 WidthAndSignedness RightInfo =
3341 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
3342 WidthAndSignedness ResultInfo =
3343 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
3345 // Handle mixed-sign multiplication as a special case, because adding
3346 // runtime or backend support for our generic irgen would be too expensive.
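// Illustrative example (not from the original file): a call such as
//   __builtin_mul_overflow((int)a, (unsigned)b, &result)
// mixes operand signedness, so it takes this special path rather than
// the encompassing-integer lowering below.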
3347 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
3348 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
3349 RightInfo, ResultArg, ResultQTy,
3350 ResultInfo);
3352 WidthAndSignedness EncompassingInfo =
3353 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
3355 llvm::Type *EncompassingLLVMTy =
3356 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
3358 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
3360 llvm::Intrinsic::ID IntrinsicId;
3361 switch (BuiltinID) {
3362 default:
3363 llvm_unreachable("Unknown overflow builtin id.");
3364 case Builtin::BI__builtin_add_overflow:
3365 IntrinsicId = EncompassingInfo.Signed
3366 ? llvm::Intrinsic::sadd_with_overflow
3367 : llvm::Intrinsic::uadd_with_overflow;
3368 break;
3369 case Builtin::BI__builtin_sub_overflow:
3370 IntrinsicId = EncompassingInfo.Signed
3371 ? llvm::Intrinsic::ssub_with_overflow
3372 : llvm::Intrinsic::usub_with_overflow;
3373 break;
3374 case Builtin::BI__builtin_mul_overflow:
3375 IntrinsicId = EncompassingInfo.Signed
3376 ? llvm::Intrinsic::smul_with_overflow
3377 : llvm::Intrinsic::umul_with_overflow;
3378 break;
3379 }
3381 llvm::Value *Left = EmitScalarExpr(LeftArg);
3382 llvm::Value *Right = EmitScalarExpr(RightArg);
3383 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
3385 // Extend each operand to the encompassing type.
3386 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
3387 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
3389 // Perform the operation on the extended values.
3390 llvm::Value *Overflow, *Result;
3391 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
3393 if (EncompassingInfo.Width > ResultInfo.Width) {
3394 // The encompassing type is wider than the result type, so we need to
3395 // truncate it.
3396 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
3398 // To see if the truncation caused an overflow, we will extend
3399 // the result and then compare it to the original result.
3400 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
3401 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
3402 llvm::Value *TruncationOverflow =
3403 Builder.CreateICmpNE(Result, ResultTruncExt);
3405 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
3406 Result = ResultTrunc;
3407 }
3409 // Finally, store the result using the pointer.
3410 bool isVolatile =
3411 ResultArg->getType()->getPointeeType().isVolatileQualified();
3412 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
3414 return RValue::get(Overflow);
3415 }
3417 case Builtin::BI__builtin_uadd_overflow:
3418 case Builtin::BI__builtin_uaddl_overflow:
3419 case Builtin::BI__builtin_uaddll_overflow:
3420 case Builtin::BI__builtin_usub_overflow:
3421 case Builtin::BI__builtin_usubl_overflow:
3422 case Builtin::BI__builtin_usubll_overflow:
3423 case Builtin::BI__builtin_umul_overflow:
3424 case Builtin::BI__builtin_umull_overflow:
3425 case Builtin::BI__builtin_umulll_overflow:
3426 case Builtin::BI__builtin_sadd_overflow:
3427 case Builtin::BI__builtin_saddl_overflow:
3428 case Builtin::BI__builtin_saddll_overflow:
3429 case Builtin::BI__builtin_ssub_overflow:
3430 case Builtin::BI__builtin_ssubl_overflow:
3431 case Builtin::BI__builtin_ssubll_overflow:
3432 case Builtin::BI__builtin_smul_overflow:
3433 case Builtin::BI__builtin_smull_overflow:
3434 case Builtin::BI__builtin_smulll_overflow: {
3436 // We translate all of these builtins directly to the relevant llvm IR node.
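// Illustrative example (not from the original file):
//   int r; bool c = __builtin_sadd_overflow(a, b, &r);
// becomes roughly
//   %pair = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// with element 0 stored through &r and element 1 returned as the flag.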
3438 // Scalarize our inputs.
3439 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3440 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3441 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
3443 // Decide which of the overflow intrinsics we are lowering to:
3444 llvm::Intrinsic::ID IntrinsicId;
3445 switch (BuiltinID) {
3446 default: llvm_unreachable("Unknown overflow builtin id.");
3447 case Builtin::BI__builtin_uadd_overflow:
3448 case Builtin::BI__builtin_uaddl_overflow:
3449 case Builtin::BI__builtin_uaddll_overflow:
3450 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3451 break;
3452 case Builtin::BI__builtin_usub_overflow:
3453 case Builtin::BI__builtin_usubl_overflow:
3454 case Builtin::BI__builtin_usubll_overflow:
3455 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3456 break;
3457 case Builtin::BI__builtin_umul_overflow:
3458 case Builtin::BI__builtin_umull_overflow:
3459 case Builtin::BI__builtin_umulll_overflow:
3460 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
3461 break;
3462 case Builtin::BI__builtin_sadd_overflow:
3463 case Builtin::BI__builtin_saddl_overflow:
3464 case Builtin::BI__builtin_saddll_overflow:
3465 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
3466 break;
3467 case Builtin::BI__builtin_ssub_overflow:
3468 case Builtin::BI__builtin_ssubl_overflow:
3469 case Builtin::BI__builtin_ssubll_overflow:
3470 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
3471 break;
3472 case Builtin::BI__builtin_smul_overflow:
3473 case Builtin::BI__builtin_smull_overflow:
3474 case Builtin::BI__builtin_smulll_overflow:
3475 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
3476 break;
3477 }
3480 llvm::Value *Carry;
3481 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
3482 Builder.CreateStore(Sum, SumOutPtr);
3484 return RValue::get(Carry);
3485 }
3486 case Builtin::BI__builtin_addressof:
3487 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
3488 case Builtin::BI__builtin_operator_new:
3489 return EmitBuiltinNewDeleteCall(
3490 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
3491 case Builtin::BI__builtin_operator_delete:
3492 return EmitBuiltinNewDeleteCall(
3493 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
3495 case Builtin::BI__builtin_is_aligned:
3496 return EmitBuiltinIsAligned(E);
3497 case Builtin::BI__builtin_align_up:
3498 return EmitBuiltinAlignTo(E, true);
3499 case Builtin::BI__builtin_align_down:
3500 return EmitBuiltinAlignTo(E, false);
3502 case Builtin::BI__noop:
3503 // __noop always evaluates to an integer literal zero.
3504 return RValue::get(ConstantInt::get(IntTy, 0));
3505 case Builtin::BI__builtin_call_with_static_chain: {
3506 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
3507 const Expr *Chain = E->getArg(1);
3508 return EmitCall(Call->getCallee()->getType(),
3509 EmitCallee(Call->getCallee()), Call, ReturnValue,
3510 EmitScalarExpr(Chain));
3511 }
3512 case Builtin::BI_InterlockedExchange8:
3513 case Builtin::BI_InterlockedExchange16:
3514 case Builtin::BI_InterlockedExchange:
3515 case Builtin::BI_InterlockedExchangePointer:
3516 return RValue::get(
3517 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
3518 case Builtin::BI_InterlockedCompareExchangePointer:
3519 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
3520 llvm::Type *RTy;
3521 llvm::IntegerType *IntType =
3522 IntegerType::get(getLLVMContext(),
3523 getContext().getTypeSize(E->getType()));
3524 llvm::Type *IntPtrType = IntType->getPointerTo();
3526 llvm::Value *Destination =
3527 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
3529 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
3530 RTy = Exchange->getType();
3531 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
3533 llvm::Value *Comparand =
3534 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
3536 auto Ordering =
3537 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
3538 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
3540 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
3541 Ordering, Ordering);
3542 Result->setVolatile(true);
3544 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
3545 0),
3546 RTy));
3547 }
3548 case Builtin::BI_InterlockedCompareExchange8:
3549 case Builtin::BI_InterlockedCompareExchange16:
3550 case Builtin::BI_InterlockedCompareExchange:
3551 case Builtin::BI_InterlockedCompareExchange64:
3552 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
3553 case Builtin::BI_InterlockedIncrement16:
3554 case Builtin::BI_InterlockedIncrement:
3555 return RValue::get(
3556 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
3557 case Builtin::BI_InterlockedDecrement16:
3558 case Builtin::BI_InterlockedDecrement:
3559 return RValue::get(
3560 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
3561 case Builtin::BI_InterlockedAnd8:
3562 case Builtin::BI_InterlockedAnd16:
3563 case Builtin::BI_InterlockedAnd:
3564 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
3565 case Builtin::BI_InterlockedExchangeAdd8:
3566 case Builtin::BI_InterlockedExchangeAdd16:
3567 case Builtin::BI_InterlockedExchangeAdd:
3568 return RValue::get(
3569 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
3570 case Builtin::BI_InterlockedExchangeSub8:
3571 case Builtin::BI_InterlockedExchangeSub16:
3572 case Builtin::BI_InterlockedExchangeSub:
3573 return RValue::get(
3574 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
3575 case Builtin::BI_InterlockedOr8:
3576 case Builtin::BI_InterlockedOr16:
3577 case Builtin::BI_InterlockedOr:
3578 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
3579 case Builtin::BI_InterlockedXor8:
3580 case Builtin::BI_InterlockedXor16:
3581 case Builtin::BI_InterlockedXor:
3582 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
3584 case Builtin::BI_bittest64:
3585 case Builtin::BI_bittest:
3586 case Builtin::BI_bittestandcomplement64:
3587 case Builtin::BI_bittestandcomplement:
3588 case Builtin::BI_bittestandreset64:
3589 case Builtin::BI_bittestandreset:
3590 case Builtin::BI_bittestandset64:
3591 case Builtin::BI_bittestandset:
3592 case Builtin::BI_interlockedbittestandreset:
3593 case Builtin::BI_interlockedbittestandreset64:
3594 case Builtin::BI_interlockedbittestandset64:
3595 case Builtin::BI_interlockedbittestandset:
3596 case Builtin::BI_interlockedbittestandset_acq:
3597 case Builtin::BI_interlockedbittestandset_rel:
3598 case Builtin::BI_interlockedbittestandset_nf:
3599 case Builtin::BI_interlockedbittestandreset_acq:
3600 case Builtin::BI_interlockedbittestandreset_rel:
3601 case Builtin::BI_interlockedbittestandreset_nf:
3602 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
3604 // These builtins exist to emit regular volatile loads and stores not
3605 // affected by the -fms-volatile setting.
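// Illustrative example (not from the original file):
//   int v = __iso_volatile_load32(p);
// always emits a plain
//   %v = load volatile i32, i32* %p
// even under -fms-volatile, which would otherwise strengthen ordinary
// volatile accesses.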
3606 case Builtin::BI__iso_volatile_load8:
3607 case Builtin::BI__iso_volatile_load16:
3608 case Builtin::BI__iso_volatile_load32:
3609 case Builtin::BI__iso_volatile_load64:
3610 return RValue::get(EmitISOVolatileLoad(*this, E));
3611 case Builtin::BI__iso_volatile_store8:
3612 case Builtin::BI__iso_volatile_store16:
3613 case Builtin::BI__iso_volatile_store32:
3614 case Builtin::BI__iso_volatile_store64:
3615 return RValue::get(EmitISOVolatileStore(*this, E));
3617 case Builtin::BI__exception_code:
3618 case Builtin::BI_exception_code:
3619 return RValue::get(EmitSEHExceptionCode());
3620 case Builtin::BI__exception_info:
3621 case Builtin::BI_exception_info:
3622 return RValue::get(EmitSEHExceptionInfo());
3623 case Builtin::BI__abnormal_termination:
3624 case Builtin::BI_abnormal_termination:
3625 return RValue::get(EmitSEHAbnormalTermination());
3626 case Builtin::BI_setjmpex:
3627 if (getTarget().getTriple().isOSMSVCRT())
3628 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3629 break;
3630 case Builtin::BI_setjmp:
3631 if (getTarget().getTriple().isOSMSVCRT()) {
3632 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
3633 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
3634 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
3635 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3636 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
3637 }
3638 break;
3640 case Builtin::BI__GetExceptionInfo: {
3641 if (llvm::GlobalVariable *GV =
3642 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
3643 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
3644 break;
3645 }
3647 case Builtin::BI__fastfail:
3648 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
3650 case Builtin::BI__builtin_coro_size: {
3651 auto & Context = getContext();
3652 auto SizeTy = Context.getSizeType();
3653 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3654 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
3655 return RValue::get(Builder.CreateCall(F));
3656 }
3658 case Builtin::BI__builtin_coro_id:
3659 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
3660 case Builtin::BI__builtin_coro_promise:
3661 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
3662 case Builtin::BI__builtin_coro_resume:
3663 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
3664 case Builtin::BI__builtin_coro_frame:
3665 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
3666 case Builtin::BI__builtin_coro_noop:
3667 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
3668 case Builtin::BI__builtin_coro_free:
3669 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
3670 case Builtin::BI__builtin_coro_destroy:
3671 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
3672 case Builtin::BI__builtin_coro_done:
3673 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
3674 case Builtin::BI__builtin_coro_alloc:
3675 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
3676 case Builtin::BI__builtin_coro_begin:
3677 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
3678 case Builtin::BI__builtin_coro_end:
3679 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
3680 case Builtin::BI__builtin_coro_suspend:
3681 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
3682 case Builtin::BI__builtin_coro_param:
3683 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
3685 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
3686 case Builtin::BIread_pipe:
3687 case Builtin::BIwrite_pipe: {
3688 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3689 *Arg1 = EmitScalarExpr(E->getArg(1));
3690 CGOpenCLRuntime OpenCLRT(CGM);
3691 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3692 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3694 // Type of the generic packet parameter.
3695 unsigned GenericAS =
3696 getContext().getTargetAddressSpace(LangAS::opencl_generic);
3697 llvm::Type *I8PTy = llvm::PointerType::get(
3698 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
3700 // Testing which overloaded version we should generate the call for.
3701 if (2U == E->getNumArgs()) {
3702 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
3703 : "__write_pipe_2";
3704 // Creating a generic function type to be able to call with any builtin or
3705 // user defined type.
3706 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
3707 llvm::FunctionType *FTy = llvm::FunctionType::get(
3708 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3709 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
3710 return RValue::get(
3711 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3712 {Arg0, BCast, PacketSize, PacketAlign}));
3713 } else {
3714 assert(4 == E->getNumArgs() &&
3715 "Illegal number of parameters to pipe function");
3716 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
3717 : "__write_pipe_4";
3719 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
3720 Int32Ty, Int32Ty};
3721 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
3722 *Arg3 = EmitScalarExpr(E->getArg(3));
3723 llvm::FunctionType *FTy = llvm::FunctionType::get(
3724 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3725 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
3726 // We know the third argument is an integer type, but we may need to cast
3727 // it to i32.
3728 if (Arg2->getType() != Int32Ty)
3729 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
3730 return RValue::get(Builder.CreateCall(
3731 CGM.CreateRuntimeFunction(FTy, Name),
3732 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
3733 }
3734 }
3735 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
3736 // functions
3737 case Builtin::BIreserve_read_pipe:
3738 case Builtin::BIreserve_write_pipe:
3739 case Builtin::BIwork_group_reserve_read_pipe:
3740 case Builtin::BIwork_group_reserve_write_pipe:
3741 case Builtin::BIsub_group_reserve_read_pipe:
3742 case Builtin::BIsub_group_reserve_write_pipe: {
3743 // Composing the mangled name for the function.
3744 const char *Name;
3745 if (BuiltinID == Builtin::BIreserve_read_pipe)
3746 Name = "__reserve_read_pipe";
3747 else if (BuiltinID == Builtin::BIreserve_write_pipe)
3748 Name = "__reserve_write_pipe";
3749 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
3750 Name = "__work_group_reserve_read_pipe";
3751 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
3752 Name = "__work_group_reserve_write_pipe";
3753 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
3754 Name = "__sub_group_reserve_read_pipe";
3756 Name = "__sub_group_reserve_write_pipe";
3758 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3759 *Arg1 = EmitScalarExpr(E->getArg(1));
3760 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
3761 CGOpenCLRuntime OpenCLRT(CGM);
3762 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3763 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3765 // Building the generic function prototype.
3766 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
3767 llvm::FunctionType *FTy = llvm::FunctionType::get(
3768 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3769 // We know the second argument is an integer type, but we may need to cast
3770 // it to i32.
3771 if (Arg1->getType() != Int32Ty)
3772 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
3773 return RValue::get(
3774 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3775 {Arg0, Arg1, PacketSize, PacketAlign}));
3776 }
3777 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
3778 // functions
3779 case Builtin::BIcommit_read_pipe:
3780 case Builtin::BIcommit_write_pipe:
3781 case Builtin::BIwork_group_commit_read_pipe:
3782 case Builtin::BIwork_group_commit_write_pipe:
3783 case Builtin::BIsub_group_commit_read_pipe:
3784 case Builtin::BIsub_group_commit_write_pipe: {
3785 const char *Name;
3786 if (BuiltinID == Builtin::BIcommit_read_pipe)
3787 Name = "__commit_read_pipe";
3788 else if (BuiltinID == Builtin::BIcommit_write_pipe)
3789 Name = "__commit_write_pipe";
3790 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
3791 Name = "__work_group_commit_read_pipe";
3792 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
3793 Name = "__work_group_commit_write_pipe";
3794 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
3795 Name = "__sub_group_commit_read_pipe";
3797 Name = "__sub_group_commit_write_pipe";
3799 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3800 *Arg1 = EmitScalarExpr(E->getArg(1));
3801 CGOpenCLRuntime OpenCLRT(CGM);
3802 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3803 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3805 // Building the generic function prototype.
3806 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
3807 llvm::FunctionType *FTy =
3808 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
3809 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3811 return RValue::get(
3812 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3813 {Arg0, Arg1, PacketSize, PacketAlign}));
3814 }
3815 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
3816 case Builtin::BIget_pipe_num_packets:
3817 case Builtin::BIget_pipe_max_packets: {
3818 const char *BaseName;
3819 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
3820 if (BuiltinID == Builtin::BIget_pipe_num_packets)
3821 BaseName = "__get_pipe_num_packets";
3823 BaseName = "__get_pipe_max_packets";
3824 std::string Name = std::string(BaseName) +
3825 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
3827 // Building the generic function prototype.
3828 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3829 CGOpenCLRuntime OpenCLRT(CGM);
3830 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3831 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3832 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
3833 llvm::FunctionType *FTy = llvm::FunctionType::get(
3834 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3836 return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3837 {Arg0, PacketSize, PacketAlign}));
3838 }
3840 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
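// Illustrative example (not from the original file):
//   global int *g = to_global(p);
// lowers to a call to the runtime helper __to_global() on a generic i8*,
// with the argument and result pointer-cast around the call as emitted
// below.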
3841 case Builtin::BIto_global:
3842 case Builtin::BIto_local:
3843 case Builtin::BIto_private: {
3844 auto Arg0 = EmitScalarExpr(E->getArg(0));
3845 auto NewArgT = llvm::PointerType::get(Int8Ty,
3846 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
3847 auto NewRetT = llvm::PointerType::get(Int8Ty,
3848 CGM.getContext().getTargetAddressSpace(
3849 E->getType()->getPointeeType().getAddressSpace()));
3850 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
3851 llvm::Value *NewArg;
3852 if (Arg0->getType()->getPointerAddressSpace() !=
3853 NewArgT->getPointerAddressSpace())
3854 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
3855 else
3856 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
3857 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
3858 auto NewCall =
3859 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
3860 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
3861 ConvertType(E->getType())));
3862 }
3864 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
3865 // It contains four different overload formats specified in Table 6.13.17.1.
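// Illustrative example (not from the original file): the simplest form,
//   enqueue_kernel(queue, flags, ndrange, ^{ ... });
// lowers to __enqueue_kernel_basic(...), while the forms taking event
// lists and/or local-size arguments map to the *_events and *_varargs
// entry points built below.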
3866 case Builtin::BIenqueue_kernel: {
3867 StringRef Name; // Generated function call name
3868 unsigned NumArgs = E->getNumArgs();
3870 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
3871 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
3872 getContext().getTargetAddressSpace(LangAS::opencl_generic));
3874 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
3875 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
3876 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
3877 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
3878 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
3880 if (NumArgs == 4) {
3881 // The most basic form of the call with parameters:
3882 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
3883 Name = "__enqueue_kernel_basic";
3884 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
3885 GenericVoidPtrTy};
3886 llvm::FunctionType *FTy = llvm::FunctionType::get(
3887 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3889 auto Info =
3890 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
3891 llvm::Value *Kernel =
3892 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
3893 llvm::Value *Block =
3894 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
3896 AttrBuilder B;
3897 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
3898 llvm::AttributeList ByValAttrSet =
3899 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
3901 auto RTCall =
3902 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
3903 {Queue, Flags, Range, Kernel, Block});
3904 RTCall->setAttributes(ByValAttrSet);
3905 return RValue::get(RTCall);
3906 }
3907 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
3909 // Create a temporary array to hold the sizes of local pointer arguments
3910 // for the block. \p First is the position of the first size argument.
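// Illustrative example (not from the original file): for
//   enqueue_kernel(q, flags, nd, block, 16, 32)
// with two local-pointer block parameters, this lambda stores {16, 32}
// into a stack array of size_t and returns a pointer to its first
// element.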
3911 auto CreateArrayForSizeVar = [=](unsigned First)
3912 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
3913 llvm::APInt ArraySize(32, NumArgs - First);
3914 QualType SizeArrayTy = getContext().getConstantArrayType(
3915 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
3916 /*IndexTypeQuals=*/0);
3917 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
3918 llvm::Value *TmpPtr = Tmp.getPointer();
3919 llvm::Value *TmpSize = EmitLifetimeStart(
3920 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
3921 llvm::Value *ElemPtr;
3922 // Each of the following arguments specifies the size of the corresponding
3923 // argument passed to the enqueued block.
3924 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
3925 for (unsigned I = First; I < NumArgs; ++I) {
3926 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
3927 auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
3928 if (I == First)
3929 ElemPtr = GEP;
3930 auto *V =
3931 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
3932 Builder.CreateAlignedStore(
3933 V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
3934 }
3935 return std::tie(ElemPtr, TmpSize, TmpPtr);
3936 };
3938 // Could have events and/or varargs.
3939 if (E->getArg(3)->getType()->isBlockPointerType()) {
3940 // No events passed, but has variadic arguments.
3941 Name = "__enqueue_kernel_varargs";
3943 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
3944 llvm::Value *Kernel =
3945 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
3946 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
3947 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
3948 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
3950 // Create a vector of the arguments, as well as a constant value to
3951 // express to the runtime the number of variadic arguments.
3952 std::vector<llvm::Value *> Args = {
3953 Queue, Flags, Range,
3954 Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4),
3955 ElemPtr};
3956 std::vector<llvm::Type *> ArgTys = {
3957 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
3958 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
3960 llvm::FunctionType *FTy = llvm::FunctionType::get(
3961 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3962 auto Call =
3963 RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3964 llvm::ArrayRef<llvm::Value *>(Args)));
3965 if (TmpSize)
3966 EmitLifetimeEnd(TmpSize, TmpPtr);
3967 return Call;
3968 }
3969 // Any calls now have event arguments passed.
3970 if (NumArgs >= 7) {
3971 llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
3972 llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
3973 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
3975 llvm::Value *NumEvents =
3976 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
3978 // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
3979 // arguments to be a null pointer constant (including a `0` literal), we
3980 // can take that into account and emit a null pointer directly.
3981 llvm::Value *EventWaitList = nullptr;
3982 if (E->getArg(4)->isNullPointerConstant(
3983 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
3984 EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
3985 } else {
3986 EventWaitList = E->getArg(4)->getType()->isArrayType()
3987 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
3988 : EmitScalarExpr(E->getArg(4));
3989 // Convert to generic address space.
3990 EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
3991 }
3992 llvm::Value *EventRet = nullptr;
3993 if (E->getArg(5)->isNullPointerConstant(
3994 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
3995 EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
3996 } else {
3997 EventRet =
3998 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
3999 }
4001 auto Info =
4002 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4003 llvm::Value *Kernel =
4004 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4005 llvm::Value *Block =
4006 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4008 std::vector<llvm::Type *> ArgTys = {
4009 QueueTy, Int32Ty, RangeTy, Int32Ty,
4010 EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
4012 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
4013 NumEvents, EventWaitList, EventRet,
4014 Kernel, Block};
4016 if (NumArgs == 7) {
4017 // Has events but no variadics.
4018 Name = "__enqueue_kernel_basic_events";
4019 llvm::FunctionType *FTy = llvm::FunctionType::get(
4020 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4021 return RValue::get(
4022 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4023 llvm::ArrayRef<llvm::Value *>(Args)));
4024 }
4025 // Has event info and variadics
4026 // Pass the number of variadics to the runtime function too.
4027 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
4028 ArgTys.push_back(Int32Ty);
4029 Name = "__enqueue_kernel_events_varargs";
4031 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4032 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
4033 Args.push_back(ElemPtr);
4034 ArgTys.push_back(ElemPtr->getType());
4036 llvm::FunctionType *FTy = llvm::FunctionType::get(
4037 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4038 auto Call =
4039 RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4040 llvm::ArrayRef<llvm::Value *>(Args)));
4041 if (TmpSize)
4042 EmitLifetimeEnd(TmpSize, TmpPtr);
4043 return Call;
4044 }
4045 LLVM_FALLTHROUGH;
4046 }
4047 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
4048 // parameter.
4049 case Builtin::BIget_kernel_work_group_size: {
4050 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4051 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4052 auto Info =
4053 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4054 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4055 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4056 return RValue::get(Builder.CreateCall(
4057 CGM.CreateRuntimeFunction(
4058 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4059 false),
4060 "__get_kernel_work_group_size_impl"),
4061 {Kernel, Arg}));
4062 }
4063 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
4064 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4065 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4066 auto Info =
4067 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4068 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4069 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4070 return RValue::get(Builder.CreateCall(
4071 CGM.CreateRuntimeFunction(
4072 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4073 false),
4074 "__get_kernel_preferred_work_group_size_multiple_impl"),
4075 {Kernel, Arg}));
4076 }
4077 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
4078 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
4079 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4080 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4081 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
4082 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
4083 auto Info =
4084 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
4085 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4086 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4087 const char *Name =
4088 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
4089 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
4090 : "__get_kernel_sub_group_count_for_ndrange_impl";
4091 return RValue::get(Builder.CreateCall(
4092 CGM.CreateRuntimeFunction(
4093 llvm::FunctionType::get(
4094 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
4095 false),
4096 Name),
4097 {NDRange, Kernel, Block}));
4098 }
4100 case Builtin::BI__builtin_store_half:
4101 case Builtin::BI__builtin_store_halff: {
4102 Value *Val = EmitScalarExpr(E->getArg(0));
4103 Address Address = EmitPointerWithAlignment(E->getArg(1));
4104 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
4105 return RValue::get(Builder.CreateStore(HalfVal, Address));
4106 }
4107 case Builtin::BI__builtin_load_half: {
4108 Address Address = EmitPointerWithAlignment(E->getArg(0));
4109 Value *HalfVal = Builder.CreateLoad(Address);
4110 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
4111 }
4112 case Builtin::BI__builtin_load_halff: {
4113 Address Address = EmitPointerWithAlignment(E->getArg(0));
4114 Value *HalfVal = Builder.CreateLoad(Address);
4115 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
4116 }
4117 case Builtin::BIprintf:
4118 if (getTarget().getTriple().isNVPTX())
4119 return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
4120 break;
4121 case Builtin::BI__builtin_canonicalize:
4122 case Builtin::BI__builtin_canonicalizef:
4123 case Builtin::BI__builtin_canonicalizef16:
4124 case Builtin::BI__builtin_canonicalizel:
4125 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
4127 case Builtin::BI__builtin_thread_pointer: {
4128 if (!getContext().getTargetInfo().isTLSSupported())
4129 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
4130 // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
4131 break;
4132 }
4133 case Builtin::BI__builtin_os_log_format:
4134 return emitBuiltinOSLogFormat(*E);
4136 case Builtin::BI__xray_customevent: {
4137 if (!ShouldXRayInstrumentFunction())
4138 return RValue::getIgnored();
4140 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4141 XRayInstrKind::Custom))
4142 return RValue::getIgnored();
4144 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4145 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
4146 return RValue::getIgnored();
4148 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
4149 auto FTy = F->getFunctionType();
4150 auto Arg0 = E->getArg(0);
4151 auto Arg0Val = EmitScalarExpr(Arg0);
4152 auto Arg0Ty = Arg0->getType();
4153 auto PTy0 = FTy->getParamType(0);
4154 if (PTy0 != Arg0Val->getType()) {
4155 if (Arg0Ty->isArrayType())
4156 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
4157 else
4158 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
4159 }
4160 auto Arg1 = EmitScalarExpr(E->getArg(1));
4161 auto PTy1 = FTy->getParamType(1);
4162 if (PTy1 != Arg1->getType())
4163 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
4164 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
4165 }
4167 case Builtin::BI__xray_typedevent: {
4168 // TODO: There should be a way to always emit events even if the current
4169 // function is not instrumented. Losing events in a stream can cripple
4170 // the utility of the stream.
4171 if (!ShouldXRayInstrumentFunction())
4172 return RValue::getIgnored();
4174 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4175 XRayInstrKind::Typed))
4176 return RValue::getIgnored();
4178 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4179 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
4180 return RValue::getIgnored();
4182 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
4183 auto FTy = F->getFunctionType();
4184 auto Arg0 = EmitScalarExpr(E->getArg(0));
4185 auto PTy0 = FTy->getParamType(0);
4186 if (PTy0 != Arg0->getType())
4187 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
4188 auto Arg1 = E->getArg(1);
4189 auto Arg1Val = EmitScalarExpr(Arg1);
4190 auto Arg1Ty = Arg1->getType();
4191 auto PTy1 = FTy->getParamType(1);
4192 if (PTy1 != Arg1Val->getType()) {
4193 if (Arg1Ty->isArrayType())
4194 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
4195 else
4196 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
4197 }
4198 auto Arg2 = EmitScalarExpr(E->getArg(2));
4199 auto PTy2 = FTy->getParamType(2);
4200 if (PTy2 != Arg2->getType())
4201 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
4202 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
4203 }
4205 case Builtin::BI__builtin_ms_va_start:
4206 case Builtin::BI__builtin_ms_va_end:
4207 return RValue::get(
4208 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
4209 BuiltinID == Builtin::BI__builtin_ms_va_start));
4211 case Builtin::BI__builtin_ms_va_copy: {
4212 // Lower this manually. We can't reliably determine whether or not any
4213 // given va_copy() is for a Win64 va_list from the calling convention
4214 // alone, because it's legal to do this from a System V ABI function.
4215 // With opaque pointer types, we won't have enough information in LLVM
4216 // IR to determine this from the argument types, either. Best to do it
4217 // now, while we have enough information.
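// Illustrative example (not from the original file): in an
// __attribute__((ms_abi)) function compiled for x86-64 Linux,
//   __builtin_ms_va_copy(dst, src);
// simply copies the char* cursor from src to dst, which is exactly the
// load/store pair emitted below.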
4218 Address DestAddr = EmitMSVAListRef(E->getArg(0));
4219 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
4221 llvm::Type *BPP = Int8PtrPtrTy;
4223 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
4224 DestAddr.getAlignment());
4225 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
4226 SrcAddr.getAlignment());
4228 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
4229 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
4230 }
4231 }
4233 // If this is an alias for a lib function (e.g. __builtin_sin), emit
4234 // the call using the normal call path, but using the unmangled
4235 // version of the function name.
4236 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
4237 return emitLibraryCall(*this, FD, E,
4238 CGM.getBuiltinLibFunction(FD, BuiltinID));
4240 // If this is a predefined lib function (e.g. malloc), emit the call
4241 // using exactly the normal call path.
4242 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
4243 return emitLibraryCall(*this, FD, E,
4244 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
4246 // Check that a call to a target specific builtin has the correct target
4247 // features.
4248 // This is down here to avoid non-target specific builtins, however, if
4249 // generic builtins start to require generic target features then we
4250 // can move this up to the beginning of the function.
4251 checkTargetFeatures(E, FD);
4253 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
4254 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
4256 // See if we have a target specific intrinsic.
4257 const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
4258 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
4259 StringRef Prefix =
4260 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
4261 if (!Prefix.empty()) {
4262 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
4263 // NOTE: we don't need to perform a compatibility-flag check here, since
4264 // the intrinsics are declared in Builtins*.def via LANGBUILTIN, which
4265 // filters the MS builtins via ALL_MS_LANGUAGES so they are handled earlier.
4266 if (IntrinsicID == Intrinsic::not_intrinsic)
4267 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
4268 }
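// Illustrative example (not from the original file): on amdgcn,
//   __builtin_amdgcn_s_barrier()
// resolves here to llvm.amdgcn.s.barrier through its GCCBuiltin binding,
// so no hand-written emitter code is needed for it.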
4270 if (IntrinsicID != Intrinsic::not_intrinsic) {
4271 SmallVector<Value*, 16> Args;
4273 // Find out if any arguments are required to be integer constant
4274 // expressions.
4275 unsigned ICEArguments = 0;
4276 ASTContext::GetBuiltinTypeError Error;
4277 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
4278 assert(Error == ASTContext::GE_None && "Should not codegen an error");
4280 Function *F = CGM.getIntrinsic(IntrinsicID);
4281 llvm::FunctionType *FTy = F->getFunctionType();
4283 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
4284 Value *ArgValue;
4285 // If this is a normal argument, just emit it as a scalar.
4286 if ((ICEArguments & (1 << i)) == 0) {
4287 ArgValue = EmitScalarExpr(E->getArg(i));
4288 } else {
4289 // If this is required to be a constant, constant fold it so that we
4290 // know that the generated intrinsic gets a ConstantInt.
4291 llvm::APSInt Result;
4292 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
4293 assert(IsConst && "Constant arg isn't actually constant?");
4294 (void)IsConst;
4295 ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
4296 }
4298 // If the intrinsic arg type is different from the builtin arg type
4299 // we need to do a bit cast.
4300 llvm::Type *PTy = FTy->getParamType(i);
4301 if (PTy != ArgValue->getType()) {
4302 // XXX - vector of pointers?
4303 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
4304 if (PtrTy->getAddressSpace() !=
4305 ArgValue->getType()->getPointerAddressSpace()) {
4306 ArgValue = Builder.CreateAddrSpaceCast(
4307 ArgValue,
4308 ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
4309 }
4310 }
4312 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
4313 "Must be able to losslessly bit cast to param");
4314 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
4315 }
4317 Args.push_back(ArgValue);
4318 }
4320 Value *V = Builder.CreateCall(F, Args);
4321 QualType BuiltinRetType = E->getType();
4323 llvm::Type *RetTy = VoidTy;
4324 if (!BuiltinRetType->isVoidType())
4325 RetTy = ConvertType(BuiltinRetType);
4327 if (RetTy != V->getType()) {
4328 // XXX - vector of pointers?
4329 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
4330 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
4331 V = Builder.CreateAddrSpaceCast(
4332 V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
4333 }
4334 }
4336 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
4337 "Must be able to losslessly bit cast result type");
4338 V = Builder.CreateBitCast(V, RetTy);
4339 }
4341 return RValue::get(V);
4342 }
4344 // Some target-specific builtins can have aggregate return values, e.g.
4345 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
4346 // ReturnValue to be non-null, so that the target-specific emission code can
4347 // always just emit into it.
4348 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
4349 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
4350 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
4351 ReturnValue = ReturnValueSlot(DestPtr, false);
4352 }
4354 // Now see if we can emit a target-specific builtin.
4355 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
4356 switch (EvalKind) {
4357 case TEK_Scalar:
4358 return RValue::get(V);
4359 case TEK_Aggregate:
4360 return RValue::getAggregate(ReturnValue.getValue(),
4361 ReturnValue.isVolatile());
4362 case TEK_Complex:
4363 llvm_unreachable("No current target builtin returns complex");
4364 }
4365 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
4366 }
4368 ErrorUnsupported(E, "builtin function");
4370 // Unknown builtin, for now just dump it out and return undef.
4371 return GetUndefRValue(E->getType());
4372 }
4374 static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
4375 unsigned BuiltinID, const CallExpr *E,
4376 ReturnValueSlot ReturnValue,
4377 llvm::Triple::ArchType Arch) {
4378 switch (Arch) {
4379 case llvm::Triple::arm:
4380 case llvm::Triple::armeb:
4381 case llvm::Triple::thumb:
4382 case llvm::Triple::thumbeb:
4383 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
4384 case llvm::Triple::aarch64:
4385 case llvm::Triple::aarch64_32:
4386 case llvm::Triple::aarch64_be:
4387 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
4388 case llvm::Triple::bpfeb:
4389 case llvm::Triple::bpfel:
4390 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
4391 case llvm::Triple::x86:
4392 case llvm::Triple::x86_64:
4393 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
4394 case llvm::Triple::ppc:
4395 case llvm::Triple::ppc64:
4396 case llvm::Triple::ppc64le:
4397 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
4398 case llvm::Triple::r600:
4399 case llvm::Triple::amdgcn:
4400 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
4401 case llvm::Triple::systemz:
4402 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
4403 case llvm::Triple::nvptx:
4404 case llvm::Triple::nvptx64:
4405 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
4406 case llvm::Triple::wasm32:
4407 case llvm::Triple::wasm64:
4408 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
4409 case llvm::Triple::hexagon:
4410 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
4411 default:
4412 return nullptr;
4413 }
4414 }
4416 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
4417 const CallExpr *E,
4418 ReturnValueSlot ReturnValue) {
4419 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
4420 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
4421 return EmitTargetArchBuiltinExpr(
4422 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
4423 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
4424 }
4426 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
4427 getTarget().getTriple().getArch());
4428 }
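// Note (a contextual gloss, hedged): aux builtin IDs arise in offloading
// setups such as CUDA/HIP, where a builtin belonging to the host (aux)
// target is encountered while emitting device code; the ID is translated
// back via getAuxBuiltinID and dispatched on the aux target's triple above.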
4430 static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
4431 NeonTypeFlags TypeFlags,
4432 bool HasLegalHalfType=true,
4433 bool V1Ty=false) {
4434 int IsQuad = TypeFlags.isQuad();
4435 switch (TypeFlags.getEltType()) {
4436 case NeonTypeFlags::Int8:
4437 case NeonTypeFlags::Poly8:
4438 return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
4439 case NeonTypeFlags::Int16:
4440 case NeonTypeFlags::Poly16:
4441 return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4442 case NeonTypeFlags::Float16:
4443 if (HasLegalHalfType)
4444 return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
4445 else
4446 return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4447 case NeonTypeFlags::Int32:
4448 return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
4449 case NeonTypeFlags::Int64:
4450 case NeonTypeFlags::Poly64:
4451 return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
4452 case NeonTypeFlags::Poly128:
4453 // FIXME: i128 and f128 don't yet get full support in Clang and LLVM;
4454 // much of the i128 and f128 API is still missing,
4455 // so we use v16i8 to represent poly128 and let it get pattern matched.
4456 return llvm::VectorType::get(CGF->Int8Ty, 16);
4457 case NeonTypeFlags::Float32:
4458 return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
4459 case NeonTypeFlags::Float64:
4460 return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
4461 }
4462 llvm_unreachable("Unknown vector element type!");
4463 }
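// Worked example (a sketch of the switch above): for an Int32 element with
// IsQuad == 1 the lane count is (2 << 1) == 4, i.e. <4 x i32>; the non-quad
// form yields <2 x i32>, and V1Ty forces a one-lane vector such as <1 x i32>.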
4465 static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
4466 NeonTypeFlags IntTypeFlags) {
4467 int IsQuad = IntTypeFlags.isQuad();
4468 switch (IntTypeFlags.getEltType()) {
4469 case NeonTypeFlags::Int16:
4470 return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
4471 case NeonTypeFlags::Int32:
4472 return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
4473 case NeonTypeFlags::Int64:
4474 return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
4475 }
4476 llvm_unreachable("Type can't be converted to floating-point!");
4477 }
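// e.g. (sketch) an Int32 element with IsQuad == 1 maps to <4 x float>: the
// lane count is kept and the element becomes the float type of equal width,
// which is what the fixed-point vcvt_n_* conversions later in this file rely on.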
4480 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
4481 unsigned nElts = V->getType()->getVectorNumElements();
4482 Value* SV = llvm::ConstantVector::getSplat(nElts, C);
4483 return Builder.CreateShuffleVector(V, V, SV, "lane");
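// Illustrative IR (an assumed shape, not taken from a test): splatting lane 2
// of a <4 x i16> %v emits
//   shufflevector <4 x i16> %v, <4 x i16> %v, <4 x i32> <i32 2, i32 2, i32 2, i32 2>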
4486 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
4487 const char *name,
4488 unsigned shift, bool rightshift) {
4489 unsigned int j = 0;
4490 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
4491 ai != ae; ++ai, ++j)
4492 if (shift > 0 && shift == j)
4493 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
4494 else
4495 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
4497 return Builder.CreateCall(F, Ops, name);
4498 }
4500 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
4501 bool neg) {
4502 int SV = cast<ConstantInt>(V)->getSExtValue();
4503 return ConstantInt::get(Ty, neg ? -SV : SV);
4504 }
4506 // Right-shift a vector by a constant.
4507 Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
4508 llvm::Type *Ty, bool usgn,
4509 const char *name) {
4510 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
4512 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
4513 int EltSize = VTy->getScalarSizeInBits();
4515 Vec = Builder.CreateBitCast(Vec, Ty);
4517 // lshr/ashr are undefined when the shift amount is equal to the vector
4518 // element size.
4519 if (ShiftAmt == EltSize) {
4520 if (usgn) {
4521 // Right-shifting an unsigned value by its size yields 0.
4522 return llvm::ConstantAggregateZero::get(VTy);
4523 } else {
4524 // Right-shifting a signed value by its size is equivalent
4525 // to a shift of size-1.
4526 --ShiftAmt;
4527 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
4528 }
4529 }
4531 Shift = EmitNeonShiftVector(Shift, Ty, false);
4532 if (usgn)
4533 return Builder.CreateLShr(Vec, Shift, name);
4534 else
4535 return Builder.CreateAShr(Vec, Shift, name);
4536 }
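// Example of the edge case above (a sketch): vshr_n_s32 with the maximal
// shift of 32 is emitted as an ashr by 31 (replicating the sign bit), while
// the unsigned variant shifted by 32 folds to a zero vector.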
4538 enum {
4539 AddRetType = (1 << 0),
4540 Add1ArgType = (1 << 1),
4541 Add2ArgTypes = (1 << 2),
4543 VectorizeRetType = (1 << 3),
4544 VectorizeArgTypes = (1 << 4),
4546 InventFloatType = (1 << 5),
4547 UnsignedAlts = (1 << 6),
4549 Use64BitVectors = (1 << 7),
4550 Use128BitVectors = (1 << 8),
4552 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
4553 VectorRet = AddRetType | VectorizeRetType,
4554 VectorRetGetArgs01 =
4555 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
4556 FpCmpzModifiers =
4557 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
4558 };
4560 namespace {
4561 struct NeonIntrinsicInfo {
4562 const char *NameHint;
4563 unsigned BuiltinID;
4564 unsigned LLVMIntrinsic;
4565 unsigned AltLLVMIntrinsic;
4566 unsigned TypeModifier;
4568 bool operator<(unsigned RHSBuiltinID) const {
4569 return BuiltinID < RHSBuiltinID;
4570 }
4571 bool operator<(const NeonIntrinsicInfo &TE) const {
4572 return BuiltinID < TE.BuiltinID;
4573 }
4574 };
4575 } // end anonymous namespace
4577 #define NEONMAP0(NameBase) \
4578 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
4580 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
4581 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
4582 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
4584 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
4585 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
4586 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
4587 TypeModifier }
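// For reference, a mechanical hand-expansion of the macros above:
//   NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts)
// becomes the initializer
//   { "vabd_v", NEON::BI__builtin_neon_vabd_v,
//     Intrinsic::arm_neon_vabdu, Intrinsic::arm_neon_vabds,
//     Add1ArgType | UnsignedAlts }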
4589 static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
4590 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
4591 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
4592 NEONMAP1(vabs_v, arm_neon_vabs, 0),
4593 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
4595 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
4596 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
4597 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
4598 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
4599 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
4600 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
4601 NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
4602 NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
4603 NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
4604 NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
4605 NEONMAP1(vcage_v, arm_neon_vacge, 0),
4606 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
4607 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
4608 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
4609 NEONMAP1(vcale_v, arm_neon_vacge, 0),
4610 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
4611 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
4612 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
4621 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
4622 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
4625 NEONMAP1(vclz_v, ctlz, Add1ArgType),
4626 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
4627 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
4628 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
4629 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
4630 NEONMAP0(vcvt_f16_v),
4631 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
4632 NEONMAP0(vcvt_f32_v),
4633 NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4634 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4635 NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
4636 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
4637 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
4638 NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
4639 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
4640 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
4641 NEONMAP0(vcvt_s16_v),
4642 NEONMAP0(vcvt_s32_v),
4643 NEONMAP0(vcvt_s64_v),
4644 NEONMAP0(vcvt_u16_v),
4645 NEONMAP0(vcvt_u32_v),
4646 NEONMAP0(vcvt_u64_v),
4647 NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
4648 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
4649 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
4650 NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
4651 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
4652 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
4653 NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
4654 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
4655 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
4656 NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
4657 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
4658 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
4659 NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
4660 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
4661 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
4662 NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
4663 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
4664 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
4665 NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
4666 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
4667 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
4668 NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
4669 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
4670 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
4671 NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
4672 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
4673 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
4674 NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
4675 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
4676 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
4677 NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
4678 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
4679 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
4680 NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
4681 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
4682 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
4683 NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
4684 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
4685 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
4686 NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
4687 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
4688 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
4689 NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
4690 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
4691 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
4692 NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
4693 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
4694 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
4695 NEONMAP0(vcvtq_f16_v),
4696 NEONMAP0(vcvtq_f32_v),
4697 NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4698 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4699 NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
4700 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
4701 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
4702 NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
4703 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
4704 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
4705 NEONMAP0(vcvtq_s16_v),
4706 NEONMAP0(vcvtq_s32_v),
4707 NEONMAP0(vcvtq_s64_v),
4708 NEONMAP0(vcvtq_u16_v),
4709 NEONMAP0(vcvtq_u32_v),
4710 NEONMAP0(vcvtq_u64_v),
4711 NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
4712 NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
4717 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
4718 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
4719 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
4720 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
4721 NEONMAP0(vld1_dup_v),
4722 NEONMAP1(vld1_v, arm_neon_vld1, 0),
4723 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
4724 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
4725 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
4726 NEONMAP0(vld1q_dup_v),
4727 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
4728 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
4729 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
4730 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
4731 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
4732 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
4733 NEONMAP1(vld2_v, arm_neon_vld2, 0),
4734 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
4735 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
4736 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
4737 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
4738 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
4739 NEONMAP1(vld3_v, arm_neon_vld3, 0),
4740 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
4741 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
4742 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
4743 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
4744 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
4745 NEONMAP1(vld4_v, arm_neon_vld4, 0),
4746 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
4747 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
4748 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
4749 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
4750 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
4751 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
4752 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
4753 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
4754 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
4755 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
4756 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
4759 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
4761 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
4762 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
4763 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
4764 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
4765 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
4766 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
4767 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
4768 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
4769 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
4770 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
4771 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
4772 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
4773 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
4774 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
4775 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
4776 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
4777 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
4778 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
4779 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
4780 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
4781 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
4782 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
4783 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
4784 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
4785 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
4786 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
4787 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
4788 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
4789 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
4790 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
4791 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
4792 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
4793 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
4794 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
4795 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
4796 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
4797 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
4798 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
4799 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
4800 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
4801 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
4802 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
4803 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
4804 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
4807 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
4808 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
4809 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
4810 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
4811 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
4812 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
4813 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
4814 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
4815 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
4816 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
4817 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
4818 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
4819 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
4820 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
4821 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
4822 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
4823 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
4824 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
4825 NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
4826 NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
4827 NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
4828 NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
4829 NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
4830 NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
4832 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
4833 NEONMAP0(vshll_n_v),
4834 NEONMAP0(vshlq_n_v),
4835 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
4837 NEONMAP0(vshrn_n_v),
4838 NEONMAP0(vshrq_n_v),
4839 NEONMAP1(vst1_v, arm_neon_vst1, 0),
4840 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
4841 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
4842 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
4843 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
4844 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
4845 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
4846 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
4847 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
4848 NEONMAP1(vst2_v, arm_neon_vst2, 0),
4849 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
4850 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
4851 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
4852 NEONMAP1(vst3_v, arm_neon_vst3, 0),
4853 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
4854 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
4855 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
4856 NEONMAP1(vst4_v, arm_neon_vst4, 0),
4857 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
4858 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
4868 };
4870 static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
4871 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
4872 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
4874 NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
4875 NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
4876 NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
4877 NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
4878 NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
4879 NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
4880 NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
4881 NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
4882 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
4883 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
4884 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
4885 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
4886 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
4887 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
4888 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
4889 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
4898 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
4899 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
4902 NEONMAP1(vclz_v, ctlz, Add1ArgType),
4903 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
4904 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
4905 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
4906 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
4907 NEONMAP0(vcvt_f16_v),
4908 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
4909 NEONMAP0(vcvt_f32_v),
4910 NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
4911 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
4912 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
4913 NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
4914 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
4915 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
4916 NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
4917 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
4918 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
4919 NEONMAP0(vcvtq_f16_v),
4920 NEONMAP0(vcvtq_f32_v),
4921 NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
4922 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
4923 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
4924 NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
4925 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
4926 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
4927 NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
4928 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
4929 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
4930 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
4931 NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
4932 NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
4937 NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
4938 NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
4939 NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
4940 NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
4941 NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
4942 NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
4943 NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
4944 NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
4945 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
4946 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
4947 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
4948 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
4949 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
4950 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
4951 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
4952 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
4953 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
4954 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
4957 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
4958 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
4959 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
4960 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
4961 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
4962 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
4963 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
4964 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
4965 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
4966 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
4967 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
4968 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
4969 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
4970 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
4971 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
4972 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
4973 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
4974 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
4975 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
4976 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
4977 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
4978 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
4979 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
4980 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
4981 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
4982 NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
4983 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
4984 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
4985 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
4986 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
4987 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
4988 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
4989 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
4990 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
4991 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
4992 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
4993 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
4994 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
4997 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
4998 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
4999 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5000 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5001 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5002 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5003 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
5004 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
5005 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
5006 NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
5007 NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
5008 NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
5009 NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
5010 NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
5011 NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
5013 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5014 NEONMAP0(vshll_n_v),
5015 NEONMAP0(vshlq_n_v),
5016 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5018 NEONMAP0(vshrn_n_v),
5019 NEONMAP0(vshrq_n_v),
5020 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
5021 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
5022 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
5023 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
5024 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
5025 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
5029 };
5031 static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
5032 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
5033 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
5034 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
5035 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5036 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5037 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5038 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5039 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5040 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5041 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5042 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5043 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
5044 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5045 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
5046 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5047 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5048 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5049 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5050 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5051 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5052 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5053 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5054 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5055 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5056 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5057 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5058 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5059 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5060 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5061 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5062 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5063 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5064 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5065 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5066 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5067 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5068 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5069 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5070 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5071 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5072 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5073 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5074 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5075 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5076 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5077 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5078 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5079 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5080 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
5081 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5082 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5083 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5084 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5085 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5086 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5087 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5088 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5089 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5090 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5091 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5092 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5093 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5094 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5095 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5096 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5097 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5098 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5099 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5100 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5101 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
5102 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
5103 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
5104 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5105 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5106 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5107 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5108 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5109 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5110 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5111 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5112 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5113 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5114 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5115 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
5116 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5117 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
5118 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5119 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5120 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
5121 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
5122 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5123 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5124 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
5125 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
5126 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
5127 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
5128 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
5129 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
5130 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
5131 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
5132 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5133 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5134 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5135 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5136 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
5137 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5138 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5139 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5140 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
5141 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5142 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
5143 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
5144 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
5145 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5146 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5147 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
5148 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
5149 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5150 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5151 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
5152 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
5153 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
5154 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
5155 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5156 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5157 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5158 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5159 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
5160 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5161 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5162 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5163 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5164 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5165 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5166 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
5167 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
5168 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5169 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5170 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5171 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5172 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
5173 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
5174 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
5175 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
5176 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5177 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5178 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
5179 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
5180 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
5181 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5182 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5183 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5184 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5185 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
5186 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5187 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5188 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5189 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5190 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
5191 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
5192 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5193 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5194 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
5195 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
5196 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
5197 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
5198 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
5199 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
5200 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
5201 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
5202 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
5203 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
5204 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
5205 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
5206 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
5207 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
5208 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
5209 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
5210 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
5211 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
5212 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
5213 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
5214 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5215 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
5216 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5217 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
5218 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
5219 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
5220 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5221 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
5222 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5223 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
5224 // FP16 scalar intrinsics go here.
5225 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
5226 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5227 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5228 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5229 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5230 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5231 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5232 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5233 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5234 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5235 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5236 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5237 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5238 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5239 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5240 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5241 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5242 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5243 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5244 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5245 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5246 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5247 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5248 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5249 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5250 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
5251 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
5252 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
5253 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
5254 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
5255 };
5257 #undef NEONMAP0
5258 #undef NEONMAP1
5259 #undef NEONMAP2
5261 static bool NEONSIMDIntrinsicsProvenSorted = false;
5263 static bool AArch64SIMDIntrinsicsProvenSorted = false;
5264 static bool AArch64SISDIntrinsicsProvenSorted = false;
5267 static const NeonIntrinsicInfo *
5268 findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
5269 unsigned BuiltinID, bool &MapProvenSorted) {
5271 #ifndef NDEBUG
5272 if (!MapProvenSorted) {
5273 assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
5274 MapProvenSorted = true;
5275 }
5276 #endif
5278 const NeonIntrinsicInfo *Builtin = llvm::lower_bound(IntrinsicMap, BuiltinID);
5280 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
5281 return Builtin;
5283 return nullptr;
5284 }
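// Usage sketch (call shape assumed from this file's conventions):
//   if (const NeonIntrinsicInfo *Info = findNeonIntrinsicInMap(
//           ARMSIMDIntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted))
//     ... dispatch on Info->LLVMIntrinsic and Info->TypeModifier ...
// The two operator< overloads on NeonIntrinsicInfo are what let
// llvm::lower_bound compare table entries against a bare builtin ID.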
5286 Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
5287 unsigned Modifier,
5288 llvm::Type *ArgType,
5289 const CallExpr *E) {
5290 int VectorSize = 0;
5291 if (Modifier & Use64BitVectors)
5292 VectorSize = 64;
5293 else if (Modifier & Use128BitVectors)
5294 VectorSize = 128;
5296 // Return type.
5297 SmallVector<llvm::Type *, 3> Tys;
5298 if (Modifier & AddRetType) {
5299 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
5300 if (Modifier & VectorizeRetType)
5301 Ty = llvm::VectorType::get(
5302 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
5304 Tys.push_back(Ty);
5305 }
5307 // Arguments.
5308 if (Modifier & VectorizeArgTypes) {
5309 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
5310 ArgType = llvm::VectorType::get(ArgType, Elts);
5311 }
5313 if (Modifier & (Add1ArgType | Add2ArgTypes))
5314 Tys.push_back(ArgType);
5316 if (Modifier & Add2ArgTypes)
5317 Tys.push_back(ArgType);
5319 if (Modifier & InventFloatType)
5320 Tys.push_back(FloatTy);
5322 return CGM.getIntrinsic(IntrinsicID, Tys);
5323 }
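// Worked example (a sketch using the SISD table above): vqabsb_s8 carries
// Vectorize1ArgType | Use64BitVectors, so VectorSize is 64 and the scalar i8
// argument type is widened to <8 x i8> before being pushed as the sole
// overload type, selecting the v8i8 flavor of aarch64.neon.sqabs.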
5325 static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
5326 const NeonIntrinsicInfo &SISDInfo,
5327 SmallVectorImpl<Value *> &Ops,
5328 const CallExpr *E) {
5329 unsigned BuiltinID = SISDInfo.BuiltinID;
5330 unsigned int Int = SISDInfo.LLVMIntrinsic;
5331 unsigned Modifier = SISDInfo.TypeModifier;
5332 const char *s = SISDInfo.NameHint;
5334 switch (BuiltinID) {
5335 case NEON::BI__builtin_neon_vcled_s64:
5336 case NEON::BI__builtin_neon_vcled_u64:
5337 case NEON::BI__builtin_neon_vcles_f32:
5338 case NEON::BI__builtin_neon_vcled_f64:
5339 case NEON::BI__builtin_neon_vcltd_s64:
5340 case NEON::BI__builtin_neon_vcltd_u64:
5341 case NEON::BI__builtin_neon_vclts_f32:
5342 case NEON::BI__builtin_neon_vcltd_f64:
5343 case NEON::BI__builtin_neon_vcales_f32:
5344 case NEON::BI__builtin_neon_vcaled_f64:
5345 case NEON::BI__builtin_neon_vcalts_f32:
5346 case NEON::BI__builtin_neon_vcaltd_f64:
5347 // Only one direction of comparisons actually exist, cmle is actually a cmge
5348 // with swapped operands. The table gives us the right intrinsic but we
5349 // still need to do the swap.
5350 std::swap(Ops[0], Ops[1]);
5351 break;
5352 }
5354 assert(Int && "Generic code assumes a valid intrinsic");
5356 // Determine the type(s) of this overloaded AArch64 intrinsic.
5357 const Expr *Arg = E->getArg(0);
5358 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
5359 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
5361 int j = 0;
5362 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
5363 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5364 ai != ae; ++ai, ++j) {
5365 llvm::Type *ArgTy = ai->getType();
5366 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
5367 ArgTy->getPrimitiveSizeInBits())
5368 continue;
5370 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
5371 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
5372 // it before inserting.
5373 Ops[j] =
5374 CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
5375 Ops[j] =
5376 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
5377 }
5379 Value *Result = CGF.EmitNeonCall(F, Ops, s);
5380 llvm::Type *ResultType = CGF.ConvertType(E->getType());
5381 if (ResultType->getPrimitiveSizeInBits() <
5382 Result->getType()->getPrimitiveSizeInBits())
5383 return CGF.Builder.CreateExtractElement(Result, C0);
5385 return CGF.Builder.CreateBitCast(Result, ResultType, s);
5386 }
5388 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
5389 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
5390 const char *NameHint, unsigned Modifier, const CallExpr *E,
5391 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
5392 llvm::Triple::ArchType Arch) {
5393 // Get the last argument, which specifies the vector type.
5394 llvm::APSInt NeonTypeConst;
5395 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
5396 if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
5397 return nullptr;
5399 // Determine the type of this overloaded NEON intrinsic.
5400 NeonTypeFlags Type(NeonTypeConst.getZExtValue());
5401 bool Usgn = Type.isUnsigned();
5402 bool Quad = Type.isQuad();
5403 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
5405 llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType);
5406 llvm::Type *Ty = VTy;
5407 if (!Ty)
5408 return nullptr;
5410 auto getAlignmentValue32 = [&](Address addr) -> Value* {
5411 return Builder.getInt32(addr.getAlignment().getQuantity());
5412 };
5414 unsigned Int = LLVMIntrinsic;
5415 if ((Modifier & UnsignedAlts) && !Usgn)
5416 Int = AltLLVMIntrinsic;
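// e.g. the table pairs vqadd_v with uadd_sat/sadd_sat, so a signed overload
// (!Usgn) swaps in the signed alternative here before the switch dispatches.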
5418 switch (BuiltinID) {
5419 default: break;
5420 case NEON::BI__builtin_neon_vpadd_v:
5421 case NEON::BI__builtin_neon_vpaddq_v:
5422 // We don't allow fp/int overloading of intrinsics.
5423 if (VTy->getElementType()->isFloatingPointTy() &&
5424 Int == Intrinsic::aarch64_neon_addp)
5425 Int = Intrinsic::aarch64_neon_faddp;
5426 break;
5427 case NEON::BI__builtin_neon_vabs_v:
5428 case NEON::BI__builtin_neon_vabsq_v:
5429 if (VTy->getElementType()->isFloatingPointTy())
5430 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
5431 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
5432 case NEON::BI__builtin_neon_vaddhn_v: {
5433 llvm::VectorType *SrcTy =
5434 llvm::VectorType::getExtendedElementVectorType(VTy);
5436 // %sum = add <4 x i32> %lhs, %rhs
5437 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5438 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
5439 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
5441 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
5442 Constant *ShiftAmt =
5443 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
5444 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
5446 // %res = trunc <4 x i32> %high to <4 x i16>
5447 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
5449 case NEON::BI__builtin_neon_vcale_v:
5450 case NEON::BI__builtin_neon_vcaleq_v:
5451 case NEON::BI__builtin_neon_vcalt_v:
5452 case NEON::BI__builtin_neon_vcaltq_v:
5453 std::swap(Ops[0], Ops[1]);
5454 LLVM_FALLTHROUGH;
5455 case NEON::BI__builtin_neon_vcage_v:
5456 case NEON::BI__builtin_neon_vcageq_v:
5457 case NEON::BI__builtin_neon_vcagt_v:
5458 case NEON::BI__builtin_neon_vcagtq_v: {
5459 llvm::Type *Ty;
5460 switch (VTy->getScalarSizeInBits()) {
5461 default: llvm_unreachable("unexpected type");
5462 case 32:
5463 Ty = FloatTy;
5464 break;
5465 case 64:
5466 Ty = DoubleTy;
5467 break;
5468 case 16:
5469 Ty = HalfTy;
5470 break;
5471 }
5472 llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
5473 llvm::Type *Tys[] = { VTy, VecFlt };
5474 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5475 return EmitNeonCall(F, Ops, NameHint);
5476 }
5477 case NEON::BI__builtin_neon_vceqz_v:
5478 case NEON::BI__builtin_neon_vceqzq_v:
5479 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
5480 ICmpInst::ICMP_EQ, "vceqz");
5481 case NEON::BI__builtin_neon_vcgez_v:
5482 case NEON::BI__builtin_neon_vcgezq_v:
5483 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
5484 ICmpInst::ICMP_SGE, "vcgez");
5485 case NEON::BI__builtin_neon_vclez_v:
5486 case NEON::BI__builtin_neon_vclezq_v:
5487 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
5488 ICmpInst::ICMP_SLE, "vclez");
5489 case NEON::BI__builtin_neon_vcgtz_v:
5490 case NEON::BI__builtin_neon_vcgtzq_v:
5491 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
5492 ICmpInst::ICMP_SGT, "vcgtz");
5493 case NEON::BI__builtin_neon_vcltz_v:
5494 case NEON::BI__builtin_neon_vcltzq_v:
5495 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
5496 ICmpInst::ICMP_SLT, "vcltz");
5497 case NEON::BI__builtin_neon_vclz_v:
5498 case NEON::BI__builtin_neon_vclzq_v:
5499 // We generate a target-independent intrinsic, which needs a second
5500 // argument for whether or not clz of zero is undefined; on ARM it isn't.
5501 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
5502 break;
5503 case NEON::BI__builtin_neon_vcvt_f32_v:
5504 case NEON::BI__builtin_neon_vcvtq_f32_v:
5505 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5506 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
5507 HasLegalHalfType);
5508 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5509 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5510 case NEON::BI__builtin_neon_vcvt_f16_v:
5511 case NEON::BI__builtin_neon_vcvtq_f16_v:
5512 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5513 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
5514 HasLegalHalfType);
5515 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5516 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5517 case NEON::BI__builtin_neon_vcvt_n_f16_v:
5518 case NEON::BI__builtin_neon_vcvt_n_f32_v:
5519 case NEON::BI__builtin_neon_vcvt_n_f64_v:
5520 case NEON::BI__builtin_neon_vcvtq_n_f16_v:
5521 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
5522 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
5523 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
5524 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
5525 Function *F = CGM.getIntrinsic(Int, Tys);
5526 return EmitNeonCall(F, Ops, "vcvt_n");
5528 case NEON::BI__builtin_neon_vcvt_n_s16_v:
5529 case NEON::BI__builtin_neon_vcvt_n_s32_v:
5530 case NEON::BI__builtin_neon_vcvt_n_u16_v:
5531 case NEON::BI__builtin_neon_vcvt_n_u32_v:
5532 case NEON::BI__builtin_neon_vcvt_n_s64_v:
5533 case NEON::BI__builtin_neon_vcvt_n_u64_v:
5534 case NEON::BI__builtin_neon_vcvtq_n_s16_v:
5535 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
5536 case NEON::BI__builtin_neon_vcvtq_n_u16_v:
5537 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
5538 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
5539 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
5540 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5541 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5542 return EmitNeonCall(F, Ops, "vcvt_n");
5544 case NEON::BI__builtin_neon_vcvt_s32_v:
5545 case NEON::BI__builtin_neon_vcvt_u32_v:
5546 case NEON::BI__builtin_neon_vcvt_s64_v:
5547 case NEON::BI__builtin_neon_vcvt_u64_v:
5548 case NEON::BI__builtin_neon_vcvt_s16_v:
5549 case NEON::BI__builtin_neon_vcvt_u16_v:
5550 case NEON::BI__builtin_neon_vcvtq_s32_v:
5551 case NEON::BI__builtin_neon_vcvtq_u32_v:
5552 case NEON::BI__builtin_neon_vcvtq_s64_v:
5553 case NEON::BI__builtin_neon_vcvtq_u64_v:
5554 case NEON::BI__builtin_neon_vcvtq_s16_v:
5555 case NEON::BI__builtin_neon_vcvtq_u16_v: {
5556 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
5557 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
5558 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
5560 case NEON::BI__builtin_neon_vcvta_s16_v:
5561 case NEON::BI__builtin_neon_vcvta_s32_v:
5562 case NEON::BI__builtin_neon_vcvta_s64_v:
5563 case NEON::BI__builtin_neon_vcvta_u16_v:
5564 case NEON::BI__builtin_neon_vcvta_u32_v:
5565 case NEON::BI__builtin_neon_vcvta_u64_v:
5566 case NEON::BI__builtin_neon_vcvtaq_s16_v:
5567 case NEON::BI__builtin_neon_vcvtaq_s32_v:
5568 case NEON::BI__builtin_neon_vcvtaq_s64_v:
5569 case NEON::BI__builtin_neon_vcvtaq_u16_v:
5570 case NEON::BI__builtin_neon_vcvtaq_u32_v:
5571 case NEON::BI__builtin_neon_vcvtaq_u64_v:
5572 case NEON::BI__builtin_neon_vcvtn_s16_v:
5573 case NEON::BI__builtin_neon_vcvtn_s32_v:
5574 case NEON::BI__builtin_neon_vcvtn_s64_v:
5575 case NEON::BI__builtin_neon_vcvtn_u16_v:
5576 case NEON::BI__builtin_neon_vcvtn_u32_v:
5577 case NEON::BI__builtin_neon_vcvtn_u64_v:
5578 case NEON::BI__builtin_neon_vcvtnq_s16_v:
5579 case NEON::BI__builtin_neon_vcvtnq_s32_v:
5580 case NEON::BI__builtin_neon_vcvtnq_s64_v:
5581 case NEON::BI__builtin_neon_vcvtnq_u16_v:
5582 case NEON::BI__builtin_neon_vcvtnq_u32_v:
5583 case NEON::BI__builtin_neon_vcvtnq_u64_v:
5584 case NEON::BI__builtin_neon_vcvtp_s16_v:
5585 case NEON::BI__builtin_neon_vcvtp_s32_v:
5586 case NEON::BI__builtin_neon_vcvtp_s64_v:
5587 case NEON::BI__builtin_neon_vcvtp_u16_v:
5588 case NEON::BI__builtin_neon_vcvtp_u32_v:
5589 case NEON::BI__builtin_neon_vcvtp_u64_v:
5590 case NEON::BI__builtin_neon_vcvtpq_s16_v:
5591 case NEON::BI__builtin_neon_vcvtpq_s32_v:
5592 case NEON::BI__builtin_neon_vcvtpq_s64_v:
5593 case NEON::BI__builtin_neon_vcvtpq_u16_v:
5594 case NEON::BI__builtin_neon_vcvtpq_u32_v:
5595 case NEON::BI__builtin_neon_vcvtpq_u64_v:
5596 case NEON::BI__builtin_neon_vcvtm_s16_v:
5597 case NEON::BI__builtin_neon_vcvtm_s32_v:
5598 case NEON::BI__builtin_neon_vcvtm_s64_v:
5599 case NEON::BI__builtin_neon_vcvtm_u16_v:
5600 case NEON::BI__builtin_neon_vcvtm_u32_v:
5601 case NEON::BI__builtin_neon_vcvtm_u64_v:
5602 case NEON::BI__builtin_neon_vcvtmq_s16_v:
5603 case NEON::BI__builtin_neon_vcvtmq_s32_v:
5604 case NEON::BI__builtin_neon_vcvtmq_s64_v:
5605 case NEON::BI__builtin_neon_vcvtmq_u16_v:
5606 case NEON::BI__builtin_neon_vcvtmq_u32_v:
5607 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
5608 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5609 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5610 }
5611 case NEON::BI__builtin_neon_vcvtx_f32_v: {
5612 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
5613 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5614 }
5616 case NEON::BI__builtin_neon_vext_v:
5617 case NEON::BI__builtin_neon_vextq_v: {
5618 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
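// vext extracts a window of elements that starts at lane CV of the
// concatenation of the two sources, so it lowers to one shufflevector
// whose mask is <CV, CV+1, ..., CV+NumElts-1>.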
5619 SmallVector<uint32_t, 16> Indices;
5620 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5621 Indices.push_back(i+CV);
5623 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5624 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5625 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
5626 }
5627 case NEON::BI__builtin_neon_vfma_v:
5628 case NEON::BI__builtin_neon_vfmaq_v: {
5629 Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
5630 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5631 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5632 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5634 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
5635 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
5636 }
5637 case NEON::BI__builtin_neon_vld1_v:
5638 case NEON::BI__builtin_neon_vld1q_v: {
5639 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5640 Ops.push_back(getAlignmentValue32(PtrOp0));
5641 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
5642 }
5643 case NEON::BI__builtin_neon_vld1_x2_v:
5644 case NEON::BI__builtin_neon_vld1q_x2_v:
5645 case NEON::BI__builtin_neon_vld1_x3_v:
5646 case NEON::BI__builtin_neon_vld1q_x3_v:
5647 case NEON::BI__builtin_neon_vld1_x4_v:
5648 case NEON::BI__builtin_neon_vld1q_x4_v: {
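// vld1_xN loads N whole vectors at once; the intrinsic returns them as a
// struct, which is then stored through the sret pointer passed in Ops[0].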
5649 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
5650 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5651 llvm::Type *Tys[2] = { VTy, PTy };
5652 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5653 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
5654 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5655 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5656 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5657 }
5658 case NEON::BI__builtin_neon_vld2_v:
5659 case NEON::BI__builtin_neon_vld2q_v:
5660 case NEON::BI__builtin_neon_vld3_v:
5661 case NEON::BI__builtin_neon_vld3q_v:
5662 case NEON::BI__builtin_neon_vld4_v:
5663 case NEON::BI__builtin_neon_vld4q_v:
5664 case NEON::BI__builtin_neon_vld2_dup_v:
5665 case NEON::BI__builtin_neon_vld2q_dup_v:
5666 case NEON::BI__builtin_neon_vld3_dup_v:
5667 case NEON::BI__builtin_neon_vld3q_dup_v:
5668 case NEON::BI__builtin_neon_vld4_dup_v:
5669 case NEON::BI__builtin_neon_vld4q_dup_v: {
5670 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5671 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5672 Value *Align = getAlignmentValue32(PtrOp1);
5673 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
5674 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5675 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5676 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5677 }
5678 case NEON::BI__builtin_neon_vld1_dup_v:
5679 case NEON::BI__builtin_neon_vld1q_dup_v: {
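// vld1_dup loads a single element and broadcasts it: load the scalar,
// insert it into lane 0 of an undef vector, then splat lane 0 everywhere.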
5680 Value *V = UndefValue::get(Ty);
5681 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
5682 PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
5683 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
5684 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
5685 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
5686 return EmitNeonSplat(Ops[0], CI);
5687 }
5688 case NEON::BI__builtin_neon_vld2_lane_v:
5689 case NEON::BI__builtin_neon_vld2q_lane_v:
5690 case NEON::BI__builtin_neon_vld3_lane_v:
5691 case NEON::BI__builtin_neon_vld3q_lane_v:
5692 case NEON::BI__builtin_neon_vld4_lane_v:
5693 case NEON::BI__builtin_neon_vld4q_lane_v: {
5694 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5695 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5696 for (unsigned I = 2; I < Ops.size() - 1; ++I)
5697 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
5698 Ops.push_back(getAlignmentValue32(PtrOp1));
5699 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
5700 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5701 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5702 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5703 }
5704 case NEON::BI__builtin_neon_vmovl_v: {
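// vmovl widens each lane: bitcast the source to the half-width vector
// type, then zero- or sign-extend every element to the result width.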
5705 llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
5706 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
5707 if (Usgn)
5708 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
5709 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
5710 }
5711 case NEON::BI__builtin_neon_vmovn_v: {
5712 llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
5713 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
5714 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
5715 }
5716 case NEON::BI__builtin_neon_vmull_v:
5717 // FIXME: the integer vmull operations could be emitted in terms of pure
5718 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
5719 // hoisting the exts outside loops. Until global ISel comes along that can
5720 // see through such movement this leads to bad CodeGen. So we need an
5721 // intrinsic for now.
5722 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
5723 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
5724 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
5725 case NEON::BI__builtin_neon_vpadal_v:
5726 case NEON::BI__builtin_neon_vpadalq_v: {
5727 // The source operand type has twice as many elements of half the size.
5728 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5729 llvm::Type *EltTy =
5730 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5731 llvm::Type *NarrowTy =
5732 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
5733 llvm::Type *Tys[2] = { Ty, NarrowTy };
5734 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
5735 }
5736 case NEON::BI__builtin_neon_vpaddl_v:
5737 case NEON::BI__builtin_neon_vpaddlq_v: {
5738 // The source operand type has twice as many elements of half the size.
5739 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5740 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5741 llvm::Type *NarrowTy =
5742 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
5743 llvm::Type *Tys[2] = { Ty, NarrowTy };
5744 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
5745 }
5746 case NEON::BI__builtin_neon_vqdmlal_v:
5747 case NEON::BI__builtin_neon_vqdmlsl_v: {
5748 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
5749 Ops[1] =
5750 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
5751 Ops.resize(2);
5752 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
5753 }
5754 case NEON::BI__builtin_neon_vqshl_n_v:
5755 case NEON::BI__builtin_neon_vqshlq_n_v:
5756 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
5757 1, false);
5758 case NEON::BI__builtin_neon_vqshlu_n_v:
5759 case NEON::BI__builtin_neon_vqshluq_n_v:
5760 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
5761 1, false);
5762 case NEON::BI__builtin_neon_vrecpe_v:
5763 case NEON::BI__builtin_neon_vrecpeq_v:
5764 case NEON::BI__builtin_neon_vrsqrte_v:
5765 case NEON::BI__builtin_neon_vrsqrteq_v:
5766 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
5767 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
5768 case NEON::BI__builtin_neon_vrndi_v:
5769 case NEON::BI__builtin_neon_vrndiq_v:
5770 Int = Intrinsic::nearbyint;
5771 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
5772 case NEON::BI__builtin_neon_vrshr_n_v:
5773 case NEON::BI__builtin_neon_vrshrq_n_v:
5774 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
5775 1, true);
5776 case NEON::BI__builtin_neon_vshl_n_v:
5777 case NEON::BI__builtin_neon_vshlq_n_v:
5778 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
5779 return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
5780 "vshl_n");
5781 case NEON::BI__builtin_neon_vshll_n_v: {
5782 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
5783 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5784 if (Usgn)
5785 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
5786 else
5787 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
5788 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
5789 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
5790 }
5791 case NEON::BI__builtin_neon_vshrn_n_v: {
5792 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
5793 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5794 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
5795 if (Usgn)
5796 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
5797 else
5798 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
5799 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
5800 }
5801 case NEON::BI__builtin_neon_vshr_n_v:
5802 case NEON::BI__builtin_neon_vshrq_n_v:
5803 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
5804 case NEON::BI__builtin_neon_vst1_v:
5805 case NEON::BI__builtin_neon_vst1q_v:
5806 case NEON::BI__builtin_neon_vst2_v:
5807 case NEON::BI__builtin_neon_vst2q_v:
5808 case NEON::BI__builtin_neon_vst3_v:
5809 case NEON::BI__builtin_neon_vst3q_v:
5810 case NEON::BI__builtin_neon_vst4_v:
5811 case NEON::BI__builtin_neon_vst4q_v:
5812 case NEON::BI__builtin_neon_vst2_lane_v:
5813 case NEON::BI__builtin_neon_vst2q_lane_v:
5814 case NEON::BI__builtin_neon_vst3_lane_v:
5815 case NEON::BI__builtin_neon_vst3q_lane_v:
5816 case NEON::BI__builtin_neon_vst4_lane_v:
5817 case NEON::BI__builtin_neon_vst4q_lane_v: {
5818 llvm::Type *Tys[] = {Int8PtrTy, Ty};
5819 Ops.push_back(getAlignmentValue32(PtrOp0));
5820 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
5821 }
5822 case NEON::BI__builtin_neon_vst1_x2_v:
5823 case NEON::BI__builtin_neon_vst1q_x2_v:
5824 case NEON::BI__builtin_neon_vst1_x3_v:
5825 case NEON::BI__builtin_neon_vst1q_x3_v:
5826 case NEON::BI__builtin_neon_vst1_x4_v:
5827 case NEON::BI__builtin_neon_vst1q_x4_v: {
5828 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
5829 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
5830 // in AArch64 it comes last. We may want to standardize on one or the other.
5831 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
5832 Arch == llvm::Triple::aarch64_32) {
5833 llvm::Type *Tys[2] = { VTy, PTy };
5834 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
5835 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
5836 }
5837 llvm::Type *Tys[2] = { PTy, VTy };
5838 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
5839 }
5840 case NEON::BI__builtin_neon_vsubhn_v: {
5841 llvm::VectorType *SrcTy =
5842 llvm::VectorType::getExtendedElementVectorType(VTy);
5844 // %sum = sub <4 x i32> %lhs, %rhs
5845 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5846 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
5847 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
5849 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
5850 Constant *ShiftAmt =
5851 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
5852 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
5854 // %res = trunc <4 x i32> %high to <4 x i16>
5855 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
5856 }
5857 case NEON::BI__builtin_neon_vtrn_v:
5858 case NEON::BI__builtin_neon_vtrnq_v: {
5859 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5860 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5861 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5862 Value *SV = nullptr;
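// vtrn returns both transposed vectors through the pointer in Ops[0]:
// each iteration builds one interleaving mask, shuffles, and stores the
// result into the vi'th field of the result struct.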
5864 for (unsigned vi = 0; vi != 2; ++vi) {
5865 SmallVector<uint32_t, 16> Indices;
5866 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5867 Indices.push_back(i+vi);
5868 Indices.push_back(i+e+vi);
5869 }
5870 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5871 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
5872 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
5873 }
5874 return SV;
5875 }
5876 case NEON::BI__builtin_neon_vtst_v:
5877 case NEON::BI__builtin_neon_vtstq_v: {
5878 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5879 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5880 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
5881 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
5882 ConstantAggregateZero::get(Ty));
5883 return Builder.CreateSExt(Ops[0], Ty, "vtst");
5884 }
5885 case NEON::BI__builtin_neon_vuzp_v:
5886 case NEON::BI__builtin_neon_vuzpq_v: {
5887 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5888 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5889 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5890 Value *SV = nullptr;
5892 for (unsigned vi = 0; vi != 2; ++vi) {
5893 SmallVector<uint32_t, 16> Indices;
5894 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5895 Indices.push_back(2*i+vi);
5897 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5898 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
5899 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
5900 }
5901 return SV;
5902 }
5903 case NEON::BI__builtin_neon_vzip_v:
5904 case NEON::BI__builtin_neon_vzipq_v: {
5905 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5906 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5907 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5908 Value *SV = nullptr;
5910 for (unsigned vi = 0; vi != 2; ++vi) {
5911 SmallVector<uint32_t, 16> Indices;
5912 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5913 Indices.push_back((i + vi*e) >> 1);
5914 Indices.push_back(((i + vi*e) >> 1)+e);
5915 }
5916 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5917 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
5918 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
5919 }
5920 return SV;
5921 }
5922 case NEON::BI__builtin_neon_vdot_v:
5923 case NEON::BI__builtin_neon_vdotq_v: {
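// vdot consumes 8-bit inputs while accumulating into wider lanes, so the
// second overload type is an i8 vector with the same total bit width.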
5924 llvm::Type *InputTy =
5925 llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
5926 llvm::Type *Tys[2] = { Ty, InputTy };
5927 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
5928 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
5929 }
5930 case NEON::BI__builtin_neon_vfmlal_low_v:
5931 case NEON::BI__builtin_neon_vfmlalq_low_v: {
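// The FP16 FML intrinsics are overloaded on both the accumulator type and
// the half-precision input type, hence the two-type signature built here.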
5932 llvm::Type *InputTy =
5933 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
5934 llvm::Type *Tys[2] = { Ty, InputTy };
5935 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
5936 }
5937 case NEON::BI__builtin_neon_vfmlsl_low_v:
5938 case NEON::BI__builtin_neon_vfmlslq_low_v: {
5939 llvm::Type *InputTy =
5940 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
5941 llvm::Type *Tys[2] = { Ty, InputTy };
5942 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
5943 }
5944 case NEON::BI__builtin_neon_vfmlal_high_v:
5945 case NEON::BI__builtin_neon_vfmlalq_high_v: {
5946 llvm::Type *InputTy =
5947 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
5948 llvm::Type *Tys[2] = { Ty, InputTy };
5949 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
5950 }
5951 case NEON::BI__builtin_neon_vfmlsl_high_v:
5952 case NEON::BI__builtin_neon_vfmlslq_high_v: {
5953 llvm::Type *InputTy =
5954 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
5955 llvm::Type *Tys[2] = { Ty, InputTy };
5956 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
5957 }
5958 }
5960 assert(Int && "Expected valid intrinsic number");
5962 // Determine the type(s) of this overloaded AArch64 intrinsic.
5963 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
5965 Value *Result = EmitNeonCall(F, Ops, NameHint);
5966 llvm::Type *ResultType = ConvertType(E->getType());
5967 // AArch64 intrinsic one-element vector type cast to
5968 // scalar type expected by the builtin
5969 return Builder.CreateBitCast(Result, ResultType, NameHint);
5970 }
5972 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
5973 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
5974 const CmpInst::Predicate Ip, const Twine &Name) {
5975 llvm::Type *OTy = Op->getType();
5977 // FIXME: this is utterly horrific. We should not be looking at previous
5978 // codegen context to find out what needs doing. Unfortunately TableGen
5979 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
5980 // (etc).
5981 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
5982 OTy = BI->getOperand(0)->getType();
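// Compare in the operand's original type, so a floating-point value that
// arrives here as a bitcast still gets an fcmp rather than an icmp.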
5984 Op = Builder.CreateBitCast(Op, OTy);
5985 if (OTy->getScalarType()->isFloatingPointTy()) {
5986 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
5987 } else {
5988 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
5989 }
5990 return Builder.CreateSExt(Op, Ty, Name);
5991 }
5993 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
5994 Value *ExtOp, Value *IndexOp,
5995 llvm::Type *ResTy, unsigned IntID,
5996 const char *Name) {
5997 SmallVector<Value *, 2> TblOps;
5998 if (ExtOp)
5999 TblOps.push_back(ExtOp);
6001 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
6002 SmallVector<uint32_t, 16> Indices;
6003 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
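// The interleaving mask <0, 1, 2, ...> built below concatenates two
// 64-bit table operands into the 128-bit register TBL/TBX expects.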
6004 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
6005 Indices.push_back(2*i);
6006 Indices.push_back(2*i+1);
6007 }
6009 int PairPos = 0, End = Ops.size() - 1;
6010 while (PairPos < End) {
6011 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6012 Ops[PairPos+1], Indices,
6013 Name));
6014 PairPos += 2;
6015 }
6017 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
6018 // of the last 128-bit lookup table with zero.
6019 if (PairPos == End) {
6020 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
6021 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6022 ZeroTbl, Indices, Name));
6023 }
6025 Function *TblF;
6026 TblOps.push_back(IndexOp);
6027 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
6029 return CGF.EmitNeonCall(TblF, TblOps, Name);
6030 }
6032 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
6033 unsigned Value;
6034 switch (BuiltinID) {
6035 default:
6036 return nullptr;
6037 case ARM::BI__builtin_arm_nop:
6038 Value = 0;
6039 break;
6040 case ARM::BI__builtin_arm_yield:
6041 case ARM::BI__yield:
6042 Value = 1;
6043 break;
6044 case ARM::BI__builtin_arm_wfe:
6045 case ARM::BI__wfe:
6046 Value = 2;
6047 break;
6048 case ARM::BI__builtin_arm_wfi:
6049 case ARM::BI__wfi:
6050 Value = 3;
6051 break;
6052 case ARM::BI__builtin_arm_sev:
6053 case ARM::BI__sev:
6054 Value = 4;
6055 break;
6056 case ARM::BI__builtin_arm_sevl:
6057 case ARM::BI__sevl:
6058 Value = 5;
6059 break;
6060 }
6062 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
6063 llvm::ConstantInt::get(Int32Ty, Value));
6064 }
6066 // Generates the IR for the read/write special register builtin.
6067 // ValueType is the type of the value that is to be written or read,
6068 // RegisterType is the type of the register being written to or read from.
6069 static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
6070 const CallExpr *E,
6071 llvm::Type *RegisterType,
6072 llvm::Type *ValueType,
6073 bool IsRead,
6074 StringRef SysReg = "") {
6075 // The write and read register intrinsics only support 32- and 64-bit operations.
6076 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
6077 && "Unsupported size for register.");
6079 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6080 CodeGen::CodeGenModule &CGM = CGF.CGM;
6081 LLVMContext &Context = CGM.getLLVMContext();
6083 if (SysReg.empty()) {
6084 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
6085 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
6086 }
6088 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
6089 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
6090 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
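// The target register is identified by name: the read_register and
// write_register intrinsics take it as metadata of the form !{!"regname"}.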
6092 llvm::Type *Types[] = { RegisterType };
6094 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
6095 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
6096 && "Can't fit 64-bit value in 32-bit register");
6099 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
6100 llvm::Value *Call = Builder.CreateCall(F, Metadata);
6102 if (MixedTypes)
6103 // Read into 64 bit register and then truncate result to 32 bit.
6104 return Builder.CreateTrunc(Call, ValueType);
6106 if (ValueType->isPointerTy())
6107 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
6108 return Builder.CreateIntToPtr(Call, ValueType);
6110 return Call;
6111 }
6113 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
6114 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
6115 if (MixedTypes) {
6116 // Extend 32 bit write value to 64 bit to pass to write.
6117 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
6118 return Builder.CreateCall(F, { Metadata, ArgValue });
6119 }
6121 if (ValueType->isPointerTy()) {
6122 // Have VoidPtrTy ArgValue but want to return an i32/i64.
6123 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
6124 return Builder.CreateCall(F, { Metadata, ArgValue });
6125 }
6127 return Builder.CreateCall(F, { Metadata, ArgValue });
6128 }
6130 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
6131 /// argument that specifies the vector type.
6132 static bool HasExtraNeonArgument(unsigned BuiltinID) {
6133 switch (BuiltinID) {
6134 default: break;
6135 case NEON::BI__builtin_neon_vget_lane_i8:
6136 case NEON::BI__builtin_neon_vget_lane_i16:
6137 case NEON::BI__builtin_neon_vget_lane_i32:
6138 case NEON::BI__builtin_neon_vget_lane_i64:
6139 case NEON::BI__builtin_neon_vget_lane_f32:
6140 case NEON::BI__builtin_neon_vgetq_lane_i8:
6141 case NEON::BI__builtin_neon_vgetq_lane_i16:
6142 case NEON::BI__builtin_neon_vgetq_lane_i32:
6143 case NEON::BI__builtin_neon_vgetq_lane_i64:
6144 case NEON::BI__builtin_neon_vgetq_lane_f32:
6145 case NEON::BI__builtin_neon_vset_lane_i8:
6146 case NEON::BI__builtin_neon_vset_lane_i16:
6147 case NEON::BI__builtin_neon_vset_lane_i32:
6148 case NEON::BI__builtin_neon_vset_lane_i64:
6149 case NEON::BI__builtin_neon_vset_lane_f32:
6150 case NEON::BI__builtin_neon_vsetq_lane_i8:
6151 case NEON::BI__builtin_neon_vsetq_lane_i16:
6152 case NEON::BI__builtin_neon_vsetq_lane_i32:
6153 case NEON::BI__builtin_neon_vsetq_lane_i64:
6154 case NEON::BI__builtin_neon_vsetq_lane_f32:
6155 case NEON::BI__builtin_neon_vsha1h_u32:
6156 case NEON::BI__builtin_neon_vsha1cq_u32:
6157 case NEON::BI__builtin_neon_vsha1pq_u32:
6158 case NEON::BI__builtin_neon_vsha1mq_u32:
6159 case clang::ARM::BI_MoveToCoprocessor:
6160 case clang::ARM::BI_MoveToCoprocessor2:
6161 return false;
6162 }
6163 return true;
6164 }
6166 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
6167 const CallExpr *E,
6168 ReturnValueSlot ReturnValue,
6169 llvm::Triple::ArchType Arch) {
6170 if (auto Hint = GetValueForARMHint(BuiltinID))
6171 return Hint;
6173 if (BuiltinID == ARM::BI__emit) {
6174 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
6175 llvm::FunctionType *FTy =
6176 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
6178 Expr::EvalResult Result;
6179 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
6180 llvm_unreachable("Sema will ensure that the parameter is constant");
6182 llvm::APSInt Value = Result.Val.getInt();
6183 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
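// __emit places the literal encoding into the instruction stream through a
// side-effecting inline-asm .inst directive (.inst.n, 16 bits, for Thumb).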
6185 llvm::InlineAsm *Emit =
6186 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
6187 /*hasSideEffects=*/true)
6188 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
6189 /*hasSideEffects=*/true);
6191 return Builder.CreateCall(Emit);
6192 }
6194 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
6195 Value *Option = EmitScalarExpr(E->getArg(0));
6196 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
6197 }
6199 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
6200 Value *Address = EmitScalarExpr(E->getArg(0));
6201 Value *RW = EmitScalarExpr(E->getArg(1));
6202 Value *IsData = EmitScalarExpr(E->getArg(2));
6204 // Locality is not supported on the ARM target, so pass the maximum value (3).
6205 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
6207 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
6208 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
6209 }
6211 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
6212 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6213 return Builder.CreateCall(
6214 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
6215 }
6217 if (BuiltinID == ARM::BI__builtin_arm_cls) {
6218 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6219 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
6220 }
6221 if (BuiltinID == ARM::BI__builtin_arm_cls64) {
6222 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6223 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
6224 "cls");
6225 }
6227 if (BuiltinID == ARM::BI__clear_cache) {
6228 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
6229 const FunctionDecl *FD = E->getDirectCallee();
6230 Value *Ops[2];
6231 for (unsigned i = 0; i < 2; i++)
6232 Ops[i] = EmitScalarExpr(E->getArg(i));
6233 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
6234 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
6235 StringRef Name = FD->getName();
6236 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
6237 }
6239 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
6240 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
6242 Function *F;
6243 switch (BuiltinID) {
6244 default: llvm_unreachable("unexpected builtin");
6245 case ARM::BI__builtin_arm_mcrr:
6246 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
6247 break;
6248 case ARM::BI__builtin_arm_mcrr2:
6249 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
6250 break;
6251 }
6253 // The MCRR{2} instruction has 5 operands, but the intrinsic has only 4
6254 // because Rt and Rt2 are represented as a single unsigned 64-bit integer
6255 // in the intrinsic definition; internally they are two 32-bit values.
6260 Value *Coproc = EmitScalarExpr(E->getArg(0));
6261 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6262 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
6263 Value *CRm = EmitScalarExpr(E->getArg(3));
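// Split the i64 third argument into the two i32 words the MCRR{2}
// instruction actually transfers: Rt is the low half, Rt2 the high half.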
6265 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6266 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
6267 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
6268 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
6270 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
6271 }
6273 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
6274 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
6276 Function *F;
6277 switch (BuiltinID) {
6278 default: llvm_unreachable("unexpected builtin");
6279 case ARM::BI__builtin_arm_mrrc:
6280 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
6281 break;
6282 case ARM::BI__builtin_arm_mrrc2:
6283 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
6284 break;
6285 }
6287 Value *Coproc = EmitScalarExpr(E->getArg(0));
6288 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6289 Value *CRm = EmitScalarExpr(E->getArg(2));
6290 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
6292 // Returns an unsigned 64 bit integer, represented
6293 // as two 32 bit integers.
6295 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
6296 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
6297 Rt = Builder.CreateZExt(Rt, Int64Ty);
6298 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
6300 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
6301 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
6302 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
6304 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
6305 }
6307 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
6308 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
6309 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
6310 getContext().getTypeSize(E->getType()) == 64) ||
6311 BuiltinID == ARM::BI__ldrexd) {
6313 Function *F;
6314 switch (BuiltinID) {
6315 default: llvm_unreachable("unexpected builtin");
6316 case ARM::BI__builtin_arm_ldaex:
6317 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
6318 break;
6319 case ARM::BI__builtin_arm_ldrexd:
6320 case ARM::BI__builtin_arm_ldrex:
6321 case ARM::BI__ldrexd:
6322 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
6323 break;
6324 }
6326 Value *LdPtr = EmitScalarExpr(E->getArg(0));
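// arm_ldrexd yields two i32 words; reassemble them into an i64 as
// (Val0 << 32) | Val1 and bitcast to the builtin's declared result type.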
6327 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
6328 "ldrexd");
6330 Value *Val0 = Builder.CreateExtractValue(Val, 1);
6331 Value *Val1 = Builder.CreateExtractValue(Val, 0);
6332 Val0 = Builder.CreateZExt(Val0, Int64Ty);
6333 Val1 = Builder.CreateZExt(Val1, Int64Ty);
6335 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
6336 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
6337 Val = Builder.CreateOr(Val, Val1);
6338 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
6339 }
6341 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
6342 BuiltinID == ARM::BI__builtin_arm_ldaex) {
6343 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
6345 QualType Ty = E->getType();
6346 llvm::Type *RealResTy = ConvertType(Ty);
6347 llvm::Type *PtrTy = llvm::IntegerType::get(
6348 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
6349 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
6351 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
6352 ? Intrinsic::arm_ldaex
6353 : Intrinsic::arm_ldrex,
6354 PtrTy);
6355 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
6357 if (RealResTy->isPointerTy())
6358 return Builder.CreateIntToPtr(Val, RealResTy);
6360 llvm::Type *IntResTy = llvm::IntegerType::get(
6361 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
6362 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
6363 return Builder.CreateBitCast(Val, RealResTy);
6364 }
6367 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
6368 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
6369 BuiltinID == ARM::BI__builtin_arm_strex) &&
6370 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
6371 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6372 ? Intrinsic::arm_stlexd
6373 : Intrinsic::arm_strexd);
6374 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
6376 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
6377 Value *Val = EmitScalarExpr(E->getArg(0));
6378 Builder.CreateStore(Val, Tmp);
6380 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
6381 Val = Builder.CreateLoad(LdPtr);
6383 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
6384 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
6385 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
6386 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
6387 }
6389 if (BuiltinID == ARM::BI__builtin_arm_strex ||
6390 BuiltinID == ARM::BI__builtin_arm_stlex) {
6391 Value *StoreVal = EmitScalarExpr(E->getArg(0));
6392 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
6394 QualType Ty = E->getArg(0)->getType();
6395 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
6396 getContext().getTypeSize(Ty));
6397 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
6399 if (StoreVal->getType()->isPointerTy())
6400 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
6401 else {
6402 llvm::Type *IntTy = llvm::IntegerType::get(
6403 getLLVMContext(),
6404 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
6405 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
6406 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
6407 }
6409 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6410 ? Intrinsic::arm_stlex
6411 : Intrinsic::arm_strex,
6412 StoreAddr->getType());
6413 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
6414 }
6416 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
6417 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
6418 return Builder.CreateCall(F);
6419 }
6421 // CRC32
6422 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
6423 switch (BuiltinID) {
6424 case ARM::BI__builtin_arm_crc32b:
6425 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
6426 case ARM::BI__builtin_arm_crc32cb:
6427 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
6428 case ARM::BI__builtin_arm_crc32h:
6429 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
6430 case ARM::BI__builtin_arm_crc32ch:
6431 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
6432 case ARM::BI__builtin_arm_crc32w:
6433 case ARM::BI__builtin_arm_crc32d:
6434 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
6435 case ARM::BI__builtin_arm_crc32cw:
6436 case ARM::BI__builtin_arm_crc32cd:
6437 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
6438 }
6440 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
6441 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6442 Value *Arg1 = EmitScalarExpr(E->getArg(1));
6444 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
6445 // intrinsics, hence we need different codegen for these cases.
6446 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
6447 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
6448 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6449 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
6450 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
6451 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
6453 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6454 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
6455 return Builder.CreateCall(F, {Res, Arg1b});
6456 } else {
6457 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
6459 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6460 return Builder.CreateCall(F, {Arg0, Arg1});
6461 }
6462 }
6464 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6465 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6466 BuiltinID == ARM::BI__builtin_arm_rsrp ||
6467 BuiltinID == ARM::BI__builtin_arm_wsr ||
6468 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6469 BuiltinID == ARM::BI__builtin_arm_wsrp) {
6471 bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
6472 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6473 BuiltinID == ARM::BI__builtin_arm_rsrp;
6475 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
6476 BuiltinID == ARM::BI__builtin_arm_wsrp;
6478 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6479 BuiltinID == ARM::BI__builtin_arm_wsr64;
6481 llvm::Type *ValueType;
6482 llvm::Type *RegisterType;
6483 if (IsPointerBuiltin) {
6484 ValueType = VoidPtrTy;
6485 RegisterType = Int32Ty;
6486 } else if (Is64Bit) {
6487 ValueType = RegisterType = Int64Ty;
6488 } else {
6489 ValueType = RegisterType = Int32Ty;
6490 }
6492 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
6493 }
6495 // Deal with MVE builtins
6496 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
6497 return Result;
6499 // Find out if any arguments are required to be integer constant
6500 // expressions.
6501 unsigned ICEArguments = 0;
6502 ASTContext::GetBuiltinTypeError Error;
6503 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6504 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6506 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6507 return Builder.getInt32(addr.getAlignment().getQuantity());
6508 };
6510 Address PtrOp0 = Address::invalid();
6511 Address PtrOp1 = Address::invalid();
6512 SmallVector<Value*, 4> Ops;
6513 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
6514 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
6515 for (unsigned i = 0, e = NumArgs; i != e; i++) {
6516 if (i == 0) {
6517 switch (BuiltinID) {
6518 case NEON::BI__builtin_neon_vld1_v:
6519 case NEON::BI__builtin_neon_vld1q_v:
6520 case NEON::BI__builtin_neon_vld1q_lane_v:
6521 case NEON::BI__builtin_neon_vld1_lane_v:
6522 case NEON::BI__builtin_neon_vld1_dup_v:
6523 case NEON::BI__builtin_neon_vld1q_dup_v:
6524 case NEON::BI__builtin_neon_vst1_v:
6525 case NEON::BI__builtin_neon_vst1q_v:
6526 case NEON::BI__builtin_neon_vst1q_lane_v:
6527 case NEON::BI__builtin_neon_vst1_lane_v:
6528 case NEON::BI__builtin_neon_vst2_v:
6529 case NEON::BI__builtin_neon_vst2q_v:
6530 case NEON::BI__builtin_neon_vst2_lane_v:
6531 case NEON::BI__builtin_neon_vst2q_lane_v:
6532 case NEON::BI__builtin_neon_vst3_v:
6533 case NEON::BI__builtin_neon_vst3q_v:
6534 case NEON::BI__builtin_neon_vst3_lane_v:
6535 case NEON::BI__builtin_neon_vst3q_lane_v:
6536 case NEON::BI__builtin_neon_vst4_v:
6537 case NEON::BI__builtin_neon_vst4q_v:
6538 case NEON::BI__builtin_neon_vst4_lane_v:
6539 case NEON::BI__builtin_neon_vst4q_lane_v:
6540 // Get the alignment for the argument in addition to the value;
6541 // we'll use it later.
6542 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
6543 Ops.push_back(PtrOp0.getPointer());
6544 continue;
6545 }
6546 }
6547 if (i == 1) {
6548 switch (BuiltinID) {
6549 case NEON::BI__builtin_neon_vld2_v:
6550 case NEON::BI__builtin_neon_vld2q_v:
6551 case NEON::BI__builtin_neon_vld3_v:
6552 case NEON::BI__builtin_neon_vld3q_v:
6553 case NEON::BI__builtin_neon_vld4_v:
6554 case NEON::BI__builtin_neon_vld4q_v:
6555 case NEON::BI__builtin_neon_vld2_lane_v:
6556 case NEON::BI__builtin_neon_vld2q_lane_v:
6557 case NEON::BI__builtin_neon_vld3_lane_v:
6558 case NEON::BI__builtin_neon_vld3q_lane_v:
6559 case NEON::BI__builtin_neon_vld4_lane_v:
6560 case NEON::BI__builtin_neon_vld4q_lane_v:
6561 case NEON::BI__builtin_neon_vld2_dup_v:
6562 case NEON::BI__builtin_neon_vld2q_dup_v:
6563 case NEON::BI__builtin_neon_vld3_dup_v:
6564 case NEON::BI__builtin_neon_vld3q_dup_v:
6565 case NEON::BI__builtin_neon_vld4_dup_v:
6566 case NEON::BI__builtin_neon_vld4q_dup_v:
6567 // Get the alignment for the argument in addition to the value;
6568 // we'll use it later.
6569 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
6570 Ops.push_back(PtrOp1.getPointer());
6571 continue;
6572 }
6573 }
6575 if ((ICEArguments & (1 << i)) == 0) {
6576 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6577 } else {
6578 // If this is required to be a constant, constant fold it so that we know
6579 // that the generated intrinsic gets a ConstantInt.
6580 llvm::APSInt Result;
6581 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
6582 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
6583 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
6584 }
6585 }
6587 switch (BuiltinID) {
6588 default: break;
6590 case NEON::BI__builtin_neon_vget_lane_i8:
6591 case NEON::BI__builtin_neon_vget_lane_i16:
6592 case NEON::BI__builtin_neon_vget_lane_i32:
6593 case NEON::BI__builtin_neon_vget_lane_i64:
6594 case NEON::BI__builtin_neon_vget_lane_f32:
6595 case NEON::BI__builtin_neon_vgetq_lane_i8:
6596 case NEON::BI__builtin_neon_vgetq_lane_i16:
6597 case NEON::BI__builtin_neon_vgetq_lane_i32:
6598 case NEON::BI__builtin_neon_vgetq_lane_i64:
6599 case NEON::BI__builtin_neon_vgetq_lane_f32:
6600 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
6602 case NEON::BI__builtin_neon_vrndns_f32: {
6603 Value *Arg = EmitScalarExpr(E->getArg(0));
6604 llvm::Type *Tys[] = {Arg->getType()};
6605 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
6606 return Builder.CreateCall(F, {Arg}, "vrndn"); }
6608 case NEON::BI__builtin_neon_vset_lane_i8:
6609 case NEON::BI__builtin_neon_vset_lane_i16:
6610 case NEON::BI__builtin_neon_vset_lane_i32:
6611 case NEON::BI__builtin_neon_vset_lane_i64:
6612 case NEON::BI__builtin_neon_vset_lane_f32:
6613 case NEON::BI__builtin_neon_vsetq_lane_i8:
6614 case NEON::BI__builtin_neon_vsetq_lane_i16:
6615 case NEON::BI__builtin_neon_vsetq_lane_i32:
6616 case NEON::BI__builtin_neon_vsetq_lane_i64:
6617 case NEON::BI__builtin_neon_vsetq_lane_f32:
6618 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
6620 case NEON::BI__builtin_neon_vsha1h_u32:
6621 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
6622 "vsha1h");
6623 case NEON::BI__builtin_neon_vsha1cq_u32:
6624 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
6625 "vsha1c");
6626 case NEON::BI__builtin_neon_vsha1pq_u32:
6627 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
6628 "vsha1p");
6629 case NEON::BI__builtin_neon_vsha1mq_u32:
6630 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
6631 "vsha1m");
6633 // The ARM _MoveToCoprocessor builtins put the input register value as
6634 // the first argument, but the LLVM intrinsic expects it as the third one.
6635 case ARM::BI_MoveToCoprocessor:
6636 case ARM::BI_MoveToCoprocessor2: {
6637 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
6638 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
6639 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
6640 Ops[3], Ops[4], Ops[5]});
6641 }
6642 case ARM::BI_BitScanForward:
6643 case ARM::BI_BitScanForward64:
6644 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
6645 case ARM::BI_BitScanReverse:
6646 case ARM::BI_BitScanReverse64:
6647 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
6649 case ARM::BI_InterlockedAnd64:
6650 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
6651 case ARM::BI_InterlockedExchange64:
6652 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
6653 case ARM::BI_InterlockedExchangeAdd64:
6654 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
6655 case ARM::BI_InterlockedExchangeSub64:
6656 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
6657 case ARM::BI_InterlockedOr64:
6658 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
6659 case ARM::BI_InterlockedXor64:
6660 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
6661 case ARM::BI_InterlockedDecrement64:
6662 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
6663 case ARM::BI_InterlockedIncrement64:
6664 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
6665 case ARM::BI_InterlockedExchangeAdd8_acq:
6666 case ARM::BI_InterlockedExchangeAdd16_acq:
6667 case ARM::BI_InterlockedExchangeAdd_acq:
6668 case ARM::BI_InterlockedExchangeAdd64_acq:
6669 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
6670 case ARM::BI_InterlockedExchangeAdd8_rel:
6671 case ARM::BI_InterlockedExchangeAdd16_rel:
6672 case ARM::BI_InterlockedExchangeAdd_rel:
6673 case ARM::BI_InterlockedExchangeAdd64_rel:
6674 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
6675 case ARM::BI_InterlockedExchangeAdd8_nf:
6676 case ARM::BI_InterlockedExchangeAdd16_nf:
6677 case ARM::BI_InterlockedExchangeAdd_nf:
6678 case ARM::BI_InterlockedExchangeAdd64_nf:
6679 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
6680 case ARM::BI_InterlockedExchange8_acq:
6681 case ARM::BI_InterlockedExchange16_acq:
6682 case ARM::BI_InterlockedExchange_acq:
6683 case ARM::BI_InterlockedExchange64_acq:
6684 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
6685 case ARM::BI_InterlockedExchange8_rel:
6686 case ARM::BI_InterlockedExchange16_rel:
6687 case ARM::BI_InterlockedExchange_rel:
6688 case ARM::BI_InterlockedExchange64_rel:
6689 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
6690 case ARM::BI_InterlockedExchange8_nf:
6691 case ARM::BI_InterlockedExchange16_nf:
6692 case ARM::BI_InterlockedExchange_nf:
6693 case ARM::BI_InterlockedExchange64_nf:
6694 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
6695 case ARM::BI_InterlockedCompareExchange8_acq:
6696 case ARM::BI_InterlockedCompareExchange16_acq:
6697 case ARM::BI_InterlockedCompareExchange_acq:
6698 case ARM::BI_InterlockedCompareExchange64_acq:
6699 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
6700 case ARM::BI_InterlockedCompareExchange8_rel:
6701 case ARM::BI_InterlockedCompareExchange16_rel:
6702 case ARM::BI_InterlockedCompareExchange_rel:
6703 case ARM::BI_InterlockedCompareExchange64_rel:
6704 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
6705 case ARM::BI_InterlockedCompareExchange8_nf:
6706 case ARM::BI_InterlockedCompareExchange16_nf:
6707 case ARM::BI_InterlockedCompareExchange_nf:
6708 case ARM::BI_InterlockedCompareExchange64_nf:
6709 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
6710 case ARM::BI_InterlockedOr8_acq:
6711 case ARM::BI_InterlockedOr16_acq:
6712 case ARM::BI_InterlockedOr_acq:
6713 case ARM::BI_InterlockedOr64_acq:
6714 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
6715 case ARM::BI_InterlockedOr8_rel:
6716 case ARM::BI_InterlockedOr16_rel:
6717 case ARM::BI_InterlockedOr_rel:
6718 case ARM::BI_InterlockedOr64_rel:
6719 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
6720 case ARM::BI_InterlockedOr8_nf:
6721 case ARM::BI_InterlockedOr16_nf:
6722 case ARM::BI_InterlockedOr_nf:
6723 case ARM::BI_InterlockedOr64_nf:
6724 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
6725 case ARM::BI_InterlockedXor8_acq:
6726 case ARM::BI_InterlockedXor16_acq:
6727 case ARM::BI_InterlockedXor_acq:
6728 case ARM::BI_InterlockedXor64_acq:
6729 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
6730 case ARM::BI_InterlockedXor8_rel:
6731 case ARM::BI_InterlockedXor16_rel:
6732 case ARM::BI_InterlockedXor_rel:
6733 case ARM::BI_InterlockedXor64_rel:
6734 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
6735 case ARM::BI_InterlockedXor8_nf:
6736 case ARM::BI_InterlockedXor16_nf:
6737 case ARM::BI_InterlockedXor_nf:
6738 case ARM::BI_InterlockedXor64_nf:
6739 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
6740 case ARM::BI_InterlockedAnd8_acq:
6741 case ARM::BI_InterlockedAnd16_acq:
6742 case ARM::BI_InterlockedAnd_acq:
6743 case ARM::BI_InterlockedAnd64_acq:
6744 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
6745 case ARM::BI_InterlockedAnd8_rel:
6746 case ARM::BI_InterlockedAnd16_rel:
6747 case ARM::BI_InterlockedAnd_rel:
6748 case ARM::BI_InterlockedAnd64_rel:
6749 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
6750 case ARM::BI_InterlockedAnd8_nf:
6751 case ARM::BI_InterlockedAnd16_nf:
6752 case ARM::BI_InterlockedAnd_nf:
6753 case ARM::BI_InterlockedAnd64_nf:
6754 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
6755 case ARM::BI_InterlockedIncrement16_acq:
6756 case ARM::BI_InterlockedIncrement_acq:
6757 case ARM::BI_InterlockedIncrement64_acq:
6758 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
6759 case ARM::BI_InterlockedIncrement16_rel:
6760 case ARM::BI_InterlockedIncrement_rel:
6761 case ARM::BI_InterlockedIncrement64_rel:
6762 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
6763 case ARM::BI_InterlockedIncrement16_nf:
6764 case ARM::BI_InterlockedIncrement_nf:
6765 case ARM::BI_InterlockedIncrement64_nf:
6766 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
6767 case ARM::BI_InterlockedDecrement16_acq:
6768 case ARM::BI_InterlockedDecrement_acq:
6769 case ARM::BI_InterlockedDecrement64_acq:
6770 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
6771 case ARM::BI_InterlockedDecrement16_rel:
6772 case ARM::BI_InterlockedDecrement_rel:
6773 case ARM::BI_InterlockedDecrement64_rel:
6774 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
6775 case ARM::BI_InterlockedDecrement16_nf:
6776 case ARM::BI_InterlockedDecrement_nf:
6777 case ARM::BI_InterlockedDecrement64_nf:
6778 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
6779 }
6781 // Get the last argument, which specifies the vector type.
6782 assert(HasExtraArg);
6783 llvm::APSInt Result;
6784 const Expr *Arg = E->getArg(E->getNumArgs()-1);
6785 if (!Arg->isIntegerConstantExpr(Result, getContext()))
6786 return nullptr;
6788 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
6789 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
6790 // Determine the overloaded type of this builtin.
6791 llvm::Type *Ty;
6792 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
6793 Ty = FloatTy;
6794 else
6795 Ty = DoubleTy;
6797 // Determine whether this is an unsigned conversion or not.
6798 bool usgn = Result.getZExtValue() == 1;
6799 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
6801 // Call the appropriate intrinsic.
6802 Function *F = CGM.getIntrinsic(Int, Ty);
6803 return Builder.CreateCall(F, Ops, "vcvtr");
6804 }
6806 // Determine the type of this overloaded NEON intrinsic.
6807 NeonTypeFlags Type(Result.getZExtValue());
6808 bool usgn = Type.isUnsigned();
6809 bool rightShift = false;
6811 llvm::VectorType *VTy = GetNeonType(this, Type,
6812 getTarget().hasLegalHalfType());
6813 llvm::Type *Ty = VTy;
6814 if (!Ty)
6815 return nullptr;
6817 // Many NEON builtins have identical semantics and uses in ARM and
6818 // AArch64. Emit these in a single function.
6819 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
6820 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
6821 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
6822 if (Builtin)
6823 return EmitCommonNeonBuiltinExpr(
6824 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
6825 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
6828 switch (BuiltinID) {
6829 default: return nullptr;
6830 case NEON::BI__builtin_neon_vld1q_lane_v:
6831 // Handle 64-bit integer elements as a special case. Use shuffles of
6832 // one-element vectors to avoid poor code for i64 in the backend.
6833 if (VTy->getElementType()->isIntegerTy(64)) {
6834 // Extract the other lane.
6835 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6836 uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
6837 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
6838 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
6839 // Load the value as a one-element vector.
6840 Ty = llvm::VectorType::get(VTy->getElementType(), 1);
6841 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6842 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
6843 Value *Align = getAlignmentValue32(PtrOp0);
6844 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
6846 uint32_t Indices[] = {1 - Lane, Lane};
6847 SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
6848 return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
6849 }
6850 LLVM_FALLTHROUGH;
6851 case NEON::BI__builtin_neon_vld1_lane_v: {
6852 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6853 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
6854 Value *Ld = Builder.CreateLoad(PtrOp0);
6855 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
6856 }
6857 case NEON::BI__builtin_neon_vqrshrn_n_v:
6858 Int =
6859 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
6860 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
6861 1, true);
6862 case NEON::BI__builtin_neon_vqrshrun_n_v:
6863 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
6864 Ops, "vqrshrun_n", 1, true);
6865 case NEON::BI__builtin_neon_vqshrn_n_v:
6866 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
6867 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
6868 1, true);
6869 case NEON::BI__builtin_neon_vqshrun_n_v:
6870 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
6871 Ops, "vqshrun_n", 1, true);
6872 case NEON::BI__builtin_neon_vrecpe_v:
6873 case NEON::BI__builtin_neon_vrecpeq_v:
6874 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
6875 Ops, "vrecpe");
6876 case NEON::BI__builtin_neon_vrshrn_n_v:
6877 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
6878 Ops, "vrshrn_n", 1, true);
6879 case NEON::BI__builtin_neon_vrsra_n_v:
6880 case NEON::BI__builtin_neon_vrsraq_n_v:
6881 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6882 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6883 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
6884 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
6885 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
6886 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
6887 case NEON::BI__builtin_neon_vsri_n_v:
6888 case NEON::BI__builtin_neon_vsriq_n_v:
6889 rightShift = true;
6890 LLVM_FALLTHROUGH;
6891 case NEON::BI__builtin_neon_vsli_n_v:
6892 case NEON::BI__builtin_neon_vsliq_n_v:
6893 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
6894 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
6895 Ops, "vsli_n");
6896 case NEON::BI__builtin_neon_vsra_n_v:
6897 case NEON::BI__builtin_neon_vsraq_n_v:
6898 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6899 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
6900 return Builder.CreateAdd(Ops[0], Ops[1]);
6901 case NEON::BI__builtin_neon_vst1q_lane_v:
6902 // Handle 64-bit integer elements as a special case. Use a shuffle to get
6903 // a one-element vector and avoid poor code for i64 in the backend.
6904 if (VTy->getElementType()->isIntegerTy(64)) {
6905 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6906 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
6907 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
6908 Ops[2] = getAlignmentValue32(PtrOp0);
6909 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
6910 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
6911 Tys), Ops);
6912 }
6913 LLVM_FALLTHROUGH;
6914 case NEON::BI__builtin_neon_vst1_lane_v: {
6915 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6916 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
6917 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6918 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
6919 return St;
6920 }
6921 case NEON::BI__builtin_neon_vtbl1_v:
6922 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
6923 Ops, "vtbl1");
6924 case NEON::BI__builtin_neon_vtbl2_v:
6925 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
6926 Ops, "vtbl2");
6927 case NEON::BI__builtin_neon_vtbl3_v:
6928 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
6929 Ops, "vtbl3");
6930 case NEON::BI__builtin_neon_vtbl4_v:
6931 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
6932 Ops, "vtbl4");
6933 case NEON::BI__builtin_neon_vtbx1_v:
6934 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
6935 Ops, "vtbx1");
6936 case NEON::BI__builtin_neon_vtbx2_v:
6937 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
6938 Ops, "vtbx2");
6939 case NEON::BI__builtin_neon_vtbx3_v:
6940 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
6941 Ops, "vtbx3");
6942 case NEON::BI__builtin_neon_vtbx4_v:
6943 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
6944 Ops, "vtbx4");
6945 }
6946 }
6948 template<typename Integer>
6949 static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
6950 llvm::APSInt IntVal;
6951 bool IsConst = E->isIntegerConstantExpr(IntVal, Context);
6952 assert(IsConst && "Sema should have checked this was a constant");
6953 (void)IsConst;
6954 return IntVal.getExtValue();
6955 }
6957 static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
6958 llvm::Type *T, bool Unsigned) {
6959 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
6960 // which finds it convenient to specify signed/unsigned as a boolean flag.
6961 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
6962 }
6964 static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
6965 uint32_t Shift, bool Unsigned) {
6966 // MVE helper function for integer shift right. This must handle signed vs
6967 // unsigned, and also deal specially with the case where the shift count is
6968 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
6969 // undefined behavior, but in MVE it's legal, so we must convert it to code
6970 // that is not undefined in IR.
6971 unsigned LaneBits =
6972 V->getType()->getVectorElementType()->getPrimitiveSizeInBits();
6973 if (Shift == LaneBits) {
6974 // An unsigned shift of the full lane size always generates zero, so we can
6975 // simply emit a zero vector. A signed shift of the full lane size does the
6976 // same thing as shifting by one bit fewer.
6977 if (Unsigned)
6978 return llvm::Constant::getNullValue(V->getType());
6979 else
6980 --Shift;
6981 }
6982 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
6983 }
6985 static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
6986 // MVE-specific helper function for a vector splat, which infers the element
6987 // count of the output vector by knowing that MVE vectors are all 128 bits
6988 // wide.
6989 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
6990 return Builder.CreateVectorSplat(Elements, V);
6991 }
6993 Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
6994 const CallExpr *E,
6995 ReturnValueSlot ReturnValue,
6996 llvm::Triple::ArchType Arch) {
6997 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
6998 Intrinsic::ID IRIntr;
6999 unsigned NumVectors;
7001 // Code autogenerated by Tablegen will handle all the simple builtins.
7002 switch (BuiltinID) {
7003 #include "clang/Basic/arm_mve_builtin_cg.inc"
7005 // If we didn't match an MVE builtin id at all, go back to the
7006 // main EmitARMBuiltinExpr.
7007 default:
7008 return nullptr;
7009 }
7011 // Anything that breaks from that switch is an MVE builtin that
7012 // needs handwritten code to generate.
7014 switch (CustomCodeGenType) {
7016 case CustomCodeGen::VLD24: {
7017 llvm::SmallVector<Value *, 4> Ops;
7018 llvm::SmallVector<llvm::Type *, 4> Tys;
7020 auto MvecCType = E->getType();
7021 auto MvecLType = ConvertType(MvecCType);
7022 assert(MvecLType->isStructTy() &&
7023 "Return type for vld[24]q should be a struct");
7024 assert(MvecLType->getStructNumElements() == 1 &&
7025 "Return-type struct for vld[24]q should have one element");
7026 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7027 assert(MvecLTypeInner->isArrayTy() &&
7028 "Return-type struct for vld[24]q should contain an array");
7029 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7030 "Array member of return-type struct vld[24]q has wrong length");
7031 auto VecLType = MvecLTypeInner->getArrayElementType();
7033 Tys.push_back(VecLType);
7035 auto Addr = E->getArg(0);
7036 Ops.push_back(EmitScalarExpr(Addr));
7037 Tys.push_back(ConvertType(Addr->getType()));
7039 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7040 Value *LoadResult = Builder.CreateCall(F, Ops);
7041 Value *MvecOut = UndefValue::get(MvecLType);
7042 for (unsigned i = 0; i < NumVectors; ++i) {
7043 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
7044 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
7045 }
7047 if (ReturnValue.isNull())
7048 return MvecOut;
7049 else
7050 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
7051 }
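// The IR-level intrinsic returns its NumVectors results as a struct of
// vectors, while the Clang-level return type wraps an array of vectors in a
// one-element struct, so the loop above repacks each extracted vector into
// the {0, i} position of the outer aggregate.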
7053 case CustomCodeGen::VST24: {
7054 llvm::SmallVector<Value *, 4> Ops;
7055 llvm::SmallVector<llvm::Type *, 4> Tys;
7057 auto Addr = E->getArg(0);
7058 Ops.push_back(EmitScalarExpr(Addr));
7059 Tys.push_back(ConvertType(Addr->getType()));
7061 auto MvecCType = E->getArg(1)->getType();
7062 auto MvecLType = ConvertType(MvecCType);
7063 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
7064 assert(MvecLType->getStructNumElements() == 1 &&
7065 "Data-type struct for vst2q should have one element");
7066 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7067 assert(MvecLTypeInner->isArrayTy() &&
7068 "Data-type struct for vst2q should contain an array");
7069 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7070 "Array member of return-type struct vld[24]q has wrong length");
7071 auto VecLType = MvecLTypeInner->getArrayElementType();
7073 Tys.push_back(VecLType);
7075 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
7076 EmitAggExpr(E->getArg(1), MvecSlot);
7077 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
7078 for (unsigned i = 0; i < NumVectors; i++)
7079 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
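// The store intrinsic is emitted once per registered vector: each iteration
// of the loop below appends the stage index i as the final operand, so vst2q
// expands to two intrinsic calls (stages 0 and 1) and vst4q to four.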
7081 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7082 Value *ToReturn = nullptr;
7083 for (unsigned i = 0; i < NumVectors; i++) {
7084 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
7085 ToReturn = Builder.CreateCall(F, Ops);
7086 Ops.pop_back();
7087 }
7088 return ToReturn;
7089 }
7090 }
7091 llvm_unreachable("unknown custom codegen type.");
7092 }
7094 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
7095 const CallExpr *E,
7096 SmallVectorImpl<Value *> &Ops,
7097 llvm::Triple::ArchType Arch) {
7098 unsigned int Int = 0;
7099 const char *s = nullptr;
7101 switch (BuiltinID) {
7102 default:
7103 return nullptr;
7104 case NEON::BI__builtin_neon_vtbl1_v:
7105 case NEON::BI__builtin_neon_vqtbl1_v:
7106 case NEON::BI__builtin_neon_vqtbl1q_v:
7107 case NEON::BI__builtin_neon_vtbl2_v:
7108 case NEON::BI__builtin_neon_vqtbl2_v:
7109 case NEON::BI__builtin_neon_vqtbl2q_v:
7110 case NEON::BI__builtin_neon_vtbl3_v:
7111 case NEON::BI__builtin_neon_vqtbl3_v:
7112 case NEON::BI__builtin_neon_vqtbl3q_v:
7113 case NEON::BI__builtin_neon_vtbl4_v:
7114 case NEON::BI__builtin_neon_vqtbl4_v:
7115 case NEON::BI__builtin_neon_vqtbl4q_v:
7117 case NEON::BI__builtin_neon_vtbx1_v:
7118 case NEON::BI__builtin_neon_vqtbx1_v:
7119 case NEON::BI__builtin_neon_vqtbx1q_v:
7120 case NEON::BI__builtin_neon_vtbx2_v:
7121 case NEON::BI__builtin_neon_vqtbx2_v:
7122 case NEON::BI__builtin_neon_vqtbx2q_v:
7123 case NEON::BI__builtin_neon_vtbx3_v:
7124 case NEON::BI__builtin_neon_vqtbx3_v:
7125 case NEON::BI__builtin_neon_vqtbx3q_v:
7126 case NEON::BI__builtin_neon_vtbx4_v:
7127 case NEON::BI__builtin_neon_vqtbx4_v:
7128 case NEON::BI__builtin_neon_vqtbx4q_v:
7129 break;
7130 }
7132 assert(E->getNumArgs() >= 3);
7134 // Get the last argument, which specifies the vector type.
7135 llvm::APSInt Result;
7136 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
7137 if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
7138 return nullptr;
7140 // Determine the type of this overloaded NEON intrinsic.
7141 NeonTypeFlags Type(Result.getZExtValue());
7142 llvm::VectorType *Ty = GetNeonType(&CGF, Type);
7143 if (!Ty)
7144 return nullptr;
7146 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7148 // AArch64 scalar builtins are not overloaded; they do not have an extra
7149 // argument that specifies the vector type, so each case is handled separately.
7150 switch (BuiltinID) {
7151 case NEON::BI__builtin_neon_vtbl1_v: {
7152 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
7153 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
7154 "vtbl1");
7155 }
7156 case NEON::BI__builtin_neon_vtbl2_v: {
7157 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
7158 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
7159 "vtbl1");
7160 }
7161 case NEON::BI__builtin_neon_vtbl3_v: {
7162 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
7163 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
7164 "vtbl2");
7165 }
7166 case NEON::BI__builtin_neon_vtbl4_v: {
7167 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
7168 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
7169 "vtbl2");
7170 }
7171 case NEON::BI__builtin_neon_vtbx1_v: {
7172 Value *TblRes =
7173 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
7174 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
7176 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
7177 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
7178 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7180 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7181 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7182 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7183 }
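// vtbx1 has no single AArch64 instruction, so it is emulated: indices >= 8
// are out of range for one 64-bit table, and the sign-extended UGE compare
// above yields an all-ones mask for those lanes, selecting bytes from the
// original input (Ops[0]) instead of the table-lookup result.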
7184 case NEON::BI__builtin_neon_vtbx2_v: {
7185 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
7186 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
7187 "vtbx1");
7188 }
7189 case NEON::BI__builtin_neon_vtbx3_v: {
7190 Value *TblRes =
7191 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
7192 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
7194 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
7195 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
7196 TwentyFourV);
7197 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7199 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7200 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7201 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7202 }
7203 case NEON::BI__builtin_neon_vtbx4_v: {
7204 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
7205 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
7206 "vtbx2");
7207 }
7208 case NEON::BI__builtin_neon_vqtbl1_v:
7209 case NEON::BI__builtin_neon_vqtbl1q_v:
7210 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
7211 case NEON::BI__builtin_neon_vqtbl2_v:
7212 case NEON::BI__builtin_neon_vqtbl2q_v:
7213 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
7214 case NEON::BI__builtin_neon_vqtbl3_v:
7215 case NEON::BI__builtin_neon_vqtbl3q_v:
7216 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
7217 case NEON::BI__builtin_neon_vqtbl4_v:
7218 case NEON::BI__builtin_neon_vqtbl4q_v:
7219 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
7220 case NEON::BI__builtin_neon_vqtbx1_v:
7221 case NEON::BI__builtin_neon_vqtbx1q_v:
7222 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
7223 case NEON::BI__builtin_neon_vqtbx2_v:
7224 case NEON::BI__builtin_neon_vqtbx2q_v:
7225 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
7226 case NEON::BI__builtin_neon_vqtbx3_v:
7227 case NEON::BI__builtin_neon_vqtbx3q_v:
7228 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
7229 case NEON::BI__builtin_neon_vqtbx4_v:
7230 case NEON::BI__builtin_neon_vqtbx4q_v:
7231 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
7232 }
7235 if (!Int)
7236 return nullptr;
7238 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
7239 return CGF.EmitNeonCall(F, Ops, s);
7240 }
7242 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
7243 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
7244 Op = Builder.CreateBitCast(Op, Int16Ty);
7245 Value *V = UndefValue::get(VTy);
7246 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
7247 Op = Builder.CreateInsertElement(V, Op, CI);
7248 return Op;
7249 }
7251 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
7252 const CallExpr *E,
7253 llvm::Triple::ArchType Arch) {
7254 unsigned HintID = static_cast<unsigned>(-1);
7255 switch (BuiltinID) {
7256 default: break;
7257 case AArch64::BI__builtin_arm_nop:
7258 HintID = 0;
7259 break;
7260 case AArch64::BI__builtin_arm_yield:
7261 case AArch64::BI__yield:
7262 HintID = 1;
7263 break;
7264 case AArch64::BI__builtin_arm_wfe:
7265 case AArch64::BI__wfe:
7266 HintID = 2;
7267 break;
7268 case AArch64::BI__builtin_arm_wfi:
7269 case AArch64::BI__wfi:
7270 HintID = 3;
7271 break;
7272 case AArch64::BI__builtin_arm_sev:
7273 case AArch64::BI__sev:
7274 HintID = 4;
7275 break;
7276 case AArch64::BI__builtin_arm_sevl:
7277 case AArch64::BI__sevl:
7278 HintID = 5;
7279 break;
7280 }
7282 if (HintID != static_cast<unsigned>(-1)) {
7283 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
7284 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
7285 }
7287 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
7288 Value *Address = EmitScalarExpr(E->getArg(0));
7289 Value *RW = EmitScalarExpr(E->getArg(1));
7290 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
7291 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
7292 Value *IsData = EmitScalarExpr(E->getArg(4));
7294 Value *Locality = nullptr;
7295 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
7296 // Temporal fetch, needs to convert cache level to locality.
7297 Locality = llvm::ConstantInt::get(Int32Ty,
7298 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
7299 } else {
7300 // Streaming fetch.
7301 Locality = llvm::ConstantInt::get(Int32Ty, 0);
7302 }
7304 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
7305 // PLDL3STRM or PLDL2STRM.
7306 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
7307 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
7308 }
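// For example (a sketch): __builtin_arm_prefetch(p, /*rw*/0, /*level*/0,
// /*retention*/0, /*data*/1) is a temporal fetch, so Locality = 3 - 0 = 3
// and the call lowers to llvm.prefetch(p, 0, 3, 1).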
7310 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
7311 assert((getContext().getTypeSize(E->getType()) == 32) &&
7312 "rbit of unusual size!");
7313 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7314 return Builder.CreateCall(
7315 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7316 }
7317 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
7318 assert((getContext().getTypeSize(E->getType()) == 64) &&
7319 "rbit of unusual size!");
7320 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7321 return Builder.CreateCall(
7322 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7323 }
7325 if (BuiltinID == AArch64::BI__builtin_arm_cls) {
7326 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7327 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
7328 "cls");
7329 }
7330 if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
7331 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7332 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
7333 "cls");
7334 }
7336 if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
7337 assert((getContext().getTypeSize(E->getType()) == 32) &&
7338 "__jcvt of unusual size!");
7339 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7340 return Builder.CreateCall(
7341 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
7342 }
7344 if (BuiltinID == AArch64::BI__clear_cache) {
7345 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7346 const FunctionDecl *FD = E->getDirectCallee();
7347 Value *Ops[2];
7348 for (unsigned i = 0; i < 2; i++)
7349 Ops[i] = EmitScalarExpr(E->getArg(i));
7350 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
7351 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
7352 StringRef Name = FD->getName();
7353 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
7354 }
7356 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
7357 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
7358 getContext().getTypeSize(E->getType()) == 128) {
7359 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
7360 ? Intrinsic::aarch64_ldaxp
7361 : Intrinsic::aarch64_ldxp);
7363 Value *LdPtr = EmitScalarExpr(E->getArg(0));
7364 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
7365 "ldxp");
7367 Value *Val0 = Builder.CreateExtractValue(Val, 1);
7368 Value *Val1 = Builder.CreateExtractValue(Val, 0);
7369 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
7370 Val0 = Builder.CreateZExt(Val0, Int128Ty);
7371 Val1 = Builder.CreateZExt(Val1, Int128Ty);
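// Reassemble the 128-bit result: struct element 1 of the ldxp/ldaxp return
// value becomes the high 64 bits (shifted up below) and element 0 the low
// 64 bits.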
7373 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
7374 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
7375 Val = Builder.CreateOr(Val, Val1);
7376 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
7377 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
7378 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
7379 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
7381 QualType Ty = E->getType();
7382 llvm::Type *RealResTy = ConvertType(Ty);
7383 llvm::Type *PtrTy = llvm::IntegerType::get(
7384 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
7385 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
7387 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
7388 ? Intrinsic::aarch64_ldaxr
7389 : Intrinsic::aarch64_ldxr,
7390 PtrTy);
7391 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
7393 if (RealResTy->isPointerTy())
7394 return Builder.CreateIntToPtr(Val, RealResTy);
7396 llvm::Type *IntResTy = llvm::IntegerType::get(
7397 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
7398 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
7399 return Builder.CreateBitCast(Val, RealResTy);
7400 }
7402 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
7403 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
7404 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
7405 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
7406 ? Intrinsic::aarch64_stlxp
7407 : Intrinsic::aarch64_stxp);
7408 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
7410 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
7411 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
7413 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
7414 llvm::Value *Val = Builder.CreateLoad(Tmp);
7416 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
7417 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
7418 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
7419 Int8PtrTy);
7420 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
7421 }
7423 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
7424 BuiltinID == AArch64::BI__builtin_arm_stlex) {
7425 Value *StoreVal = EmitScalarExpr(E->getArg(0));
7426 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
7428 QualType Ty = E->getArg(0)->getType();
7429 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
7430 getContext().getTypeSize(Ty));
7431 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
7433 if (StoreVal->getType()->isPointerTy())
7434 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
7435 else {
7436 llvm::Type *IntTy = llvm::IntegerType::get(
7437 getLLVMContext(),
7438 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
7439 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
7440 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
7441 }
7443 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
7444 ? Intrinsic::aarch64_stlxr
7445 : Intrinsic::aarch64_stxr,
7446 StoreAddr->getType());
7447 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
7448 }
7450 if (BuiltinID == AArch64::BI__getReg) {
7451 Expr::EvalResult Result;
7452 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
7453 llvm_unreachable("Sema will ensure that the parameter is constant");
7455 llvm::APSInt Value = Result.Val.getInt();
7456 LLVMContext &Context = CGM.getLLVMContext();
7457 std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
7459 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
7460 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7461 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7463 llvm::Function *F =
7464 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
7465 return Builder.CreateCall(F, Metadata);
7466 }
7468 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
7469 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
7470 return Builder.CreateCall(F);
7471 }
7473 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
7474 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
7475 llvm::SyncScope::SingleThread);
7477 // CRC32
7478 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
7479 switch (BuiltinID) {
7480 case AArch64::BI__builtin_arm_crc32b:
7481 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
7482 case AArch64::BI__builtin_arm_crc32cb:
7483 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
7484 case AArch64::BI__builtin_arm_crc32h:
7485 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
7486 case AArch64::BI__builtin_arm_crc32ch:
7487 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
7488 case AArch64::BI__builtin_arm_crc32w:
7489 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
7490 case AArch64::BI__builtin_arm_crc32cw:
7491 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
7492 case AArch64::BI__builtin_arm_crc32d:
7493 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
7494 case AArch64::BI__builtin_arm_crc32cd:
7495 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
7496 }
7498 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
7499 Value *Arg0 = EmitScalarExpr(E->getArg(0));
7500 Value *Arg1 = EmitScalarExpr(E->getArg(1));
7501 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7503 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
7504 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
7506 return Builder.CreateCall(F, {Arg0, Arg1});
7507 }
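// The byte/halfword CRC intrinsics still take an i32 data operand at the IR
// level, so the narrower argument is zero-extended above to the intrinsic's
// expected parameter type.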
7509 // Memory Tagging Extensions (MTE) Intrinsics
7510 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
7511 switch (BuiltinID) {
7512 case AArch64::BI__builtin_arm_irg:
7513 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
7514 case AArch64::BI__builtin_arm_addg:
7515 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
7516 case AArch64::BI__builtin_arm_gmi:
7517 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
7518 case AArch64::BI__builtin_arm_ldg:
7519 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
7520 case AArch64::BI__builtin_arm_stg:
7521 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
7522 case AArch64::BI__builtin_arm_subp:
7523 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
7524 }
7526 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
7527 llvm::Type *T = ConvertType(E->getType());
7529 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
7530 Value *Pointer = EmitScalarExpr(E->getArg(0));
7531 Value *Mask = EmitScalarExpr(E->getArg(1));
7533 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7534 Mask = Builder.CreateZExt(Mask, Int64Ty);
7535 Value *RV = Builder.CreateCall(
7536 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
7537 return Builder.CreatePointerCast(RV, T);
7539 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
7540 Value *Pointer = EmitScalarExpr(E->getArg(0));
7541 Value *TagOffset = EmitScalarExpr(E->getArg(1));
7543 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7544 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
7545 Value *RV = Builder.CreateCall(
7546 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
7547 return Builder.CreatePointerCast(RV, T);
7549 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
7550 Value *Pointer = EmitScalarExpr(E->getArg(0));
7551 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
7553 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
7554 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7555 return Builder.CreateCall(
7556 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
7558 // Although it is possible to supply a different return
7559 // address (first arg) to this intrinsic, for now we set
7560 // return address same as input address.
7561 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
7562 Value *TagAddress = EmitScalarExpr(E->getArg(0));
7563 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
7564 Value *RV = Builder.CreateCall(
7565 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
7566 return Builder.CreatePointerCast(RV, T);
7568 // Although it is possible to supply a different tag (to set)
7569 // to this intrinsic (as first arg), for now we supply
7570 // the tag that is in input address arg (common use case).
7571 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
7572 Value *TagAddress = EmitScalarExpr(E->getArg(0));
7573 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
7574 return Builder.CreateCall(
7575 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
7577 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
7578 Value *PointerA = EmitScalarExpr(E->getArg(0));
7579 Value *PointerB = EmitScalarExpr(E->getArg(1));
7580 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
7581 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
7582 return Builder.CreateCall(
7583 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
7584 }
7585 }
7587 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
7588 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
7589 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
7590 BuiltinID == AArch64::BI__builtin_arm_wsr ||
7591 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
7592 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
7594 bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
7595 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
7596 BuiltinID == AArch64::BI__builtin_arm_rsrp;
7598 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
7599 BuiltinID == AArch64::BI__builtin_arm_wsrp;
7601 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
7602 BuiltinID != AArch64::BI__builtin_arm_wsr;
7604 llvm::Type *ValueType;
7605 llvm::Type *RegisterType = Int64Ty;
7606 if (IsPointerBuiltin) {
7607 ValueType = VoidPtrTy;
7608 } else if (Is64Bit) {
7609 ValueType = Int64Ty;
7610 } else {
7611 ValueType = Int32Ty;
7612 }
7614 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
7615 }
7617 if (BuiltinID == AArch64::BI_ReadStatusReg ||
7618 BuiltinID == AArch64::BI_WriteStatusReg) {
7619 LLVMContext &Context = CGM.getLLVMContext();
7621 unsigned SysReg =
7622 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
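// MSVC packs the system register as op0:op1:CRn:CRm:op2 in a 16-bit
// immediate; unpack it below into the "o0:op1:CRn:CRm:op2" string form that
// the read_register/write_register metadata expects. Bit 14 supplies the low
// bit of op0, which is always 2 or 3.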
7624 std::string SysRegStr;
7625 llvm::raw_string_ostream(SysRegStr) <<
7626 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
7627 ((SysReg >> 11) & 7) << ":" <<
7628 ((SysReg >> 7) & 15) << ":" <<
7629 ((SysReg >> 3) & 15) << ":" <<
7630 (SysReg & 7);
7632 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
7633 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7634 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7636 llvm::Type *RegisterType = Int64Ty;
7637 llvm::Type *Types[] = { RegisterType };
7639 if (BuiltinID == AArch64::BI_ReadStatusReg) {
7640 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
7642 return Builder.CreateCall(F, Metadata);
7643 }
7645 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
7646 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
7648 return Builder.CreateCall(F, { Metadata, ArgValue });
7649 }
7651 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
7652 llvm::Function *F =
7653 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
7654 return Builder.CreateCall(F);
7655 }
7657 if (BuiltinID == AArch64::BI__builtin_sponentry) {
7658 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
7659 return Builder.CreateCall(F);
7660 }
7662 // Find out if any arguments are required to be integer constant
7663 // expressions.
7664 unsigned ICEArguments = 0;
7665 ASTContext::GetBuiltinTypeError Error;
7666 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
7667 assert(Error == ASTContext::GE_None && "Should not codegen an error");
7669 llvm::SmallVector<Value*, 4> Ops;
7670 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
7671 if ((ICEArguments & (1 << i)) == 0) {
7672 Ops.push_back(EmitScalarExpr(E->getArg(i)));
7673 } else {
7674 // If this is required to be a constant, constant fold it so that we know
7675 // that the generated intrinsic gets a ConstantInt.
7676 llvm::APSInt Result;
7677 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
7678 assert(IsConst && "Constant arg isn't actually constant?");
7679 (void)IsConst;
7680 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
7681 }
7682 }
7684 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
7685 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
7686 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
7688 if (Builtin) {
7689 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
7690 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
7691 assert(Result && "SISD intrinsic should have been handled");
7692 return Result;
7693 }
7695 llvm::APSInt Result;
7696 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7697 NeonTypeFlags Type(0);
7698 if (Arg->isIntegerConstantExpr(Result, getContext()))
7699 // Determine the type of this overloaded NEON intrinsic.
7700 Type = NeonTypeFlags(Result.getZExtValue());
7702 bool usgn = Type.isUnsigned();
7703 bool quad = Type.isQuad();
7705 // Handle non-overloaded intrinsics first.
7706 switch (BuiltinID) {
7707 default: break;
7708 case NEON::BI__builtin_neon_vabsh_f16:
7709 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7710 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
7711 case NEON::BI__builtin_neon_vldrq_p128: {
7712 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
7713 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
7714 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
7715 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
7716 CharUnits::fromQuantity(16));
7717 }
7718 case NEON::BI__builtin_neon_vstrq_p128: {
7719 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
7720 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
7721 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
7722 }
7723 case NEON::BI__builtin_neon_vcvts_u32_f32:
7724 case NEON::BI__builtin_neon_vcvtd_u64_f64:
7725 usgn = true;
7726 LLVM_FALLTHROUGH;
7727 case NEON::BI__builtin_neon_vcvts_s32_f32:
7728 case NEON::BI__builtin_neon_vcvtd_s64_f64: {
7729 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7730 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
7731 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
7732 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
7733 Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
7734 if (usgn)
7735 return Builder.CreateFPToUI(Ops[0], InTy);
7736 return Builder.CreateFPToSI(Ops[0], InTy);
7737 }
7738 case NEON::BI__builtin_neon_vcvts_f32_u32:
7739 case NEON::BI__builtin_neon_vcvtd_f64_u64:
7740 usgn = true;
7741 LLVM_FALLTHROUGH;
7742 case NEON::BI__builtin_neon_vcvts_f32_s32:
7743 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
7744 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7745 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
7746 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
7747 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
7748 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
7749 if (usgn)
7750 return Builder.CreateUIToFP(Ops[0], FTy);
7751 return Builder.CreateSIToFP(Ops[0], FTy);
7752 }
7753 case NEON::BI__builtin_neon_vcvth_f16_u16:
7754 case NEON::BI__builtin_neon_vcvth_f16_u32:
7755 case NEON::BI__builtin_neon_vcvth_f16_u64:
7756 usgn = true;
7757 LLVM_FALLTHROUGH;
7758 case NEON::BI__builtin_neon_vcvth_f16_s16:
7759 case NEON::BI__builtin_neon_vcvth_f16_s32:
7760 case NEON::BI__builtin_neon_vcvth_f16_s64: {
7761 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7762 llvm::Type *FTy = HalfTy;
7763 llvm::Type *InTy;
7764 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
7765 InTy = Int64Ty;
7766 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
7767 InTy = Int32Ty;
7768 else
7769 InTy = Int16Ty;
7770 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
7771 if (usgn)
7772 return Builder.CreateUIToFP(Ops[0], FTy);
7773 return Builder.CreateSIToFP(Ops[0], FTy);
7774 }
7775 case NEON::BI__builtin_neon_vcvth_u16_f16:
7776 usgn = true;
7777 LLVM_FALLTHROUGH;
7778 case NEON::BI__builtin_neon_vcvth_s16_f16: {
7779 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7780 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7781 if (usgn)
7782 return Builder.CreateFPToUI(Ops[0], Int16Ty);
7783 return Builder.CreateFPToSI(Ops[0], Int16Ty);
7784 }
7785 case NEON::BI__builtin_neon_vcvth_u32_f16:
7786 usgn = true;
7787 LLVM_FALLTHROUGH;
7788 case NEON::BI__builtin_neon_vcvth_s32_f16: {
7789 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7790 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7791 if (usgn)
7792 return Builder.CreateFPToUI(Ops[0], Int32Ty);
7793 return Builder.CreateFPToSI(Ops[0], Int32Ty);
7794 }
7795 case NEON::BI__builtin_neon_vcvth_u64_f16:
7796 usgn = true;
7797 LLVM_FALLTHROUGH;
7798 case NEON::BI__builtin_neon_vcvth_s64_f16: {
7799 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7800 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7801 if (usgn)
7802 return Builder.CreateFPToUI(Ops[0], Int64Ty);
7803 return Builder.CreateFPToSI(Ops[0], Int64Ty);
7804 }
7805 case NEON::BI__builtin_neon_vcvtah_u16_f16:
7806 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
7807 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
7808 case NEON::BI__builtin_neon_vcvtph_u16_f16:
7809 case NEON::BI__builtin_neon_vcvtah_s16_f16:
7810 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
7811 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
7812 case NEON::BI__builtin_neon_vcvtph_s16_f16: {
7813 unsigned Int;
7814 llvm::Type* InTy = Int32Ty;
7815 llvm::Type* FTy = HalfTy;
7816 llvm::Type *Tys[2] = {InTy, FTy};
7817 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7818 switch (BuiltinID) {
7819 default: llvm_unreachable("missing builtin ID in switch!");
7820 case NEON::BI__builtin_neon_vcvtah_u16_f16:
7821 Int = Intrinsic::aarch64_neon_fcvtau; break;
7822 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
7823 Int = Intrinsic::aarch64_neon_fcvtmu; break;
7824 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
7825 Int = Intrinsic::aarch64_neon_fcvtnu; break;
7826 case NEON::BI__builtin_neon_vcvtph_u16_f16:
7827 Int = Intrinsic::aarch64_neon_fcvtpu; break;
7828 case NEON::BI__builtin_neon_vcvtah_s16_f16:
7829 Int = Intrinsic::aarch64_neon_fcvtas; break;
7830 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
7831 Int = Intrinsic::aarch64_neon_fcvtms; break;
7832 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
7833 Int = Intrinsic::aarch64_neon_fcvtns; break;
7834 case NEON::BI__builtin_neon_vcvtph_s16_f16:
7835 Int = Intrinsic::aarch64_neon_fcvtps; break;
7837 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
7838 return Builder.CreateTrunc(Ops[0], Int16Ty);
7839 }
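// These rounding fcvt* intrinsics are emitted at i32 width and the result is
// truncated to the i16 that the f16 builtin returns.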
7840 case NEON::BI__builtin_neon_vcaleh_f16:
7841 case NEON::BI__builtin_neon_vcalth_f16:
7842 case NEON::BI__builtin_neon_vcageh_f16:
7843 case NEON::BI__builtin_neon_vcagth_f16: {
7844 unsigned Int;
7845 llvm::Type* InTy = Int32Ty;
7846 llvm::Type* FTy = HalfTy;
7847 llvm::Type *Tys[2] = {InTy, FTy};
7848 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7849 switch (BuiltinID) {
7850 default: llvm_unreachable("missing builtin ID in switch!");
7851 case NEON::BI__builtin_neon_vcageh_f16:
7852 Int = Intrinsic::aarch64_neon_facge; break;
7853 case NEON::BI__builtin_neon_vcagth_f16:
7854 Int = Intrinsic::aarch64_neon_facgt; break;
7855 case NEON::BI__builtin_neon_vcaleh_f16:
7856 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
7857 case NEON::BI__builtin_neon_vcalth_f16:
7858 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
7860 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
7861 return Builder.CreateTrunc(Ops[0], Int16Ty);
7862 }
7863 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
7864 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
7865 unsigned Int;
7866 llvm::Type* InTy = Int32Ty;
7867 llvm::Type* FTy = HalfTy;
7868 llvm::Type *Tys[2] = {InTy, FTy};
7869 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7870 switch (BuiltinID) {
7871 default: llvm_unreachable("missing builtin ID in switch!");
7872 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
7873 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
7874 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
7875 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
7876 }
7877 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
7878 return Builder.CreateTrunc(Ops[0], Int16Ty);
7879 }
7880 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
7881 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
7882 unsigned Int;
7883 llvm::Type* FTy = HalfTy;
7884 llvm::Type* InTy = Int32Ty;
7885 llvm::Type *Tys[2] = {FTy, InTy};
7886 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7887 switch (BuiltinID) {
7888 default: llvm_unreachable("missing builtin ID in switch!");
7889 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
7890 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
7891 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
7892 break;
7893 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
7894 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
7895 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
7896 break;
7897 }
7898 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
7899 }
7900 case NEON::BI__builtin_neon_vpaddd_s64: {
7901 llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
7902 Value *Vec = EmitScalarExpr(E->getArg(0));
7903 // The vector is v2i64, so make sure it's bitcast to that.
7904 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
7905 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7906 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7907 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7908 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
7909 // Pairwise addition of a v2i64 into a scalar i64.
7910 return Builder.CreateAdd(Op0, Op1, "vpaddd");
7911 }
7912 case NEON::BI__builtin_neon_vpaddd_f64: {
7913 llvm::Type *Ty =
7914 llvm::VectorType::get(DoubleTy, 2);
7915 Value *Vec = EmitScalarExpr(E->getArg(0));
7916 // The vector is v2f64, so make sure it's bitcast to that.
7917 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
7918 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7919 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7920 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7921 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
7922 // Pairwise addition of a v2f64 into a scalar f64.
7923 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
7924 }
7925 case NEON::BI__builtin_neon_vpadds_f32: {
7926 llvm::Type *Ty =
7927 llvm::VectorType::get(FloatTy, 2);
7928 Value *Vec = EmitScalarExpr(E->getArg(0));
7929 // The vector is v2f32, so make sure it's bitcast to that.
7930 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
7931 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7932 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7933 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7934 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
7935 // Pairwise addition of a v2f32 into a scalar f32.
7936 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
7937 }
7938 case NEON::BI__builtin_neon_vceqzd_s64:
7939 case NEON::BI__builtin_neon_vceqzd_f64:
7940 case NEON::BI__builtin_neon_vceqzs_f32:
7941 case NEON::BI__builtin_neon_vceqzh_f16:
7942 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7943 return EmitAArch64CompareBuiltinExpr(
7944 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7945 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
7946 case NEON::BI__builtin_neon_vcgezd_s64:
7947 case NEON::BI__builtin_neon_vcgezd_f64:
7948 case NEON::BI__builtin_neon_vcgezs_f32:
7949 case NEON::BI__builtin_neon_vcgezh_f16:
7950 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7951 return EmitAArch64CompareBuiltinExpr(
7952 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7953 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
7954 case NEON::BI__builtin_neon_vclezd_s64:
7955 case NEON::BI__builtin_neon_vclezd_f64:
7956 case NEON::BI__builtin_neon_vclezs_f32:
7957 case NEON::BI__builtin_neon_vclezh_f16:
7958 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7959 return EmitAArch64CompareBuiltinExpr(
7960 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7961 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
7962 case NEON::BI__builtin_neon_vcgtzd_s64:
7963 case NEON::BI__builtin_neon_vcgtzd_f64:
7964 case NEON::BI__builtin_neon_vcgtzs_f32:
7965 case NEON::BI__builtin_neon_vcgtzh_f16:
7966 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7967 return EmitAArch64CompareBuiltinExpr(
7968 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7969 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
7970 case NEON::BI__builtin_neon_vcltzd_s64:
7971 case NEON::BI__builtin_neon_vcltzd_f64:
7972 case NEON::BI__builtin_neon_vcltzs_f32:
7973 case NEON::BI__builtin_neon_vcltzh_f16:
7974 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7975 return EmitAArch64CompareBuiltinExpr(
7976 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7977 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
7979 case NEON::BI__builtin_neon_vceqzd_u64: {
7980 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7981 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
7982 Ops[0] =
7983 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
7984 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
7985 }
7986 case NEON::BI__builtin_neon_vceqd_f64:
7987 case NEON::BI__builtin_neon_vcled_f64:
7988 case NEON::BI__builtin_neon_vcltd_f64:
7989 case NEON::BI__builtin_neon_vcged_f64:
7990 case NEON::BI__builtin_neon_vcgtd_f64: {
7991 llvm::CmpInst::Predicate P;
7992 switch (BuiltinID) {
7993 default: llvm_unreachable("missing builtin ID in switch!");
7994 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
7995 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
7996 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
7997 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
7998 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
7999 }
8000 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8001 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
8002 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
8003 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
8004 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
8005 }
8006 case NEON::BI__builtin_neon_vceqs_f32:
8007 case NEON::BI__builtin_neon_vcles_f32:
8008 case NEON::BI__builtin_neon_vclts_f32:
8009 case NEON::BI__builtin_neon_vcges_f32:
8010 case NEON::BI__builtin_neon_vcgts_f32: {
8011 llvm::CmpInst::Predicate P;
8012 switch (BuiltinID) {
8013 default: llvm_unreachable("missing builtin ID in switch!");
8014 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
8015 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
8016 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
8017 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
8018 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
8019 }
8020 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8021 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
8022 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
8023 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
8024 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
8025 }
8026 case NEON::BI__builtin_neon_vceqh_f16:
8027 case NEON::BI__builtin_neon_vcleh_f16:
8028 case NEON::BI__builtin_neon_vclth_f16:
8029 case NEON::BI__builtin_neon_vcgeh_f16:
8030 case NEON::BI__builtin_neon_vcgth_f16: {
8031 llvm::CmpInst::Predicate P;
8032 switch (BuiltinID) {
8033 default: llvm_unreachable("missing builtin ID in switch!");
8034 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
8035 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
8036 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
8037 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
8038 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
8039 }
8040 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8041 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
8042 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
8043 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
8044 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
8045 }
8046 case NEON::BI__builtin_neon_vceqd_s64:
8047 case NEON::BI__builtin_neon_vceqd_u64:
8048 case NEON::BI__builtin_neon_vcgtd_s64:
8049 case NEON::BI__builtin_neon_vcgtd_u64:
8050 case NEON::BI__builtin_neon_vcltd_s64:
8051 case NEON::BI__builtin_neon_vcltd_u64:
8052 case NEON::BI__builtin_neon_vcged_u64:
8053 case NEON::BI__builtin_neon_vcged_s64:
8054 case NEON::BI__builtin_neon_vcled_u64:
8055 case NEON::BI__builtin_neon_vcled_s64: {
8056 llvm::CmpInst::Predicate P;
8057 switch (BuiltinID) {
8058 default: llvm_unreachable("missing builtin ID in switch!");
8059 case NEON::BI__builtin_neon_vceqd_s64:
8060 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
8061 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
8062 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
8063 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
8064 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
8065 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
8066 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
8067 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
8068 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
8069 }
8070 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8071 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
8072 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
8073 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
8074 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
8075 }
8076 case NEON::BI__builtin_neon_vtstd_s64:
8077 case NEON::BI__builtin_neon_vtstd_u64: {
8078 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8079 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
8080 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
8081 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
8082 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
8083 llvm::Constant::getNullValue(Int64Ty));
8084 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
8085 }
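// The vset_lane/vget_lane families below map directly onto IR
// insertelement/extractelement. For example (a sketch),
// vsetq_lane_i32(x, v, 1) becomes 'insertelement <4 x i32> %v, i32 %x, i32 1'.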
8086 case NEON::BI__builtin_neon_vset_lane_i8:
8087 case NEON::BI__builtin_neon_vset_lane_i16:
8088 case NEON::BI__builtin_neon_vset_lane_i32:
8089 case NEON::BI__builtin_neon_vset_lane_i64:
8090 case NEON::BI__builtin_neon_vset_lane_f32:
8091 case NEON::BI__builtin_neon_vsetq_lane_i8:
8092 case NEON::BI__builtin_neon_vsetq_lane_i16:
8093 case NEON::BI__builtin_neon_vsetq_lane_i32:
8094 case NEON::BI__builtin_neon_vsetq_lane_i64:
8095 case NEON::BI__builtin_neon_vsetq_lane_f32:
8096 Ops.push_back(EmitScalarExpr(E->getArg(2)));
8097 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
8098 case NEON::BI__builtin_neon_vset_lane_f64:
8099 // The vector type needs a cast for the v1f64 variant.
8100 Ops[1] = Builder.CreateBitCast(Ops[1],
8101 llvm::VectorType::get(DoubleTy, 1));
8102 Ops.push_back(EmitScalarExpr(E->getArg(2)));
8103 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
8104 case NEON::BI__builtin_neon_vsetq_lane_f64:
8105 // The vector type needs a cast for the v2f64 variant.
8106 Ops[1] = Builder.CreateBitCast(Ops[1],
8107 llvm::VectorType::get(DoubleTy, 2));
8108 Ops.push_back(EmitScalarExpr(E->getArg(2)));
8109 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
8111 case NEON::BI__builtin_neon_vget_lane_i8:
8112 case NEON::BI__builtin_neon_vdupb_lane_i8:
8113 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
8114 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8115 "vget_lane");
8116 case NEON::BI__builtin_neon_vgetq_lane_i8:
8117 case NEON::BI__builtin_neon_vdupb_laneq_i8:
8118 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
8119 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8120 "vgetq_lane");
8121 case NEON::BI__builtin_neon_vget_lane_i16:
8122 case NEON::BI__builtin_neon_vduph_lane_i16:
8123 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
8124 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8125 "vget_lane");
8126 case NEON::BI__builtin_neon_vgetq_lane_i16:
8127 case NEON::BI__builtin_neon_vduph_laneq_i16:
8128 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
8129 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8130 "vgetq_lane");
8131 case NEON::BI__builtin_neon_vget_lane_i32:
8132 case NEON::BI__builtin_neon_vdups_lane_i32:
8133 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
8134 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8135 "vget_lane");
8136 case NEON::BI__builtin_neon_vdups_lane_f32:
8137 Ops[0] = Builder.CreateBitCast(Ops[0],
8138 llvm::VectorType::get(FloatTy, 2));
8139 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8140 "vdups_lane");
8141 case NEON::BI__builtin_neon_vgetq_lane_i32:
8142 case NEON::BI__builtin_neon_vdups_laneq_i32:
8143 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
8144 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8145 "vgetq_lane");
8146 case NEON::BI__builtin_neon_vget_lane_i64:
8147 case NEON::BI__builtin_neon_vdupd_lane_i64:
8148 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
8149 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8150 "vget_lane");
8151 case NEON::BI__builtin_neon_vdupd_lane_f64:
8152 Ops[0] = Builder.CreateBitCast(Ops[0],
8153 llvm::VectorType::get(DoubleTy, 1));
8154 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8155 "vdupd_lane");
8156 case NEON::BI__builtin_neon_vgetq_lane_i64:
8157 case NEON::BI__builtin_neon_vdupd_laneq_i64:
8158 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
8159 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8160 "vgetq_lane");
8161 case NEON::BI__builtin_neon_vget_lane_f32:
8162 Ops[0] = Builder.CreateBitCast(Ops[0],
8163 llvm::VectorType::get(FloatTy, 2));
8164 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8165 "vget_lane");
8166 case NEON::BI__builtin_neon_vget_lane_f64:
8167 Ops[0] = Builder.CreateBitCast(Ops[0],
8168 llvm::VectorType::get(DoubleTy, 1));
8169 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8170 "vget_lane");
8171 case NEON::BI__builtin_neon_vgetq_lane_f32:
8172 case NEON::BI__builtin_neon_vdups_laneq_f32:
8173 Ops[0] = Builder.CreateBitCast(Ops[0],
8174 llvm::VectorType::get(FloatTy, 4));
8175 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8176 "vgetq_lane");
8177 case NEON::BI__builtin_neon_vgetq_lane_f64:
8178 case NEON::BI__builtin_neon_vdupd_laneq_f64:
8179 Ops[0] = Builder.CreateBitCast(Ops[0],
8180 llvm::VectorType::get(DoubleTy, 2));
8181 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8182 "vgetq_lane");
8183 case NEON::BI__builtin_neon_vaddh_f16:
8184 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8185 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
8186 case NEON::BI__builtin_neon_vsubh_f16:
8187 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8188 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
8189 case NEON::BI__builtin_neon_vmulh_f16:
8190 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8191 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
8192 case NEON::BI__builtin_neon_vdivh_f16:
8193 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8194 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
8195 case NEON::BI__builtin_neon_vfmah_f16: {
8196 Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
8197 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
8198 return Builder.CreateCall(F,
8199 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
8200 }
8201 case NEON::BI__builtin_neon_vfmsh_f16: {
8202 Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
8203 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
8204 Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
8205 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
8206 return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
8207 }
8208 case NEON::BI__builtin_neon_vaddd_s64:
8209 case NEON::BI__builtin_neon_vaddd_u64:
8210 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
8211 case NEON::BI__builtin_neon_vsubd_s64:
8212 case NEON::BI__builtin_neon_vsubd_u64:
8213 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
8214 case NEON::BI__builtin_neon_vqdmlalh_s16:
8215 case NEON::BI__builtin_neon_vqdmlslh_s16: {
8216 SmallVector<Value *, 2> ProductOps;
8217 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
8218 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
8219 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
8220 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
8221 ProductOps, "vqdmlXl");
8222 Constant *CI = ConstantInt::get(SizeTy, 0);
8223 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
8225 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
8226 ? Intrinsic::aarch64_neon_sqadd
8227 : Intrinsic::aarch64_neon_sqsub;
8228 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
8229 }
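// There is no scalar 16-bit saturating-doubling-multiply intrinsic, so the
// i16 operands are wrapped into <4 x i16> vectors (lane 0), multiplied with
// sqdmull, and lane 0 of the <4 x i32> product is extracted before the
// saturating accumulate.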
8230 case NEON::BI__builtin_neon_vqshlud_n_s64: {
8231 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8232 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
8233 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
8234 Ops, "vqshlu_n");
8235 }
8236 case NEON::BI__builtin_neon_vqshld_n_u64:
8237 case NEON::BI__builtin_neon_vqshld_n_s64: {
8238 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
8239 ? Intrinsic::aarch64_neon_uqshl
8240 : Intrinsic::aarch64_neon_sqshl;
8241 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8242 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
8243 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
8245 case NEON::BI__builtin_neon_vrshrd_n_u64:
8246 case NEON::BI__builtin_neon_vrshrd_n_s64: {
8247 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
8248 ? Intrinsic::aarch64_neon_urshl
8249 : Intrinsic::aarch64_neon_srshl;
8250 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8251 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
8252 Ops[1] = ConstantInt::get(Int64Ty, -SV);
8253 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
8254 }
8255 case NEON::BI__builtin_neon_vrsrad_n_u64:
8256 case NEON::BI__builtin_neon_vrsrad_n_s64: {
8257 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
8258 ? Intrinsic::aarch64_neon_urshl
8259 : Intrinsic::aarch64_neon_srshl;
8260 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
8261 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
8262 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
8263 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
8264 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
8265 }
8266 case NEON::BI__builtin_neon_vshld_n_s64:
8267 case NEON::BI__builtin_neon_vshld_n_u64: {
8268 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
8269 return Builder.CreateShl(
8270 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
8271 }
8272 case NEON::BI__builtin_neon_vshrd_n_s64: {
8273 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
8274 return Builder.CreateAShr(
8275 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
8276 Amt->getZExtValue())),
8277 "shrd_n");
8278 }
8279 case NEON::BI__builtin_neon_vshrd_n_u64: {
8280 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
8281 uint64_t ShiftAmt = Amt->getZExtValue();
8282 // Right-shifting an unsigned value by its size yields 0.
8283 if (ShiftAmt == 64)
8284 return ConstantInt::get(Int64Ty, 0);
8285 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
8286 "shrd_n");
8287 }
8288 case NEON::BI__builtin_neon_vsrad_n_s64: {
8289 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
8290 Ops[1] = Builder.CreateAShr(
8291 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
8292 Amt->getZExtValue())),
8293 "ssra_n");
8294 return Builder.CreateAdd(Ops[0], Ops[1]);
8295 }
8296 case NEON::BI__builtin_neon_vsrad_n_u64: {
8297 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
8298 uint64_t ShiftAmt = Amt->getZExtValue();
8299 // Right-shifting an unsigned value by its size yields 0.
8300 // As Op + 0 = Op, return Ops[0] directly.
8301 if (ShiftAmt == 64)
8302 return Ops[0];
8303 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
8304 "usra_n");
8305 return Builder.CreateAdd(Ops[0], Ops[1]);
8306 }
8307 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
8308 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
8309 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
8310 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
8311 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
8312 "lane");
8313 SmallVector<Value *, 2> ProductOps;
8314 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
8315 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
8316 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
8317 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
8318 ProductOps, "vqdmlXl");
8319 Constant *CI = ConstantInt::get(SizeTy, 0);
8320 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
8321 Ops.pop_back();
8323 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
8324 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
8325 ? Intrinsic::aarch64_neon_sqadd
8326 : Intrinsic::aarch64_neon_sqsub;
8327 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
8328 }
8329 case NEON::BI__builtin_neon_vqdmlals_s32:
8330 case NEON::BI__builtin_neon_vqdmlsls_s32: {
8331 SmallVector<Value *, 2> ProductOps;
8332 ProductOps.push_back(Ops[1]);
8333 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
8334 Ops[1] =
8335 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
8336 ProductOps, "vqdmlXl");
8338 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
8339 ? Intrinsic::aarch64_neon_sqadd
8340 : Intrinsic::aarch64_neon_sqsub;
8341 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
8342 }
8343 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
8344 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
8345 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
8346 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
8347 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
8348 "lane");
8349 SmallVector<Value *, 2> ProductOps;
8350 ProductOps.push_back(Ops[1]);
8351 ProductOps.push_back(Ops[2]);
8352 Ops[1] =
8353 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
8354 ProductOps, "vqdmlXl");
8355 Ops.pop_back();
8357 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
8358 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
8359 ? Intrinsic::aarch64_neon_sqadd
8360 : Intrinsic::aarch64_neon_sqsub;
8361 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
8362 }
8363 case NEON::BI__builtin_neon_vduph_lane_f16: {
8364 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8365 "vget_lane");
8366 }
8367 case NEON::BI__builtin_neon_vduph_laneq_f16: {
8368 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8369 "vgetq_lane");
8370 }
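// The remaining AArch64 cases are MSVC intrinsics: the 8/16/32/64-bit and
// _acq/_rel/_nf (acquire/release/no-fence) variants of each Interlocked
// operation funnel into a shared MSVC builtin handler.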
8371 case AArch64::BI_BitScanForward:
8372 case AArch64::BI_BitScanForward64:
8373 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
8374 case AArch64::BI_BitScanReverse:
8375 case AArch64::BI_BitScanReverse64:
8376 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
8377 case AArch64::BI_InterlockedAnd64:
8378 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
8379 case AArch64::BI_InterlockedExchange64:
8380 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
8381 case AArch64::BI_InterlockedExchangeAdd64:
8382 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
8383 case AArch64::BI_InterlockedExchangeSub64:
8384 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
8385 case AArch64::BI_InterlockedOr64:
8386 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
8387 case AArch64::BI_InterlockedXor64:
8388 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
8389 case AArch64::BI_InterlockedDecrement64:
8390 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
8391 case AArch64::BI_InterlockedIncrement64:
8392 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
  case AArch64::BI_InterlockedExchangeAdd8_acq:
  case AArch64::BI_InterlockedExchangeAdd16_acq:
  case AArch64::BI_InterlockedExchangeAdd_acq:
  case AArch64::BI_InterlockedExchangeAdd64_acq:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
  case AArch64::BI_InterlockedExchangeAdd8_rel:
  case AArch64::BI_InterlockedExchangeAdd16_rel:
  case AArch64::BI_InterlockedExchangeAdd_rel:
  case AArch64::BI_InterlockedExchangeAdd64_rel:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
  case AArch64::BI_InterlockedExchangeAdd8_nf:
  case AArch64::BI_InterlockedExchangeAdd16_nf:
  case AArch64::BI_InterlockedExchangeAdd_nf:
  case AArch64::BI_InterlockedExchangeAdd64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
  case AArch64::BI_InterlockedExchange8_acq:
  case AArch64::BI_InterlockedExchange16_acq:
  case AArch64::BI_InterlockedExchange_acq:
  case AArch64::BI_InterlockedExchange64_acq:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
  case AArch64::BI_InterlockedExchange8_rel:
  case AArch64::BI_InterlockedExchange16_rel:
  case AArch64::BI_InterlockedExchange_rel:
  case AArch64::BI_InterlockedExchange64_rel:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
  case AArch64::BI_InterlockedExchange8_nf:
  case AArch64::BI_InterlockedExchange16_nf:
  case AArch64::BI_InterlockedExchange_nf:
  case AArch64::BI_InterlockedExchange64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
  case AArch64::BI_InterlockedCompareExchange8_acq:
  case AArch64::BI_InterlockedCompareExchange16_acq:
  case AArch64::BI_InterlockedCompareExchange_acq:
  case AArch64::BI_InterlockedCompareExchange64_acq:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
  case AArch64::BI_InterlockedCompareExchange8_rel:
  case AArch64::BI_InterlockedCompareExchange16_rel:
  case AArch64::BI_InterlockedCompareExchange_rel:
  case AArch64::BI_InterlockedCompareExchange64_rel:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
  case AArch64::BI_InterlockedCompareExchange8_nf:
  case AArch64::BI_InterlockedCompareExchange16_nf:
  case AArch64::BI_InterlockedCompareExchange_nf:
  case AArch64::BI_InterlockedCompareExchange64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
  case AArch64::BI_InterlockedOr8_acq:
  case AArch64::BI_InterlockedOr16_acq:
  case AArch64::BI_InterlockedOr_acq:
  case AArch64::BI_InterlockedOr64_acq:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
  case AArch64::BI_InterlockedOr8_rel:
  case AArch64::BI_InterlockedOr16_rel:
  case AArch64::BI_InterlockedOr_rel:
  case AArch64::BI_InterlockedOr64_rel:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
  case AArch64::BI_InterlockedOr8_nf:
  case AArch64::BI_InterlockedOr16_nf:
  case AArch64::BI_InterlockedOr_nf:
  case AArch64::BI_InterlockedOr64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
  case AArch64::BI_InterlockedXor8_acq:
  case AArch64::BI_InterlockedXor16_acq:
  case AArch64::BI_InterlockedXor_acq:
  case AArch64::BI_InterlockedXor64_acq:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
  case AArch64::BI_InterlockedXor8_rel:
  case AArch64::BI_InterlockedXor16_rel:
  case AArch64::BI_InterlockedXor_rel:
  case AArch64::BI_InterlockedXor64_rel:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
  case AArch64::BI_InterlockedXor8_nf:
  case AArch64::BI_InterlockedXor16_nf:
  case AArch64::BI_InterlockedXor_nf:
  case AArch64::BI_InterlockedXor64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
  case AArch64::BI_InterlockedAnd8_acq:
  case AArch64::BI_InterlockedAnd16_acq:
  case AArch64::BI_InterlockedAnd_acq:
  case AArch64::BI_InterlockedAnd64_acq:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
  case AArch64::BI_InterlockedAnd8_rel:
  case AArch64::BI_InterlockedAnd16_rel:
  case AArch64::BI_InterlockedAnd_rel:
  case AArch64::BI_InterlockedAnd64_rel:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
  case AArch64::BI_InterlockedAnd8_nf:
  case AArch64::BI_InterlockedAnd16_nf:
  case AArch64::BI_InterlockedAnd_nf:
  case AArch64::BI_InterlockedAnd64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
  case AArch64::BI_InterlockedIncrement16_acq:
  case AArch64::BI_InterlockedIncrement_acq:
  case AArch64::BI_InterlockedIncrement64_acq:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
  case AArch64::BI_InterlockedIncrement16_rel:
  case AArch64::BI_InterlockedIncrement_rel:
  case AArch64::BI_InterlockedIncrement64_rel:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
  case AArch64::BI_InterlockedIncrement16_nf:
  case AArch64::BI_InterlockedIncrement_nf:
  case AArch64::BI_InterlockedIncrement64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
  case AArch64::BI_InterlockedDecrement16_acq:
  case AArch64::BI_InterlockedDecrement_acq:
  case AArch64::BI_InterlockedDecrement64_acq:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
  case AArch64::BI_InterlockedDecrement16_rel:
  case AArch64::BI_InterlockedDecrement_rel:
  case AArch64::BI_InterlockedDecrement64_rel:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
  case AArch64::BI_InterlockedDecrement16_nf:
  case AArch64::BI_InterlockedDecrement_nf:
  case AArch64::BI_InterlockedDecrement64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
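  // Note on the _acq/_rel/_nf suffixes handled above: they select the memory
  // ordering of the atomic operation (acquire, release, and "no fence",
  // i.e. monotonic/relaxed in LLVM terms), while the unsuffixed forms are
  // sequentially consistent. The shared EmitMSVCBuiltinExpr helper is
  // expected to translate the MSVCIntrin variant into the matching
  // llvm::AtomicOrdering on the atomicrmw/cmpxchg it emits.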
  case AArch64::BI_InterlockedAdd: {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));
    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Add, Arg0, Arg1,
        llvm::AtomicOrdering::SequentiallyConsistent);
    return Builder.CreateAdd(RMWI, Arg1);
  }
  }
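  // _InterlockedAdd (just above) returns the *new* value, whereas LLVM's
  // atomicrmw add yields the value the memory held *before* the update,
  // hence the extra CreateAdd after the atomicrmw. Illustrative IR:
  //   %old = atomicrmw add i32* %p, i32 %v seq_cst
  //   %new = add i32 %old, %v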
  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  // Not all intrinsics handled by the common case work for AArch64 yet, so only
  // defer to common code if it's been added to our special map.
  Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
                                   AArch64SIMDIntrinsicsProvenSorted);

  if (Builtin)
    return EmitCommonNeonBuiltinExpr(
        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
        Builtin->NameHint, Builtin->TypeModifier, E, Ops,
        /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);

  if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
    return V;

  unsigned Int;
  switch (BuiltinID) {
  default: return nullptr;
  case NEON::BI__builtin_neon_vbsl_v:
  case NEON::BI__builtin_neon_vbslq_v: {
    llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
    Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
    Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");

    Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
    Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
    Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
    return Builder.CreateBitCast(Ops[0], Ty);
  }
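  // vbsl has no generic LLVM equivalent, so it is open-coded above as the
  // classic bitwise select (a & b) | (~a & c) on the integer view of the
  // vectors; for each mask bit i this computes a[i] ? b[i] : c[i], and the
  // backend re-folds the pattern into a single BSL instruction.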
  case NEON::BI__builtin_neon_vfma_lane_v:
  case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
    // The ARM builtins (and instructions) have the addend as the first
    // operand, but the 'fma' intrinsics have it last. Swap it around here.
    Value *Addend = Ops[0];
    Value *Multiplicand = Ops[1];
    Value *LaneSource = Ops[2];
    Ops[0] = Multiplicand;
    Ops[1] = LaneSource;
    Ops[2] = Addend;

    // Now adjust things to handle the lane access.
    llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
      llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
      VTy;
    llvm::Constant *cst = cast<Constant>(Ops[3]);
    Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
    Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
    Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
    Ops.pop_back();

    Int = Intrinsic::fma;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
  }
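  // The lane variants above splat the selected multiplicand element across
  // the whole vector with a constant shufflevector mask and then emit a
  // plain llvm.fma. Sketch for a 2-element vector and lane 1 (illustrative
  // names):
  //   %lane = shufflevector <2 x float> %b, <2 x float> %b, <2 x i32> <i32 1, i32 1>
  //   %res  = call <2 x float> @llvm.fma.v2f32(<2 x float> %a, <2 x float> %lane, <2 x float> %acc)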
  case NEON::BI__builtin_neon_vfma_laneq_v: {
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    // v1f64 fma should be mapped to Neon scalar f64 fma
    if (VTy && VTy->getElementType() == DoubleTy) {
      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
      llvm::Type *VTy = GetNeonType(this,
        NeonTypeFlags(NeonTypeFlags::Float64, false, true));
      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
      Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
      Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
      return Builder.CreateBitCast(Result, Ty);
    }
    Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                            VTy->getNumElements() * 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

    return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfmaq_laneq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
    return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfmah_lane_f16:
  case NEON::BI__builtin_neon_vfmas_lane_f32:
  case NEON::BI__builtin_neon_vfmah_laneq_f16:
  case NEON::BI__builtin_neon_vfmas_laneq_f32:
  case NEON::BI__builtin_neon_vfmad_lane_f64:
  case NEON::BI__builtin_neon_vfmad_laneq_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(3)));
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
    if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vmax_v:
  case NEON::BI__builtin_neon_vmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case NEON::BI__builtin_neon_vmaxh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fmax;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
  }
  case NEON::BI__builtin_neon_vmin_v:
  case NEON::BI__builtin_neon_vminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
  case NEON::BI__builtin_neon_vminh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fmin;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
  }
  case NEON::BI__builtin_neon_vabd_v:
  case NEON::BI__builtin_neon_vabdq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    unsigned ArgElts = VTy->getNumElements();
    llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
    unsigned BitWidth = EltTy->getBitWidth();
    llvm::Type *ArgTy = llvm::VectorType::get(
        llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
    llvm::Type* Tys[2] = { VTy, ArgTy };
    Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
    SmallVector<llvm::Value*, 1> TmpOps;
    TmpOps.push_back(Ops[1]);
    Function *F = CGM.getIntrinsic(Int, Tys);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
    llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
    return Builder.CreateAdd(tmp, addend);
  }
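  // vpadal (pairwise add and accumulate long) also has no accumulating LLVM
  // intrinsic, so it is decomposed above into [us]addlp (the pairwise
  // widening add) plus an ordinary vector add for the accumulator. For an
  // int8x8_t input, e.g., saddlp([1,2,3,4,5,6,7,8]) = [3,7,11,15] and the
  // result is acc + [3,7,11,15].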
  case NEON::BI__builtin_neon_vpmin_v:
  case NEON::BI__builtin_neon_vpminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case NEON::BI__builtin_neon_vpmax_v:
  case NEON::BI__builtin_neon_vpmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case NEON::BI__builtin_neon_vminnm_v:
  case NEON::BI__builtin_neon_vminnmq_v:
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
  case NEON::BI__builtin_neon_vminnmh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
  case NEON::BI__builtin_neon_vmaxnm_v:
  case NEON::BI__builtin_neon_vmaxnmq_v:
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vmaxnmh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vrecpss_f32: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vrecpsd_f64:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
                        Ops, "vrecps");
  case NEON::BI__builtin_neon_vrecpsh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
                        Ops, "vrecps");
  case NEON::BI__builtin_neon_vqshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqrshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    Int = Intrinsic::aarch64_neon_rshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
  case NEON::BI__builtin_neon_vrndah_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrnda_v:
  case NEON::BI__builtin_neon_vrndaq_v: {
    Int = Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrndih_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
  }
  case NEON::BI__builtin_neon_vrndmh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndm_v:
  case NEON::BI__builtin_neon_vrndmq_v: {
    Int = Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndnh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndn_v:
  case NEON::BI__builtin_neon_vrndnq_v: {
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndns_f32: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndph_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndp_v:
  case NEON::BI__builtin_neon_vrndpq_v: {
    Int = Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndxh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrndx_v:
  case NEON::BI__builtin_neon_vrndxq_v: {
    Int = Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrndh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
  }
  case NEON::BI__builtin_neon_vrnd_v:
  case NEON::BI__builtin_neon_vrndq_v: {
    Int = Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
  }
  case NEON::BI__builtin_neon_vcvt_f64_v:
  case NEON::BI__builtin_neon_vcvtq_f64_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f64_f32: {
    assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
           "unexpected vcvt_f64_f32 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_f32_f64: {
    assert(Type.getEltType() == NeonTypeFlags::Float32 &&
           "unexpected vcvt_f32_f64 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_v:
  case NEON::BI__builtin_neon_vcvt_u16_v:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_v:
  case NEON::BI__builtin_neon_vcvtq_u16_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
    if (usgn)
      return Builder.CreateFPToUI(Ops[0], Ty);
    return Builder.CreateFPToSI(Ops[0], Ty);
  }
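  // The plain vcvt_* conversions above can use LLVM's native fptoui/fptosi
  // because both round toward zero. The vcvta/vcvtm/vcvtn/vcvtp variants
  // below round to-nearest-away, toward -inf, to-nearest-even, and toward
  // +inf respectively, which generic IR cannot express, so they go through
  // target intrinsics; e.g. vcvtas_s32_f32(1.5f) yields 2 while a C-style
  // cast (int)1.5f truncates to 1.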
  case NEON::BI__builtin_neon_vcvta_s16_v:
  case NEON::BI__builtin_neon_vcvta_u16_v:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_v:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_v:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
  }
  case NEON::BI__builtin_neon_vcvtm_s16_v:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_v:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtm_u16_v:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_v:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
  }
  case NEON::BI__builtin_neon_vcvtn_s16_v:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_v:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtn_u16_v:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_v:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
  }
  case NEON::BI__builtin_neon_vcvtp_s16_v:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_v:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtp_u16_v:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_v:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
  }
  case NEON::BI__builtin_neon_vmulx_v:
  case NEON::BI__builtin_neon_vmulxq_v: {
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmulxh_lane_f16:
  case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
    // vmulx_lane should be mapped to Neon scalar mulx after
    // extracting the scalar element
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Ops.pop_back();
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmul_lane_v:
  case NEON::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = false;
    if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
      Quad = true;
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    llvm::Type *VTy = GetNeonType(this,
      NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Result, Ty);
  }
  case NEON::BI__builtin_neon_vnegd_s64:
    return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
  case NEON::BI__builtin_neon_vnegh_f16:
    return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
  case NEON::BI__builtin_neon_vpmaxnm_v:
  case NEON::BI__builtin_neon_vpmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_fmaxnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case NEON::BI__builtin_neon_vpminnm_v:
  case NEON::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_fminnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
  case NEON::BI__builtin_neon_vsqrth_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::sqrt;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vsqrt_v:
  case NEON::BI__builtin_neon_vsqrtq_v: {
    Int = Intrinsic::sqrt;
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vrbit_v:
  case NEON::BI__builtin_neon_vrbitq_v: {
    Int = Intrinsic::aarch64_neon_rbit;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
  }
  case NEON::BI__builtin_neon_vaddv_u8:
    // FIXME: These are handled by the AArch64 scalar code.
    usgn = true;
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vaddv_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
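  // The across-vector reductions in this stretch (vaddv, vmaxv, vminv,
  // vaddlv, ...) all share one shape: the target intrinsic is overloaded on
  // {result, vector} types and widens sub-32-bit results to i32, so each
  // call is followed by a trunc back to the element width. Illustrative IR
  // for vaddv_u8:
  //   %sum = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %v)
  //   %res = trunc i32 %sum to i8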
  case NEON::BI__builtin_neon_vaddv_u16:
    usgn = true;
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vaddv_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddvq_u8:
    usgn = true;
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vaddvq_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vaddvq_u16:
    usgn = true;
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vaddvq_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_f16: {
    Int = Intrinsic::aarch64_neon_fmaxv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxvq_f16: {
    Int = Intrinsic::aarch64_neon_fmaxv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminv_u8: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminv_u16: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminvq_u8: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminvq_u16: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminv_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminv_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminvq_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminvq_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminv_f16: {
    Int = Intrinsic::aarch64_neon_fminv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminvq_f16: {
    Int = Intrinsic::aarch64_neon_fminv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxnmv_f16: {
    Int = Intrinsic::aarch64_neon_fmaxnmv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxnmvq_f16: {
    Int = Intrinsic::aarch64_neon_fmaxnmv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminnmv_f16: {
    Int = Intrinsic::aarch64_neon_fminnmv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminnmvq_f16: {
    Int = Intrinsic::aarch64_neon_fminnmv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmul_n_f64: {
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
    return Builder.CreateFMul(Ops[0], RHS);
  }
  case NEON::BI__builtin_neon_vaddlv_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlv_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlvq_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlv_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlv_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlvq_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v: {
    Int = Intrinsic::aarch64_neon_vsri;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsri_n");
  }
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v: {
    Int = Intrinsic::aarch64_neon_vsli;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsli_n");
  }
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v: {
    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
    SmallVector<llvm::Value*,2> TmpOps;
    TmpOps.push_back(Ops[1]);
    TmpOps.push_back(Ops[2]);
    Function* F = CGM.getIntrinsic(Int, Ty);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return Builder.CreateAdd(Ops[0], tmp);
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
    auto Alignment = CharUnits::fromQuantity(
        BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
    return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
  }
  case NEON::BI__builtin_neon_vst1_v:
  case NEON::BI__builtin_neon_vst1q_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  case NEON::BI__builtin_neon_vld1_lane_v:
  case NEON::BI__builtin_neon_vld1q_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    auto Alignment = CharUnits::fromQuantity(
        BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
    Ops[0] =
        Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    auto Alignment = CharUnits::fromQuantity(
        BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
    Ops[0] =
        Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
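  // vld1_dup is expanded generically above: load one scalar, insert it into
  // lane 0 of an undef vector, then splat lane 0 via EmitNeonSplat's
  // shufflevector; the backend matches this back to a single LD1R. Sketch
  // for a 2 x float duplicate (illustrative):
  //   %s = load float, float* %p
  //   %v = insertelement <2 x float> undef, float %s, i32 0
  //   %r = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer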
  case NEON::BI__builtin_neon_vst1_lane_v:
  case NEON::BI__builtin_neon_vst1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    return Builder.CreateDefaultAlignedStore(Ops[1],
                                             Builder.CreateBitCast(Ops[0], Ty));
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v: {
    llvm::Type *PTy =
        llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v: {
    llvm::Type *PTy =
        llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *PTy =
        llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
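  // For the ldN_lane forms the pointer arrives first in Ops but the
  // aarch64.neon.ldNlane intrinsics take it last, so the push_back/erase
  // pair rotates the pointer to the end before the call (hence the
  // makeArrayRef(Ops).slice(1)), and the lane index is zero-extended to the
  // i64 the intrinsic expects.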
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<uint32_t, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(i+vi);
        Indices.push_back(i+e+vi);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
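  // vtrn/vuzp/vzip (here and below) each produce two result vectors, so the
  // loop emits two shufflevectors and stores them through the result
  // pointer in Ops[0]. For vtrn on <4 x i16>, e.g., iteration vi==0 uses
  // mask <0, 4, 2, 6> and vi==1 uses <1, 5, 3, 7>, interleaving the even
  // and odd lanes of the two inputs.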
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<uint32_t, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(2*i+vi);

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<uint32_t, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back((i + vi*e) >> 1);
        Indices.push_back(((i + vi*e) >> 1)+e);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vqtbl1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
                        Ops, "vtbl1");
  }
  case NEON::BI__builtin_neon_vqtbl2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
                        Ops, "vtbl2");
  }
  case NEON::BI__builtin_neon_vqtbl3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
                        Ops, "vtbl3");
  }
  case NEON::BI__builtin_neon_vqtbl4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
                        Ops, "vtbl4");
  }
  case NEON::BI__builtin_neon_vqtbx1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
                        Ops, "vtbx1");
  }
  case NEON::BI__builtin_neon_vqtbx2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
                        Ops, "vtbx2");
  }
  case NEON::BI__builtin_neon_vqtbx3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
                        Ops, "vtbx3");
  }
  case NEON::BI__builtin_neon_vqtbx4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
                        Ops, "vtbx4");
  }
  case NEON::BI__builtin_neon_vsqadd_v:
  case NEON::BI__builtin_neon_vsqaddq_v: {
    Int = Intrinsic::aarch64_neon_usqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
  }
  case NEON::BI__builtin_neon_vuqadd_v:
  case NEON::BI__builtin_neon_vuqaddq_v: {
    Int = Intrinsic::aarch64_neon_suqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
  }
  }
}
Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  assert(BuiltinID == BPF::BI__builtin_preserve_field_info &&
         "unexpected BPF builtin");

  const Expr *Arg = E->getArg(0);
  bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;

  if (!getDebugInfo()) {
    CGM.Error(E->getExprLoc(), "using builtin_preserve_field_info() without -g");
    return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
                      : EmitLValue(Arg).getPointer(*this);
  }

  // Enable underlying preserve_*_access_index() generation.
  bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
  IsInPreservedAIRegion = true;
  Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
                                : EmitLValue(Arg).getPointer(*this);
  IsInPreservedAIRegion = OldIsInPreservedAIRegion;

  ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
  Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());

  // Build the IR for the preserve_field_info intrinsic.
  llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
      &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
      {FieldAddr->getType()});
  return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
}
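// __builtin_preserve_field_info is part of BPF CO-RE ("compile once, run
// everywhere"): the field address is computed inside a preserved-access-
// index region so the relocation is recorded, and the second argument
// selects which piece of field information to materialize. A hypothetical
// use (the struct and field names are illustrative, and kind 0 is assumed
// to request the byte offset):
//   struct task { int pid; };
//   unsigned off = __builtin_preserve_field_info(t->pid, 0);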
llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    SmallVector<llvm::Constant*, 16> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));

  return Result;
}
// Convert the mask from an integer type to a vector of i1.
static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
                              unsigned NumElts) {

  llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
                         cast<IntegerType>(Mask->getType())->getBitWidth());
  Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
                                              makeArrayRef(Indices, NumElts),
                                              "extract");
  }

  return MaskVec;
}
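// AVX-512 passes compare/blend masks around as plain integers, one bit per
// vector lane, so getMaskVecValue bitcasts e.g. an i16 mask to <16 x i1>.
// Vectors with fewer than 8 lanes still receive an i8 mask, and the
// shufflevector above extracts just the low NumElts bits. Illustrative IR
// for a 4-lane mask:
//   %vec8 = bitcast i8 %mask to <8 x i1>
//   %vec4 = shufflevector <8 x i1> %vec8, <8 x i1> %vec8, <4 x i32> <i32 0, i32 1, i32 2, i32 3>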
static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
                                 ArrayRef<Value *> Ops,
                                 unsigned Align) {
  // Cast the pointer to the right type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                               llvm::PointerType::getUnqual(Ops[1]->getType()));

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   Ops[1]->getType()->getVectorNumElements());

  return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
}
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
                                ArrayRef<Value *> Ops, unsigned Align) {
  // Cast the pointer to the right type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                               llvm::PointerType::getUnqual(Ops[1]->getType()));

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   Ops[1]->getType()->getVectorNumElements());

  return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
}
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
                                ArrayRef<Value *> Ops) {
  llvm::Type *ResultTy = Ops[1]->getType();
  llvm::Type *PtrTy = ResultTy->getVectorElementType();

  // Cast the pointer to element type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                         llvm::PointerType::getUnqual(PtrTy));

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   ResultTy->getVectorNumElements());

  llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
                                           ResultTy);
  return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
}
static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops,
                                    bool IsCompress) {
  llvm::Type *ResultTy = Ops[1]->getType();

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   ResultTy->getVectorNumElements());

  Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
                                 : Intrinsic::x86_avx512_mask_expand;
  llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
  return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
}
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
                                   ArrayRef<Value *> Ops) {
  llvm::Type *ResultTy = Ops[1]->getType();
  llvm::Type *PtrTy = ResultTy->getVectorElementType();

  // Cast the pointer to element type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                         llvm::PointerType::getUnqual(PtrTy));

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   ResultTy->getVectorNumElements());

  llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
                                           ResultTy);
  return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
}

static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
                               ArrayRef<Value *> Ops,
                               bool InvertLHS = false) {
  unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
  Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
  Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);

  if (InvertLHS)
    LHS = CGF.Builder.CreateNot(LHS);

  return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
                                   Ops[0]->getType());
}

static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
                                 Value *Amt, bool IsRight) {
  llvm::Type *Ty = Op0->getType();

  // Amount may be a scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated modulo the element width, and all the
  // types involved are powers of 2, so we only care about the lowest log2
  // bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
  }

  unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGF.CGM.getIntrinsic(IID, Ty);
  return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
}
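// For reference, LLVM funnel-shift semantics: fshl(X, Y, A) concatenates X
// (as the high half) with Y (as the low half), shifts left by A modulo the
// bit width, and keeps the high half. On i32, for example, fshl(X, Y, 8) is
// (X << 8) | (Y >> 24), and fshl(X, X, 8) is a rotate left by 8.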

static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                           bool IsSigned) {
  Value *Op0 = Ops[0];
  Value *Op1 = Ops[1];
  llvm::Type *Ty = Op0->getType();
  uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return llvm::Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return llvm::Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
  return Res;
}

static Value *EmitX86Select(CodeGenFunction &CGF,
                            Value *Mask, Value *Op0, Value *Op1) {

  // If the mask is all ones, just return the first argument.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements());

  return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
                                  Value *Mask, Value *Op0, Value *Op1) {
  // If the mask is all ones, just return the first argument.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  // Convert the mask to a vector of i1 and pick out the lowest bit.
  llvm::VectorType *MaskTy =
      llvm::VectorType::get(CGF.Builder.getInt1Ty(),
                            Mask->getType()->getIntegerBitWidth());
  Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
  Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
  return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
                                         unsigned NumElts, Value *MaskIn) {
  if (MaskIn) {
    const auto *C = dyn_cast<Constant>(MaskIn);
    if (!C || !C->isAllOnesValue())
      Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
  }

  // Pad the compare result out to at least 8 elements with zeroes so it can
  // be bitcast to an i8 mask.
  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = i % NumElts + NumElts;
    Cmp = CGF.Builder.CreateShuffleVector(
        Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
  }

  return CGF.Builder.CreateBitCast(Cmp,
                                   IntegerType::get(CGF.getLLVMContext(),
                                                    std::max(NumElts, 8U)));
}
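// Worked example (illustrative): a 4-element compare yielding <1,0,1,1> is
// padded with four zero elements and bitcast to i8, producing 0x0D (element
// 0 maps to bit 0). This matches the __mmask8 ABI, where the unused high
// mask bits are zero.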

static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
                                   bool Signed, ArrayRef<Value *> Ops) {
  assert((Ops.size() == 2 || Ops.size() == 4) &&
         "Unexpected number of arguments");
  unsigned NumElts = Ops[0]->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ;  break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE;  break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
  }

  Value *MaskIn = nullptr;
  if (Ops.size() == 4)
    MaskIn = Ops[3];

  return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
}

static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
  // Compare In < 0 (signed), i.e. collect the sign bits into a mask.
  Value *Zero = Constant::getNullValue(In->getType());
  return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
}

static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops, bool IsSigned) {
  unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
  llvm::Type *Ty = Ops[1]->getType();

  Value *Res;
  if (Rnd != 4) {
    Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
                                 : Intrinsic::x86_avx512_uitofp_round;
    Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
    Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
  } else {
    Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
                   : CGF.Builder.CreateUIToFP(Ops[0], Ty);
  }

  return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
}

static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {

  llvm::Type *Ty = Ops[0]->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]);
  Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero);
  Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub);
  return Res;
}

static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
                            ArrayRef<Value *> Ops) {
  assert(Ops.size() == 2);
  Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
  Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
  return Res;
}

// Lowers X86 FMA intrinsics to IR.
static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                             unsigned BuiltinID, bool IsAddSub) {

  bool Subtract = false;
  Intrinsic::ID IID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  default: break;
  case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
    Subtract = true;
    LLVM_FALLTHROUGH;
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
  case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
    Subtract = true;
    LLVM_FALLTHROUGH;
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
  case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
    Subtract = true;
    LLVM_FALLTHROUGH;
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
    break;
  case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
    Subtract = true;
    LLVM_FALLTHROUGH;
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
    break;
  }

  Value *A = Ops[0];
  Value *B = Ops[1];
  Value *C = Ops[2];

  if (Subtract)
    C = CGF.Builder.CreateFNeg(C);

  Value *Res;

  // Only use the AVX512 rounding intrinsic when the rounding mode is not
  // _MM_FROUND_CUR_DIRECTION (4); the generic llvm.fma assumes the current
  // rounding direction.
  if (IID != Intrinsic::not_intrinsic &&
      cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) {
    Function *Intr = CGF.CGM.getIntrinsic(IID);
    Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
  } else {
    llvm::Type *Ty = A->getType();
    Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
    Res = CGF.Builder.CreateCall(FMA, {A, B, C} );

    if (IsAddSub) {
      // Negate even elts in C using a mask.
      unsigned NumElts = Ty->getVectorNumElements();
      SmallVector<uint32_t, 16> Indices(NumElts);
      for (unsigned i = 0; i != NumElts; ++i)
        Indices[i] = i + (i % 2) * NumElts;

      Value *NegC = CGF.Builder.CreateFNeg(C);
      Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
      Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
    }
  }

  // Handle any required masking.
  Value *MaskFalseVal = nullptr;
  switch (BuiltinID) {
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
    MaskFalseVal = Ops[0];
    break;
  case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
    MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
    break;
  case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
    MaskFalseVal = Ops[2];
    break;
  }

  if (MaskFalseVal)
    return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);

  return Res;
}
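// Note on the IsAddSub blend above: with Indices[i] = i + (i % 2) * NumElts,
// even lanes take fma(A, B, -C) and odd lanes take fma(A, B, C). For four
// elements the shuffle mask is <0, 5, 2, 7>, i.e. {Sub0, Add1, Sub2, Add3},
// which is the fmaddsub lane pattern.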

static Value *
EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
                  Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
                  bool NegAcc = false) {
  unsigned Rnd = 4;
  if (Ops.size() > 4)
    Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();

  if (NegAcc)
    Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);

  Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
  Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
  Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
  Value *Res;
  if (Rnd != 4) {
    Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
                        Intrinsic::x86_avx512_vfmadd_f32 :
                        Intrinsic::x86_avx512_vfmadd_f64;
    Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
                                 {Ops[0], Ops[1], Ops[2], Ops[4]});
  } else {
    Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
    Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
  }
  // If we have more than 3 arguments, we need to do masking.
  if (Ops.size() > 3) {
    Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
                               : Ops[PTIdx];

    // If we negated the accumulator and it's the PassThru value, we need to
    // bypass the negate. Conveniently Upper should be the same thing in this
    // case.
    if (NegAcc && PTIdx == 2)
      PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);

    Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
  }
  return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
}

static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
                           ArrayRef<Value *> Ops) {
  llvm::Type *Ty = Ops[0]->getType();
  // Arguments have a vXi32 type so cast to vXi64.
  Ty = llvm::VectorType::get(CGF.Int64Ty,
                             Ty->getPrimitiveSizeInBits() / 64);
  Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
  Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
    LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
    RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
    RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = CGF.Builder.CreateAnd(LHS, Mask);
    RHS = CGF.Builder.CreateAnd(RHS, Mask);
  }

  return CGF.Builder.CreateMul(LHS, RHS);
}
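// Worked example for the signed path (illustrative): a 64-bit lane holding
// 0x00000000FFFFFFFF becomes 0xFFFFFFFF00000000 after the shl and
// 0xFFFFFFFFFFFFFFFF (-1) after the ashr, i.e. the low 32 bits are
// sign-extended in place before the full 64-bit multiply.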

// Emit a masked pternlog intrinsic. This only exists because the header has to
// use a macro and we aren't able to pass the input argument to a pternlog
// builtin and a select builtin without evaluating it twice.
static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
                             ArrayRef<Value *> Ops) {
  llvm::Type *Ty = Ops[0]->getType();

  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_128;
  else if (VecWidth == 256 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_256;
  else if (VecWidth == 512 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_512;
  else if (VecWidth == 128 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_128;
  else if (VecWidth == 256 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_256;
  else if (VecWidth == 512 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
                                          Ops.drop_back());
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
  return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
}
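// For reference: the pternlog imm8 is a 3-input truth table indexed by the
// corresponding bits of the three sources, with the first source as the most
// significant index bit. For example (illustrative), imm8 0xCA computes the
// bitwise A ? B : C, and 0xE8 is the 3-way bitwise majority.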

static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                              llvm::Type *DstTy) {
  unsigned NumberOfElements = DstTy->getVectorNumElements();
  Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
  return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}

// Emit addition or subtraction with signed/unsigned saturation.
static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF,
                                   ArrayRef<Value *> Ops, bool IsSigned,
                                   bool IsAddition) {
  Intrinsic::ID IID =
      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
  llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
  return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
}
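// Saturating semantics, for reference (illustrative, i8 lanes): sadd.sat
// clamps into the signed range, so 100 + 100 yields 127 and -100 + -100
// yields -128; usub.sat clamps at zero, so 10 - 20 yields 0.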

Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
  const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
  StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
  return EmitX86CpuIs(CPUStr);
}

// Convert a BF16 to a float.
static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
                                        const CallExpr *E,
                                        ArrayRef<Value *> Ops) {
  llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
  Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
  Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
  return BitCast;
}
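// bfloat16 shares its bit layout with the upper half of an IEEE-754
// binary32, so widening is just a 16-bit left shift into an i32 followed by
// a bitcast. Worked example (illustrative): the bf16 pattern 0x3F80 becomes
// 0x3F800000, which is 1.0f.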

Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {

  llvm::Type *Int32Ty = Builder.getInt32Ty();

  // Matching the struct layout from the compiler-rt/libgcc structure that is
  // filled in:
  // unsigned int __cpu_vendor;
  // unsigned int __cpu_type;
  // unsigned int __cpu_subtype;
  // unsigned int __cpu_features[1];
  llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
                                          llvm::ArrayType::get(Int32Ty, 1));

  // Grab the global __cpu_model.
  llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
  cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);

  // Calculate the index needed to access the correct field based on the
  // range. Also adjust the expected value.
  unsigned Index;
  unsigned Value;
  std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define X86_VENDOR(ENUM, STRING)                                               \
  .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS)             \
  .Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR)                               \
  .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR)                            \
  .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#include "llvm/Support/X86TargetParser.def"
                               .Default({0, 0});
  assert(Value != 0 && "Invalid CPUStr passed to CpuIs");

  // Grab the appropriate field from __cpu_model.
  llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
                         ConstantInt::get(Int32Ty, Index)};
  llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
  CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));

  // Check the value of the field against the requested value.
  return Builder.CreateICmpEQ(CpuValue,
                              llvm::ConstantInt::get(Int32Ty, Value));
}
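// Example usage (illustrative): __builtin_cpu_is("intel") compares the
// __cpu_vendor field (Index 0) against the vendor enum, while a CPU name
// such as "skylake" selects the __cpu_type/__cpu_subtype fields. Either way
// the builtin lowers to one aligned i32 load from __cpu_model plus an
// integer equality compare.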

Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
  const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
  StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
  return EmitX86CpuSupports(FeatureStr);
}

uint64_t
CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
  // Processor features and mapping to processor feature value.
  uint64_t FeaturesMask = 0;
  for (const StringRef &FeatureStr : FeatureStrs) {
    unsigned Feature =
        StringSwitch<unsigned>(FeatureStr)
#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
#include "llvm/Support/X86TargetParser.def"
        ;
    FeaturesMask |= (1ULL << Feature);
  }
  return FeaturesMask;
}

Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
  return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
}

llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
  uint32_t Features1 = Lo_32(FeaturesMask);
  uint32_t Features2 = Hi_32(FeaturesMask);

  Value *Result = Builder.getTrue();

  if (Features1 != 0) {
    // Matching the struct layout from the compiler-rt/libgcc structure that is
    // filled in:
    // unsigned int __cpu_vendor;
    // unsigned int __cpu_type;
    // unsigned int __cpu_subtype;
    // unsigned int __cpu_features[1];
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
                                            llvm::ArrayType::get(Int32Ty, 1));

    // Grab the global __cpu_model.
    llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
    cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);

    // Grab the first (0th) element from the field __cpu_features off of the
    // global in the struct STy.
    Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
                     Builder.getInt32(0)};
    Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
    Value *Features =
        Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));

    // Check the value of the bit corresponding to the feature requested.
    Value *Mask = Builder.getInt32(Features1);
    Value *Bitset = Builder.CreateAnd(Features, Mask);
    Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
    Result = Builder.CreateAnd(Result, Cmp);
  }

  if (Features2 != 0) {
    llvm::Constant *CpuFeatures2 =
        CGM.CreateRuntimeVariable(Int32Ty, "__cpu_features2");
    cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);

    Value *Features =
        Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));

    // Check the value of the bit corresponding to the feature requested.
    Value *Mask = Builder.getInt32(Features2);
    Value *Bitset = Builder.CreateAnd(Features, Mask);
    Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
    Result = Builder.CreateAnd(Result, Cmp);
  }

  return Result;
}
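// Example usage (illustrative):
//   if (__builtin_cpu_supports("avx2") && __builtin_cpu_supports("fma"))
//     /* take the AVX2+FMA code path */;
// Feature bits below 32 are tested against __cpu_model.__cpu_features[0];
// higher bits are tested against the separate __cpu_features2 global.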

Value *CodeGenFunction::EmitX86CpuInit() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
                                                    /*Variadic*/ false);
  llvm::FunctionCallee Func =
      CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
  cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
  cast<llvm::GlobalValue>(Func.getCallee())
      ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
  return Builder.CreateCall(Func);
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == X86::BI__builtin_cpu_is)
    return EmitX86CpuIs(E);
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return EmitX86CpuSupports(E);
  if (BuiltinID == X86::BI__builtin_cpu_init)
    return EmitX86CpuInit();

  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  // These exist so that the builtin that takes an immediate can be bounds
  // checked by clang to avoid passing bad immediates to the backend. Since
  // AVX has a larger immediate than SSE we would need separate builtins to
  // do the different bounds checking. Rather than create a clang specific
  // SSE only builtin, this implements eight separate builtins to match gcc
  // implementation.
  auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
    Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops);
  };

  // For the vector forms of FP comparisons, translate the builtins directly to
  // IR.
  // TODO: The builtins could be removed if the SSE header files used vector
  // extension comparisons directly (vector ordered/unordered may need
  // additional support via __builtin_isnan()).
  auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) {
    Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
    llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
    llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
    Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
    return Builder.CreateBitCast(Sext, FPVecTy);
  };

  switch (BuiltinID) {
  default: return nullptr;
  case X86::BI_mm_prefetch: {
    Value *Address = Ops[0];
    ConstantInt *C = cast<ConstantInt>(Ops[1]);
    Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
    Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
    Value *Data = ConstantInt::get(Int32Ty, 1);
    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return Builder.CreateCall(F, {Address, RW, Locality, Data});
  }
  case X86::BI_mm_clflush: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
                              Ops[0]);
  }
  case X86::BI_mm_lfence: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
  }
  case X86::BI_mm_mfence: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
  }
  case X86::BI_mm_sfence: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
  }
  case X86::BI_mm_pause: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
  }
  case X86::BI__rdtsc: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
  }
  case X86::BI__builtin_ia32_rdtscp: {
    Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
    Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
                                      Ops[0]);
    return Builder.CreateExtractValue(Call, 0);
  }
  case X86::BI__builtin_ia32_lzcnt_u16:
  case X86::BI__builtin_ia32_lzcnt_u32:
  case X86::BI__builtin_ia32_lzcnt_u64: {
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
    return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
  }
  case X86::BI__builtin_ia32_tzcnt_u16:
  case X86::BI__builtin_ia32_tzcnt_u32:
  case X86::BI__builtin_ia32_tzcnt_u64: {
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
    return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
  }
  case X86::BI__builtin_ia32_undef128:
  case X86::BI__builtin_ia32_undef256:
  case X86::BI__builtin_ia32_undef512:
    // The x86 definition of "undef" is not the same as the LLVM definition
    // (PR32176). We leave optimizing away an unnecessary zero constant to the
    // IR optimizer and backend.
    // TODO: If we had a "freeze" IR instruction to generate a fixed undef
    // value, we should use that here instead of a zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vec_ext_v32qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
  case X86::BI__builtin_ia32_vec_ext_v4di: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
    Index &= NumElts - 1;
    // These builtins exist so we can ensure the index is an ICE and in range.
    // Otherwise we could just do this in the header file.
    return Builder.CreateExtractElement(Ops[0], Index);
  }
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vec_set_v32qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
  case X86::BI__builtin_ia32_vec_set_v4di: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
    Index &= NumElts - 1;
    // These builtins exist so we can ensure the index is an ICE and in range.
    // Otherwise we could just do this in the header file.
    return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
  }
  case X86::BI_mm_setcsr:
  case X86::BI__builtin_ia32_ldmxcsr: {
    Address Tmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                          Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
  }
  case X86::BI_mm_getcsr:
  case X86::BI__builtin_ia32_stmxcsr: {
    Address Tmp = CreateMemTemp(E->getType());
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_xsave:
  case X86::BI__builtin_ia32_xsave64:
  case X86::BI__builtin_ia32_xrstor:
  case X86::BI__builtin_ia32_xrstor64:
  case X86::BI__builtin_ia32_xsaveopt:
  case X86::BI__builtin_ia32_xsaveopt64:
  case X86::BI__builtin_ia32_xrstors:
  case X86::BI__builtin_ia32_xrstors64:
  case X86::BI__builtin_ia32_xsavec:
  case X86::BI__builtin_ia32_xsavec64:
  case X86::BI__builtin_ia32_xsaves:
  case X86::BI__builtin_ia32_xsaves64:
  case X86::BI__builtin_ia32_xsetbv:
  case X86::BI_xsetbv: {
    Intrinsic::ID ID;
#define INTRINSIC_X86_XSAVE_ID(NAME) \
  case X86::BI__builtin_ia32_##NAME: \
    ID = Intrinsic::x86_##NAME; \
    break
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    INTRINSIC_X86_XSAVE_ID(xsave);
    INTRINSIC_X86_XSAVE_ID(xsave64);
    INTRINSIC_X86_XSAVE_ID(xrstor);
    INTRINSIC_X86_XSAVE_ID(xrstor64);
    INTRINSIC_X86_XSAVE_ID(xsaveopt);
    INTRINSIC_X86_XSAVE_ID(xsaveopt64);
    INTRINSIC_X86_XSAVE_ID(xrstors);
    INTRINSIC_X86_XSAVE_ID(xrstors64);
    INTRINSIC_X86_XSAVE_ID(xsavec);
    INTRINSIC_X86_XSAVE_ID(xsavec64);
    INTRINSIC_X86_XSAVE_ID(xsaves);
    INTRINSIC_X86_XSAVE_ID(xsaves64);
    INTRINSIC_X86_XSAVE_ID(xsetbv);
    case X86::BI_xsetbv:
      ID = Intrinsic::x86_xsetbv;
      break;
    }
#undef INTRINSIC_X86_XSAVE_ID
    // Split the 64-bit mask into two i32 halves, as the intrinsics expect.
    Value *Mhi = Builder.CreateTrunc(
        Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
    Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
    Ops[1] = Mhi;
    Ops.push_back(Mlo);
    return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
  }
  case X86::BI__builtin_ia32_xgetbv:
  case X86::BI_xgetbv:
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
  case X86::BI__builtin_ia32_storedqudi128_mask:
  case X86::BI__builtin_ia32_storedqusi128_mask:
  case X86::BI__builtin_ia32_storedquhi128_mask:
  case X86::BI__builtin_ia32_storedquqi128_mask:
  case X86::BI__builtin_ia32_storeupd128_mask:
  case X86::BI__builtin_ia32_storeups128_mask:
  case X86::BI__builtin_ia32_storedqudi256_mask:
  case X86::BI__builtin_ia32_storedqusi256_mask:
  case X86::BI__builtin_ia32_storedquhi256_mask:
  case X86::BI__builtin_ia32_storedquqi256_mask:
  case X86::BI__builtin_ia32_storeupd256_mask:
  case X86::BI__builtin_ia32_storeups256_mask:
  case X86::BI__builtin_ia32_storedqudi512_mask:
  case X86::BI__builtin_ia32_storedqusi512_mask:
  case X86::BI__builtin_ia32_storedquhi512_mask:
  case X86::BI__builtin_ia32_storedquqi512_mask:
  case X86::BI__builtin_ia32_storeupd512_mask:
  case X86::BI__builtin_ia32_storeups512_mask:
    return EmitX86MaskedStore(*this, Ops, 1);

  case X86::BI__builtin_ia32_storess128_mask:
  case X86::BI__builtin_ia32_storesd128_mask: {
    return EmitX86MaskedStore(*this, Ops, 1);
  }
  case X86::BI__builtin_ia32_vpopcntb_128:
  case X86::BI__builtin_ia32_vpopcntd_128:
  case X86::BI__builtin_ia32_vpopcntq_128:
  case X86::BI__builtin_ia32_vpopcntw_128:
  case X86::BI__builtin_ia32_vpopcntb_256:
  case X86::BI__builtin_ia32_vpopcntd_256:
  case X86::BI__builtin_ia32_vpopcntq_256:
  case X86::BI__builtin_ia32_vpopcntw_256:
  case X86::BI__builtin_ia32_vpopcntb_512:
  case X86::BI__builtin_ia32_vpopcntd_512:
  case X86::BI__builtin_ia32_vpopcntq_512:
  case X86::BI__builtin_ia32_vpopcntw_512: {
    llvm::Type *ResultType = ConvertType(E->getType());
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
    return Builder.CreateCall(F, Ops);
  }
  case X86::BI__builtin_ia32_cvtmask2b128:
  case X86::BI__builtin_ia32_cvtmask2b256:
  case X86::BI__builtin_ia32_cvtmask2b512:
  case X86::BI__builtin_ia32_cvtmask2w128:
  case X86::BI__builtin_ia32_cvtmask2w256:
  case X86::BI__builtin_ia32_cvtmask2w512:
  case X86::BI__builtin_ia32_cvtmask2d128:
  case X86::BI__builtin_ia32_cvtmask2d256:
  case X86::BI__builtin_ia32_cvtmask2d512:
  case X86::BI__builtin_ia32_cvtmask2q128:
  case X86::BI__builtin_ia32_cvtmask2q256:
  case X86::BI__builtin_ia32_cvtmask2q512:
    return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));

  case X86::BI__builtin_ia32_cvtb2mask128:
  case X86::BI__builtin_ia32_cvtb2mask256:
  case X86::BI__builtin_ia32_cvtb2mask512:
  case X86::BI__builtin_ia32_cvtw2mask128:
  case X86::BI__builtin_ia32_cvtw2mask256:
  case X86::BI__builtin_ia32_cvtw2mask512:
  case X86::BI__builtin_ia32_cvtd2mask128:
  case X86::BI__builtin_ia32_cvtd2mask256:
  case X86::BI__builtin_ia32_cvtd2mask512:
  case X86::BI__builtin_ia32_cvtq2mask128:
  case X86::BI__builtin_ia32_cvtq2mask256:
  case X86::BI__builtin_ia32_cvtq2mask512:
    return EmitX86ConvertToMask(*this, Ops[0]);

  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
    return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
    return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);

  case X86::BI__builtin_ia32_vfmaddss3:
  case X86::BI__builtin_ia32_vfmaddsd3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
    return EmitScalarFMAExpr(*this, Ops, Ops[0]);
  case X86::BI__builtin_ia32_vfmaddss:
  case X86::BI__builtin_ia32_vfmaddsd:
    return EmitScalarFMAExpr(*this, Ops,
                             Constant::getNullValue(Ops[0]->getType()));
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
    return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
    return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
  case X86::BI__builtin_ia32_vfmsubss3_mask3:
  case X86::BI__builtin_ia32_vfmsubsd3_mask3:
    return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
                             /*NegAcc*/true);
  case X86::BI__builtin_ia32_vfmaddps:
  case X86::BI__builtin_ia32_vfmaddpd:
  case X86::BI__builtin_ia32_vfmaddps256:
  case X86::BI__builtin_ia32_vfmaddpd256:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
    return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
  case X86::BI__builtin_ia32_vfmaddsubps:
  case X86::BI__builtin_ia32_vfmaddsubpd:
  case X86::BI__builtin_ia32_vfmaddsubps256:
  case X86::BI__builtin_ia32_vfmaddsubpd256:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
    return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);

  case X86::BI__builtin_ia32_movdqa32store128_mask:
  case X86::BI__builtin_ia32_movdqa64store128_mask:
  case X86::BI__builtin_ia32_storeaps128_mask:
  case X86::BI__builtin_ia32_storeapd128_mask:
  case X86::BI__builtin_ia32_movdqa32store256_mask:
  case X86::BI__builtin_ia32_movdqa64store256_mask:
  case X86::BI__builtin_ia32_storeaps256_mask:
  case X86::BI__builtin_ia32_storeapd256_mask:
  case X86::BI__builtin_ia32_movdqa32store512_mask:
  case X86::BI__builtin_ia32_movdqa64store512_mask:
  case X86::BI__builtin_ia32_storeaps512_mask:
  case X86::BI__builtin_ia32_storeapd512_mask: {
    unsigned Align =
      getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
    return EmitX86MaskedStore(*this, Ops, Align);
  }
  case X86::BI__builtin_ia32_loadups128_mask:
  case X86::BI__builtin_ia32_loadups256_mask:
  case X86::BI__builtin_ia32_loadups512_mask:
  case X86::BI__builtin_ia32_loadupd128_mask:
  case X86::BI__builtin_ia32_loadupd256_mask:
  case X86::BI__builtin_ia32_loadupd512_mask:
  case X86::BI__builtin_ia32_loaddquqi128_mask:
  case X86::BI__builtin_ia32_loaddquqi256_mask:
  case X86::BI__builtin_ia32_loaddquqi512_mask:
  case X86::BI__builtin_ia32_loaddquhi128_mask:
  case X86::BI__builtin_ia32_loaddquhi256_mask:
  case X86::BI__builtin_ia32_loaddquhi512_mask:
  case X86::BI__builtin_ia32_loaddqusi128_mask:
  case X86::BI__builtin_ia32_loaddqusi256_mask:
  case X86::BI__builtin_ia32_loaddqusi512_mask:
  case X86::BI__builtin_ia32_loaddqudi128_mask:
  case X86::BI__builtin_ia32_loaddqudi256_mask:
  case X86::BI__builtin_ia32_loaddqudi512_mask:
    return EmitX86MaskedLoad(*this, Ops, 1);

  case X86::BI__builtin_ia32_loadss128_mask:
  case X86::BI__builtin_ia32_loadsd128_mask:
    return EmitX86MaskedLoad(*this, Ops, 1);

  case X86::BI__builtin_ia32_loadaps128_mask:
  case X86::BI__builtin_ia32_loadaps256_mask:
  case X86::BI__builtin_ia32_loadaps512_mask:
  case X86::BI__builtin_ia32_loadapd128_mask:
  case X86::BI__builtin_ia32_loadapd256_mask:
  case X86::BI__builtin_ia32_loadapd512_mask:
  case X86::BI__builtin_ia32_movdqa32load128_mask:
  case X86::BI__builtin_ia32_movdqa32load256_mask:
  case X86::BI__builtin_ia32_movdqa32load512_mask:
  case X86::BI__builtin_ia32_movdqa64load128_mask:
  case X86::BI__builtin_ia32_movdqa64load256_mask:
  case X86::BI__builtin_ia32_movdqa64load512_mask: {
    unsigned Align =
      getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
    return EmitX86MaskedLoad(*this, Ops, Align);
  }

  case X86::BI__builtin_ia32_expandloaddf128_mask:
  case X86::BI__builtin_ia32_expandloaddf256_mask:
  case X86::BI__builtin_ia32_expandloaddf512_mask:
  case X86::BI__builtin_ia32_expandloadsf128_mask:
  case X86::BI__builtin_ia32_expandloadsf256_mask:
  case X86::BI__builtin_ia32_expandloadsf512_mask:
  case X86::BI__builtin_ia32_expandloaddi128_mask:
  case X86::BI__builtin_ia32_expandloaddi256_mask:
  case X86::BI__builtin_ia32_expandloaddi512_mask:
  case X86::BI__builtin_ia32_expandloadsi128_mask:
  case X86::BI__builtin_ia32_expandloadsi256_mask:
  case X86::BI__builtin_ia32_expandloadsi512_mask:
  case X86::BI__builtin_ia32_expandloadhi128_mask:
  case X86::BI__builtin_ia32_expandloadhi256_mask:
  case X86::BI__builtin_ia32_expandloadhi512_mask:
  case X86::BI__builtin_ia32_expandloadqi128_mask:
  case X86::BI__builtin_ia32_expandloadqi256_mask:
  case X86::BI__builtin_ia32_expandloadqi512_mask:
    return EmitX86ExpandLoad(*this, Ops);

  case X86::BI__builtin_ia32_compressstoredf128_mask:
  case X86::BI__builtin_ia32_compressstoredf256_mask:
  case X86::BI__builtin_ia32_compressstoredf512_mask:
  case X86::BI__builtin_ia32_compressstoresf128_mask:
  case X86::BI__builtin_ia32_compressstoresf256_mask:
  case X86::BI__builtin_ia32_compressstoresf512_mask:
  case X86::BI__builtin_ia32_compressstoredi128_mask:
  case X86::BI__builtin_ia32_compressstoredi256_mask:
  case X86::BI__builtin_ia32_compressstoredi512_mask:
  case X86::BI__builtin_ia32_compressstoresi128_mask:
  case X86::BI__builtin_ia32_compressstoresi256_mask:
  case X86::BI__builtin_ia32_compressstoresi512_mask:
  case X86::BI__builtin_ia32_compressstorehi128_mask:
  case X86::BI__builtin_ia32_compressstorehi256_mask:
  case X86::BI__builtin_ia32_compressstorehi512_mask:
  case X86::BI__builtin_ia32_compressstoreqi128_mask:
  case X86::BI__builtin_ia32_compressstoreqi256_mask:
  case X86::BI__builtin_ia32_compressstoreqi512_mask:
    return EmitX86CompressStore(*this, Ops);

  case X86::BI__builtin_ia32_expanddf128_mask:
  case X86::BI__builtin_ia32_expanddf256_mask:
  case X86::BI__builtin_ia32_expanddf512_mask:
  case X86::BI__builtin_ia32_expandsf128_mask:
  case X86::BI__builtin_ia32_expandsf256_mask:
  case X86::BI__builtin_ia32_expandsf512_mask:
  case X86::BI__builtin_ia32_expanddi128_mask:
  case X86::BI__builtin_ia32_expanddi256_mask:
  case X86::BI__builtin_ia32_expanddi512_mask:
  case X86::BI__builtin_ia32_expandsi128_mask:
  case X86::BI__builtin_ia32_expandsi256_mask:
  case X86::BI__builtin_ia32_expandsi512_mask:
  case X86::BI__builtin_ia32_expandhi128_mask:
  case X86::BI__builtin_ia32_expandhi256_mask:
  case X86::BI__builtin_ia32_expandhi512_mask:
  case X86::BI__builtin_ia32_expandqi128_mask:
  case X86::BI__builtin_ia32_expandqi256_mask:
  case X86::BI__builtin_ia32_expandqi512_mask:
    return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);

  case X86::BI__builtin_ia32_compressdf128_mask:
  case X86::BI__builtin_ia32_compressdf256_mask:
  case X86::BI__builtin_ia32_compressdf512_mask:
  case X86::BI__builtin_ia32_compresssf128_mask:
  case X86::BI__builtin_ia32_compresssf256_mask:
  case X86::BI__builtin_ia32_compresssf512_mask:
  case X86::BI__builtin_ia32_compressdi128_mask:
  case X86::BI__builtin_ia32_compressdi256_mask:
  case X86::BI__builtin_ia32_compressdi512_mask:
  case X86::BI__builtin_ia32_compresssi128_mask:
  case X86::BI__builtin_ia32_compresssi256_mask:
  case X86::BI__builtin_ia32_compresssi512_mask:
  case X86::BI__builtin_ia32_compresshi128_mask:
  case X86::BI__builtin_ia32_compresshi256_mask:
  case X86::BI__builtin_ia32_compresshi512_mask:
  case X86::BI__builtin_ia32_compressqi128_mask:
  case X86::BI__builtin_ia32_compressqi256_mask:
  case X86::BI__builtin_ia32_compressqi512_mask:
    return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);

  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unexpected builtin");
    case X86::BI__builtin_ia32_gather3div2df:
      IID = Intrinsic::x86_avx512_mask_gather3div2_df;
      break;
    case X86::BI__builtin_ia32_gather3div2di:
      IID = Intrinsic::x86_avx512_mask_gather3div2_di;
      break;
    case X86::BI__builtin_ia32_gather3div4df:
      IID = Intrinsic::x86_avx512_mask_gather3div4_df;
      break;
    case X86::BI__builtin_ia32_gather3div4di:
      IID = Intrinsic::x86_avx512_mask_gather3div4_di;
      break;
    case X86::BI__builtin_ia32_gather3div4sf:
      IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
      break;
    case X86::BI__builtin_ia32_gather3div4si:
      IID = Intrinsic::x86_avx512_mask_gather3div4_si;
      break;
    case X86::BI__builtin_ia32_gather3div8sf:
      IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
      break;
    case X86::BI__builtin_ia32_gather3div8si:
      IID = Intrinsic::x86_avx512_mask_gather3div8_si;
      break;
    case X86::BI__builtin_ia32_gather3siv2df:
      IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
      break;
    case X86::BI__builtin_ia32_gather3siv2di:
      IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
      break;
    case X86::BI__builtin_ia32_gather3siv4df:
      IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
      break;
    case X86::BI__builtin_ia32_gather3siv4di:
      IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
      break;
    case X86::BI__builtin_ia32_gather3siv4sf:
      IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
      break;
    case X86::BI__builtin_ia32_gather3siv4si:
      IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
      break;
    case X86::BI__builtin_ia32_gather3siv8sf:
      IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
      break;
    case X86::BI__builtin_ia32_gather3siv8si:
      IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
      break;
    case X86::BI__builtin_ia32_gathersiv8df:
      IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
      break;
    case X86::BI__builtin_ia32_gathersiv16sf:
      IID = Intrinsic::x86_avx512_mask_gather_dps_512;
      break;
    case X86::BI__builtin_ia32_gatherdiv8df:
      IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
      break;
    case X86::BI__builtin_ia32_gatherdiv16sf:
      IID = Intrinsic::x86_avx512_mask_gather_qps_512;
      break;
    case X86::BI__builtin_ia32_gathersiv8di:
      IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
      break;
    case X86::BI__builtin_ia32_gathersiv16si:
      IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
      break;
    case X86::BI__builtin_ia32_gatherdiv8di:
      IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
      break;
    case X86::BI__builtin_ia32_gatherdiv16si:
      IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
      break;
    }

    unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(),
                                Ops[2]->getType()->getVectorNumElements());
    Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
    Function *Intr = CGM.getIntrinsic(IID);
    return Builder.CreateCall(Intr, Ops);
  }

  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unexpected builtin");
    case X86::BI__builtin_ia32_scattersiv8df:
      IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
      break;
    case X86::BI__builtin_ia32_scattersiv16sf:
      IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv8df:
      IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv16sf:
      IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
      break;
    case X86::BI__builtin_ia32_scattersiv8di:
      IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
      break;
    case X86::BI__builtin_ia32_scattersiv16si:
      IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv8di:
      IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv16si:
      IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv2df:
      IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
      break;
    case X86::BI__builtin_ia32_scatterdiv2di:
      IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
      break;
    case X86::BI__builtin_ia32_scatterdiv4df:
      IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
      break;
    case X86::BI__builtin_ia32_scatterdiv4di:
      IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
      break;
    case X86::BI__builtin_ia32_scatterdiv4sf:
      IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
      break;
    case X86::BI__builtin_ia32_scatterdiv4si:
      IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
      break;
    case X86::BI__builtin_ia32_scatterdiv8sf:
      IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
      break;
    case X86::BI__builtin_ia32_scatterdiv8si:
      IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
      break;
    case X86::BI__builtin_ia32_scattersiv2df:
      IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
      break;
    case X86::BI__builtin_ia32_scattersiv2di:
      IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
      break;
    case X86::BI__builtin_ia32_scattersiv4df:
      IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
      break;
    case X86::BI__builtin_ia32_scattersiv4di:
      IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
      break;
    case X86::BI__builtin_ia32_scattersiv4sf:
      IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
      break;
    case X86::BI__builtin_ia32_scattersiv4si:
      IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
      break;
    case X86::BI__builtin_ia32_scattersiv8sf:
      IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
      break;
    case X86::BI__builtin_ia32_scattersiv8si:
      IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
      break;
    }

    unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(),
                                Ops[3]->getType()->getVectorNumElements());
    Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
    Function *Intr = CGM.getIntrinsic(IID);
    return Builder.CreateCall(Intr, Ops);
  }

  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask: {
    llvm::Type *DstTy = ConvertType(E->getType());
    unsigned NumElts = DstTy->getVectorNumElements();
    unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned SubVectors = SrcNumElts / NumElts;
    unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
    assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
    Index &= SubVectors - 1; // Remove any extra bits.
    Index *= NumElts;

    uint32_t Indices[16];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i + Index;

    Value *Res = Builder.CreateShuffleVector(Ops[0],
                                             UndefValue::get(Ops[0]->getType()),
                                             makeArrayRef(Indices, NumElts),
                                             "extract");

    if (Ops.size() == 4)
      Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);

    return Res;
  }
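  // Worked example (illustrative): extracting subvector 2 from a v16f32
  // gives NumElts == 4 and SubVectors == 4, so Index is masked to 2 and
  // scaled to 8, and the shuffle selects elements <8, 9, 10, 11>.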
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_inserti32x4:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512: {
    unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
    unsigned SubVectors = DstNumElts / SrcNumElts;
    unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
    assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
    Index &= SubVectors - 1; // Remove any extra bits.
    Index *= SrcNumElts;

    uint32_t Indices[16];
    // First widen the source to the destination width with undef elements.
    for (unsigned i = 0; i != DstNumElts; ++i)
      Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;

    Value *Op1 = Builder.CreateShuffleVector(Ops[1],
                                             UndefValue::get(Ops[1]->getType()),
                                             makeArrayRef(Indices, DstNumElts),
                                             "widen");

    // Then blend the widened source into the destination at the insert
    // position.
    for (unsigned i = 0; i != DstNumElts; ++i) {
      if (i >= Index && i < (Index + SrcNumElts))
        Indices[i] = (i - Index) + DstNumElts;
      else
        Indices[i] = i;
    }

    return Builder.CreateShuffleVector(Ops[0], Op1,
                                       makeArrayRef(Indices, DstNumElts),
                                       "insert");
  }
  case X86::BI__builtin_ia32_pmovqd512_mask:
  case X86::BI__builtin_ia32_pmovwb512_mask: {
    Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
    return EmitX86Select(*this, Ops[2], Res, Ops[1]);
  }
  case X86::BI__builtin_ia32_pmovdb512_mask:
  case X86::BI__builtin_ia32_pmovdw512_mask:
  case X86::BI__builtin_ia32_pmovqw512_mask: {
    if (const auto *C = dyn_cast<Constant>(Ops[2]))
      if (C->isAllOnesValue())
        return Builder.CreateTrunc(Ops[0], Ops[1]->getType());

    Intrinsic::ID IID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pmovdb512_mask:
      IID = Intrinsic::x86_avx512_mask_pmov_db_512;
      break;
    case X86::BI__builtin_ia32_pmovdw512_mask:
      IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
      break;
    case X86::BI__builtin_ia32_pmovqw512_mask:
      IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
      break;
    }

    Function *Intr = CGM.getIntrinsic(IID);
    return Builder.CreateCall(Intr, Ops);
  }
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_pblendd256: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    uint32_t Indices[16];
    // If there are more than 8 elements, the immediate is used twice so make
    // sure we handle that.
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;

    return Builder.CreateShuffleVector(Ops[0], Ops[1],
                                       makeArrayRef(Indices, NumElts),
                                       "blend");
  }
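  // Worked example (illustrative): a blend immediate of 0b0101 over 4 lanes
  // produces indices <4, 1, 6, 3>, i.e. {B0, A1, B2, A3}; each set bit picks
  // the corresponding lane from the second operand.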
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512: {
    uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
    llvm::Type *Ty = Ops[0]->getType();
    unsigned NumElts = Ty->getVectorNumElements();

    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
    Imm = (Imm & 0xff) * 0x01010101;

    uint32_t Indices[32];
    for (unsigned l = 0; l != NumElts; l += 8) {
      for (unsigned i = 0; i != 4; ++i) {
        Indices[l + i] = l + (Imm & 3);
        Imm >>= 2;
      }
      for (unsigned i = 4; i != 8; ++i)
        Indices[l + i] = l + i;
    }

    return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
                                       makeArrayRef(Indices, NumElts),
                                       "pshuflw");
  }
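  // Worked example (illustrative): pshuflw with an immediate of 0x1B
  // (0b00011011) reverses the four low words of each 128-bit lane, giving
  // indices <3, 2, 1, 0, 4, 5, 6, 7> for the first lane while the high
  // words pass through unchanged.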
11207 case X86::BI__builtin_ia32_pshufhw:
11208 case X86::BI__builtin_ia32_pshufhw256:
11209 case X86::BI__builtin_ia32_pshufhw512: {
11210 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11211 llvm::Type *Ty = Ops[0]->getType();
11212 unsigned NumElts = Ty->getVectorNumElements();
11214 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
11215 Imm = (Imm & 0xff) * 0x01010101;
11217 uint32_t Indices[32];
11218 for (unsigned l = 0; l != NumElts; l += 8) {
11219 for (unsigned i = 0; i != 4; ++i)
11220 Indices[l + i] = l + i;
11221 for (unsigned i = 4; i != 8; ++i) {
11222 Indices[l + i] = l + 4 + (Imm & 3);
11227 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
11228 makeArrayRef(Indices, NumElts),
11229 "pshufhw");
11230 }
11231 case X86::BI__builtin_ia32_pshufd:
11232 case X86::BI__builtin_ia32_pshufd256:
11233 case X86::BI__builtin_ia32_pshufd512:
11234 case X86::BI__builtin_ia32_vpermilpd:
11235 case X86::BI__builtin_ia32_vpermilps:
11236 case X86::BI__builtin_ia32_vpermilpd256:
11237 case X86::BI__builtin_ia32_vpermilps256:
11238 case X86::BI__builtin_ia32_vpermilpd512:
11239 case X86::BI__builtin_ia32_vpermilps512: {
11240 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11241 llvm::Type *Ty = Ops[0]->getType();
11242 unsigned NumElts = Ty->getVectorNumElements();
11243 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
11244 unsigned NumLaneElts = NumElts / NumLanes;
11246 // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
11247 Imm = (Imm & 0xff) * 0x01010101;
11249 uint32_t Indices[16];
11250 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
11251 for (unsigned i = 0; i != NumLaneElts; ++i) {
11252 Indices[i + l] = (Imm % NumLaneElts) + l;
11253 Imm /= NumLaneElts;
11254 }
11255 }
11257 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
11258 makeArrayRef(Indices, NumElts),
11259 "permil");
11260 }
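// Worked example (illustrative): vpermilps with Imm = 0xB1 decodes two
// bits per element position, yielding <1, 0, 3, 2> within each 128-bit
// lane, i.e. it swaps adjacent pairs of elements.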
11261 case X86::BI__builtin_ia32_shufpd:
11262 case X86::BI__builtin_ia32_shufpd256:
11263 case X86::BI__builtin_ia32_shufpd512:
11264 case X86::BI__builtin_ia32_shufps:
11265 case X86::BI__builtin_ia32_shufps256:
11266 case X86::BI__builtin_ia32_shufps512: {
11267 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
11268 llvm::Type *Ty = Ops[0]->getType();
11269 unsigned NumElts = Ty->getVectorNumElements();
11270 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
11271 unsigned NumLaneElts = NumElts / NumLanes;
11273 // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
11274 Imm = (Imm & 0xff) * 0x01010101;
11276 uint32_t Indices[16];
11277 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
11278 for (unsigned i = 0; i != NumLaneElts; ++i) {
11279 unsigned Index = Imm % NumLaneElts;
11280 Imm /= NumLaneElts;
11281 if (i >= (NumLaneElts / 2))
11282 Index += NumElts; // Switch to other source.
11283 Indices[l + i] = l + Index;
11284 }
11285 }
11287 return Builder.CreateShuffleVector(Ops[0], Ops[1],
11288 makeArrayRef(Indices, NumElts),
11289 "shufp");
11290 }
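// Worked example (illustrative): 128-bit shufps with Imm = 0x4E decodes
// to Index values 2, 3, 0, 1; the last two positions add NumElts, so
// the final mask is <2, 3, 4, 5>: two elements of Ops[0] followed by
// two of Ops[1].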
11291 case X86::BI__builtin_ia32_permdi256:
11292 case X86::BI__builtin_ia32_permdf256:
11293 case X86::BI__builtin_ia32_permdi512:
11294 case X86::BI__builtin_ia32_permdf512: {
11295 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11296 llvm::Type *Ty = Ops[0]->getType();
11297 unsigned NumElts = Ty->getVectorNumElements();
11299 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
11300 uint32_t Indices[8];
11301 for (unsigned l = 0; l != NumElts; l += 4)
11302 for (unsigned i = 0; i != 4; ++i)
11303 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
11305 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
11306 makeArrayRef(Indices, NumElts),
11307 "perm");
11308 }
11309 case X86::BI__builtin_ia32_palignr128:
11310 case X86::BI__builtin_ia32_palignr256:
11311 case X86::BI__builtin_ia32_palignr512: {
11312 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
11314 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
11315 assert(NumElts % 16 == 0);
11317 // If palignr is shifting the pair of vectors more than the size of two
11318 // lanes, emit zero.
11319 if (ShiftVal >= 32)
11320 return llvm::Constant::getNullValue(ConvertType(E->getType()));
11322 // If palignr is shifting the pair of input vectors more than one lane,
11323 // but less than two lanes, convert to shifting in zeroes.
11324 if (ShiftVal > 16) {
11325 ShiftVal -= 16;
11326 Ops[1] = Ops[0];
11327 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
11328 }
11330 uint32_t Indices[64];
11331 // 256-bit palignr operates on 128-bit lanes so we need to handle that
11332 for (unsigned l = 0; l != NumElts; l += 16) {
11333 for (unsigned i = 0; i != 16; ++i) {
11334 unsigned Idx = ShiftVal + i;
11335 if (Idx >= 16)
11336 Idx += NumElts - 16; // End of lane, switch operand.
11337 Indices[l + i] = Idx + l;
11338 }
11339 }
11341 return Builder.CreateShuffleVector(Ops[1], Ops[0],
11342 makeArrayRef(Indices, NumElts),
11343 "palignr");
11344 }
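// Worked example (illustrative): 128-bit palignr with ShiftVal = 4
// yields the mask <4..15, 16..19>, i.e. bytes 4-15 of Ops[1] followed
// by bytes 0-3 of Ops[0], which is the two-vector concatenation
// shifted right by four bytes.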
11345 case X86::BI__builtin_ia32_alignd128:
11346 case X86::BI__builtin_ia32_alignd256:
11347 case X86::BI__builtin_ia32_alignd512:
11348 case X86::BI__builtin_ia32_alignq128:
11349 case X86::BI__builtin_ia32_alignq256:
11350 case X86::BI__builtin_ia32_alignq512: {
11351 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
11352 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
11354 // Mask the shift amount to width of two vectors.
11355 ShiftVal &= (2 * NumElts) - 1;
11357 uint32_t Indices[16];
11358 for (unsigned i = 0; i != NumElts; ++i)
11359 Indices[i] = i + ShiftVal;
11361 return Builder.CreateShuffleVector(Ops[1], Ops[0],
11362 makeArrayRef(Indices, NumElts),
11363 "valign");
11364 }
11365 case X86::BI__builtin_ia32_shuf_f32x4_256:
11366 case X86::BI__builtin_ia32_shuf_f64x2_256:
11367 case X86::BI__builtin_ia32_shuf_i32x4_256:
11368 case X86::BI__builtin_ia32_shuf_i64x2_256:
11369 case X86::BI__builtin_ia32_shuf_f32x4:
11370 case X86::BI__builtin_ia32_shuf_f64x2:
11371 case X86::BI__builtin_ia32_shuf_i32x4:
11372 case X86::BI__builtin_ia32_shuf_i64x2: {
11373 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
11374 llvm::Type *Ty = Ops[0]->getType();
11375 unsigned NumElts = Ty->getVectorNumElements();
11376 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
11377 unsigned NumLaneElts = NumElts / NumLanes;
11379 uint32_t Indices[16];
11380 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
11381 unsigned Index = (Imm % NumLanes) * NumLaneElts;
11382 Imm /= NumLanes; // Discard the bits we just used.
11383 if (l >= (NumElts / 2))
11384 Index += NumElts; // Switch to other source.
11385 for (unsigned i = 0; i != NumLaneElts; ++i) {
11386 Indices[l + i] = Index + i;
11387 }
11388 }
11390 return Builder.CreateShuffleVector(Ops[0], Ops[1],
11391 makeArrayRef(Indices, NumElts),
11392 "shuf");
11393 }
11395 case X86::BI__builtin_ia32_vperm2f128_pd256:
11396 case X86::BI__builtin_ia32_vperm2f128_ps256:
11397 case X86::BI__builtin_ia32_vperm2f128_si256:
11398 case X86::BI__builtin_ia32_permti256: {
11399 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
11400 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
11402 // This takes a very simple approach since there are two lanes and a
11403 // shuffle can have 2 inputs. So we reserve the first input for the first
11404 // lane and the second input for the second lane. This may result in
11405 // duplicate sources, but this can be dealt with in the backend.
11407 Value *OutOps[2];
11408 uint32_t Indices[8];
11409 for (unsigned l = 0; l != 2; ++l) {
11410 // Determine the source for this lane.
11411 if (Imm & (1 << ((l * 4) + 3)))
11412 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
11413 else if (Imm & (1 << ((l * 4) + 1)))
11414 OutOps[l] = Ops[1];
11415 else
11416 OutOps[l] = Ops[0];
11418 for (unsigned i = 0; i != NumElts/2; ++i) {
11419 // Start with ith element of the source for this lane.
11420 unsigned Idx = (l * NumElts) + i;
11421 // If bit 0 of the immediate half is set, switch to the high half of
11422 // the source.
11423 if (Imm & (1 << (l * 4)))
11424 Idx += NumElts / 2;
11425 Indices[(l * (NumElts / 2)) + i] = Idx;
11426 }
11427 }
11429 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
11430 makeArrayRef(Indices, NumElts),
11431 "vperm");
11432 }
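// Worked example (illustrative): vperm2f128 with Imm = 0x31 keeps
// Ops[0] as the source for lane 0 and selects its high half (low
// nibble 0x1), and selects the high half of Ops[1] for lane 1 (high
// nibble 0x3); a nibble with bit 3 set would zero its lane instead.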
11434 case X86::BI__builtin_ia32_pslldqi128_byteshift:
11435 case X86::BI__builtin_ia32_pslldqi256_byteshift:
11436 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
11437 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
11438 llvm::Type *ResultType = Ops[0]->getType();
11439 // Builtin type is vXi64 so multiply by 8 to get bytes.
11440 unsigned NumElts = ResultType->getVectorNumElements() * 8;
11442 // If pslldq is shifting the vector more than 15 bytes, emit zero.
11443 if (ShiftVal >= 16)
11444 return llvm::Constant::getNullValue(ResultType);
11446 uint32_t Indices[64];
11447 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
11448 for (unsigned l = 0; l != NumElts; l += 16) {
11449 for (unsigned i = 0; i != 16; ++i) {
11450 unsigned Idx = NumElts + i - ShiftVal;
11451 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
11452 Indices[l + i] = Idx + l;
11453 }
11454 }
11456 llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
11457 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
11458 Value *Zero = llvm::Constant::getNullValue(VecTy);
11459 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
11460 makeArrayRef(Indices, NumElts),
11461 "pslldq");
11462 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
11463 }
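// Worked example (illustrative): a 128-bit pslldq by 4 produces the
// mask <12, 13, 14, 15, 16..27>, i.e. four bytes of Zero followed by
// bytes 0-11 of the source: the lane shifts up by four byte positions
// with zeroes entering at the bottom.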
11464 case X86::BI__builtin_ia32_psrldqi128_byteshift:
11465 case X86::BI__builtin_ia32_psrldqi256_byteshift:
11466 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
11467 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
11468 llvm::Type *ResultType = Ops[0]->getType();
11469 // Builtin type is vXi64 so multiply by 8 to get bytes.
11470 unsigned NumElts = ResultType->getVectorNumElements() * 8;
11472 // If psrldq is shifting the vector more than 15 bytes, emit zero.
11473 if (ShiftVal >= 16)
11474 return llvm::Constant::getNullValue(ResultType);
11476 uint32_t Indices[64];
11477 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
11478 for (unsigned l = 0; l != NumElts; l += 16) {
11479 for (unsigned i = 0; i != 16; ++i) {
11480 unsigned Idx = i + ShiftVal;
11481 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
11482 Indices[l + i] = Idx + l;
11483 }
11484 }
11486 llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
11487 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
11488 Value *Zero = llvm::Constant::getNullValue(VecTy);
11489 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
11490 makeArrayRef(Indices, NumElts),
11491 "psrldq");
11492 return Builder.CreateBitCast(SV, ResultType, "cast");
11493 }
11494 case X86::BI__builtin_ia32_kshiftliqi:
11495 case X86::BI__builtin_ia32_kshiftlihi:
11496 case X86::BI__builtin_ia32_kshiftlisi:
11497 case X86::BI__builtin_ia32_kshiftlidi: {
11498 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
11499 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11501 if (ShiftVal >= NumElts)
11502 return llvm::Constant::getNullValue(Ops[0]->getType());
11504 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
11506 uint32_t Indices[64];
11507 for (unsigned i = 0; i != NumElts; ++i)
11508 Indices[i] = NumElts + i - ShiftVal;
11510 Value *Zero = llvm::Constant::getNullValue(In->getType());
11511 Value *SV = Builder.CreateShuffleVector(Zero, In,
11512 makeArrayRef(Indices, NumElts),
11513 "kshiftl");
11514 return Builder.CreateBitCast(SV, Ops[0]->getType());
11515 }
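// Worked example (illustrative): kshiftlihi with ShiftVal = 4 selects
// four elements of Zero followed by bits 0-11 of the input, which is
// exactly (mask << 4) with the low four bits cleared.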
11516 case X86::BI__builtin_ia32_kshiftriqi:
11517 case X86::BI__builtin_ia32_kshiftrihi:
11518 case X86::BI__builtin_ia32_kshiftrisi:
11519 case X86::BI__builtin_ia32_kshiftridi: {
11520 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
11521 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11523 if (ShiftVal >= NumElts)
11524 return llvm::Constant::getNullValue(Ops[0]->getType());
11526 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
11528 uint32_t Indices[64];
11529 for (unsigned i = 0; i != NumElts; ++i)
11530 Indices[i] = i + ShiftVal;
11532 Value *Zero = llvm::Constant::getNullValue(In->getType());
11533 Value *SV = Builder.CreateShuffleVector(In, Zero,
11534 makeArrayRef(Indices, NumElts),
11535 "kshiftr");
11536 return Builder.CreateBitCast(SV, Ops[0]->getType());
11537 }
11538 case X86::BI__builtin_ia32_movnti:
11539 case X86::BI__builtin_ia32_movnti64:
11540 case X86::BI__builtin_ia32_movntsd:
11541 case X86::BI__builtin_ia32_movntss: {
11542 llvm::MDNode *Node = llvm::MDNode::get(
11543 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
11545 Value *Ptr = Ops[0];
11546 Value *Src = Ops[1];
11548 // Extract the 0'th element of the source vector.
11549 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
11550 BuiltinID == X86::BI__builtin_ia32_movntss)
11551 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
11553 // Convert the type of the pointer to a pointer to the stored type.
11554 Value *BC = Builder.CreateBitCast(
11555 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
11557 // Unaligned nontemporal store of the scalar value.
11558 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
11559 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
11560 SI->setAlignment(llvm::Align::None());
11561 return SI;
11562 }
11563 // Rotate is a special case of funnel shift - 1st 2 args are the same.
11564 case X86::BI__builtin_ia32_vprotb:
11565 case X86::BI__builtin_ia32_vprotw:
11566 case X86::BI__builtin_ia32_vprotd:
11567 case X86::BI__builtin_ia32_vprotq:
11568 case X86::BI__builtin_ia32_vprotbi:
11569 case X86::BI__builtin_ia32_vprotwi:
11570 case X86::BI__builtin_ia32_vprotdi:
11571 case X86::BI__builtin_ia32_vprotqi:
11572 case X86::BI__builtin_ia32_prold128:
11573 case X86::BI__builtin_ia32_prold256:
11574 case X86::BI__builtin_ia32_prold512:
11575 case X86::BI__builtin_ia32_prolq128:
11576 case X86::BI__builtin_ia32_prolq256:
11577 case X86::BI__builtin_ia32_prolq512:
11578 case X86::BI__builtin_ia32_prolvd128:
11579 case X86::BI__builtin_ia32_prolvd256:
11580 case X86::BI__builtin_ia32_prolvd512:
11581 case X86::BI__builtin_ia32_prolvq128:
11582 case X86::BI__builtin_ia32_prolvq256:
11583 case X86::BI__builtin_ia32_prolvq512:
11584 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
11585 case X86::BI__builtin_ia32_prord128:
11586 case X86::BI__builtin_ia32_prord256:
11587 case X86::BI__builtin_ia32_prord512:
11588 case X86::BI__builtin_ia32_prorq128:
11589 case X86::BI__builtin_ia32_prorq256:
11590 case X86::BI__builtin_ia32_prorq512:
11591 case X86::BI__builtin_ia32_prorvd128:
11592 case X86::BI__builtin_ia32_prorvd256:
11593 case X86::BI__builtin_ia32_prorvd512:
11594 case X86::BI__builtin_ia32_prorvq128:
11595 case X86::BI__builtin_ia32_prorvq256:
11596 case X86::BI__builtin_ia32_prorvq512:
11597 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
11598 case X86::BI__builtin_ia32_selectb_128:
11599 case X86::BI__builtin_ia32_selectb_256:
11600 case X86::BI__builtin_ia32_selectb_512:
11601 case X86::BI__builtin_ia32_selectw_128:
11602 case X86::BI__builtin_ia32_selectw_256:
11603 case X86::BI__builtin_ia32_selectw_512:
11604 case X86::BI__builtin_ia32_selectd_128:
11605 case X86::BI__builtin_ia32_selectd_256:
11606 case X86::BI__builtin_ia32_selectd_512:
11607 case X86::BI__builtin_ia32_selectq_128:
11608 case X86::BI__builtin_ia32_selectq_256:
11609 case X86::BI__builtin_ia32_selectq_512:
11610 case X86::BI__builtin_ia32_selectps_128:
11611 case X86::BI__builtin_ia32_selectps_256:
11612 case X86::BI__builtin_ia32_selectps_512:
11613 case X86::BI__builtin_ia32_selectpd_128:
11614 case X86::BI__builtin_ia32_selectpd_256:
11615 case X86::BI__builtin_ia32_selectpd_512:
11616 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
11617 case X86::BI__builtin_ia32_selectss_128:
11618 case X86::BI__builtin_ia32_selectsd_128: {
11619 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11620 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11621 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
11622 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
11623 }
11624 case X86::BI__builtin_ia32_cmpb128_mask:
11625 case X86::BI__builtin_ia32_cmpb256_mask:
11626 case X86::BI__builtin_ia32_cmpb512_mask:
11627 case X86::BI__builtin_ia32_cmpw128_mask:
11628 case X86::BI__builtin_ia32_cmpw256_mask:
11629 case X86::BI__builtin_ia32_cmpw512_mask:
11630 case X86::BI__builtin_ia32_cmpd128_mask:
11631 case X86::BI__builtin_ia32_cmpd256_mask:
11632 case X86::BI__builtin_ia32_cmpd512_mask:
11633 case X86::BI__builtin_ia32_cmpq128_mask:
11634 case X86::BI__builtin_ia32_cmpq256_mask:
11635 case X86::BI__builtin_ia32_cmpq512_mask: {
11636 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11637 return EmitX86MaskedCompare(*this, CC, true, Ops);
11638 }
11639 case X86::BI__builtin_ia32_ucmpb128_mask:
11640 case X86::BI__builtin_ia32_ucmpb256_mask:
11641 case X86::BI__builtin_ia32_ucmpb512_mask:
11642 case X86::BI__builtin_ia32_ucmpw128_mask:
11643 case X86::BI__builtin_ia32_ucmpw256_mask:
11644 case X86::BI__builtin_ia32_ucmpw512_mask:
11645 case X86::BI__builtin_ia32_ucmpd128_mask:
11646 case X86::BI__builtin_ia32_ucmpd256_mask:
11647 case X86::BI__builtin_ia32_ucmpd512_mask:
11648 case X86::BI__builtin_ia32_ucmpq128_mask:
11649 case X86::BI__builtin_ia32_ucmpq256_mask:
11650 case X86::BI__builtin_ia32_ucmpq512_mask: {
11651 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11652 return EmitX86MaskedCompare(*this, CC, false, Ops);
11653 }
11654 case X86::BI__builtin_ia32_vpcomb:
11655 case X86::BI__builtin_ia32_vpcomw:
11656 case X86::BI__builtin_ia32_vpcomd:
11657 case X86::BI__builtin_ia32_vpcomq:
11658 return EmitX86vpcom(*this, Ops, true);
11659 case X86::BI__builtin_ia32_vpcomub:
11660 case X86::BI__builtin_ia32_vpcomuw:
11661 case X86::BI__builtin_ia32_vpcomud:
11662 case X86::BI__builtin_ia32_vpcomuq:
11663 return EmitX86vpcom(*this, Ops, false);
11665 case X86::BI__builtin_ia32_kortestcqi:
11666 case X86::BI__builtin_ia32_kortestchi:
11667 case X86::BI__builtin_ia32_kortestcsi:
11668 case X86::BI__builtin_ia32_kortestcdi: {
11669 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
11670 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
11671 Value *Cmp = Builder.CreateICmpEQ(Or, C);
11672 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
11673 }
11674 case X86::BI__builtin_ia32_kortestzqi:
11675 case X86::BI__builtin_ia32_kortestzhi:
11676 case X86::BI__builtin_ia32_kortestzsi:
11677 case X86::BI__builtin_ia32_kortestzdi: {
11678 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
11679 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
11680 Value *Cmp = Builder.CreateICmpEQ(Or, C);
11681 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
11682 }
11684 case X86::BI__builtin_ia32_ktestcqi:
11685 case X86::BI__builtin_ia32_ktestzqi:
11686 case X86::BI__builtin_ia32_ktestchi:
11687 case X86::BI__builtin_ia32_ktestzhi:
11688 case X86::BI__builtin_ia32_ktestcsi:
11689 case X86::BI__builtin_ia32_ktestzsi:
11690 case X86::BI__builtin_ia32_ktestcdi:
11691 case X86::BI__builtin_ia32_ktestzdi: {
11692 Intrinsic::ID IID;
11693 switch (BuiltinID) {
11694 default: llvm_unreachable("Unsupported intrinsic!");
11695 case X86::BI__builtin_ia32_ktestcqi:
11696 IID = Intrinsic::x86_avx512_ktestc_b;
11697 break;
11698 case X86::BI__builtin_ia32_ktestzqi:
11699 IID = Intrinsic::x86_avx512_ktestz_b;
11700 break;
11701 case X86::BI__builtin_ia32_ktestchi:
11702 IID = Intrinsic::x86_avx512_ktestc_w;
11703 break;
11704 case X86::BI__builtin_ia32_ktestzhi:
11705 IID = Intrinsic::x86_avx512_ktestz_w;
11706 break;
11707 case X86::BI__builtin_ia32_ktestcsi:
11708 IID = Intrinsic::x86_avx512_ktestc_d;
11709 break;
11710 case X86::BI__builtin_ia32_ktestzsi:
11711 IID = Intrinsic::x86_avx512_ktestz_d;
11712 break;
11713 case X86::BI__builtin_ia32_ktestcdi:
11714 IID = Intrinsic::x86_avx512_ktestc_q;
11715 break;
11716 case X86::BI__builtin_ia32_ktestzdi:
11717 IID = Intrinsic::x86_avx512_ktestz_q;
11718 break;
11719 }
11721 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11722 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
11723 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
11724 Function *Intr = CGM.getIntrinsic(IID);
11725 return Builder.CreateCall(Intr, {LHS, RHS});
11726 }
11728 case X86::BI__builtin_ia32_kaddqi:
11729 case X86::BI__builtin_ia32_kaddhi:
11730 case X86::BI__builtin_ia32_kaddsi:
11731 case X86::BI__builtin_ia32_kadddi: {
11732 Intrinsic::ID IID;
11733 switch (BuiltinID) {
11734 default: llvm_unreachable("Unsupported intrinsic!");
11735 case X86::BI__builtin_ia32_kaddqi:
11736 IID = Intrinsic::x86_avx512_kadd_b;
11737 break;
11738 case X86::BI__builtin_ia32_kaddhi:
11739 IID = Intrinsic::x86_avx512_kadd_w;
11740 break;
11741 case X86::BI__builtin_ia32_kaddsi:
11742 IID = Intrinsic::x86_avx512_kadd_d;
11743 break;
11744 case X86::BI__builtin_ia32_kadddi:
11745 IID = Intrinsic::x86_avx512_kadd_q;
11746 break;
11747 }
11749 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11750 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
11751 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
11752 Function *Intr = CGM.getIntrinsic(IID);
11753 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
11754 return Builder.CreateBitCast(Res, Ops[0]->getType());
11755 }
11756 case X86::BI__builtin_ia32_kandqi:
11757 case X86::BI__builtin_ia32_kandhi:
11758 case X86::BI__builtin_ia32_kandsi:
11759 case X86::BI__builtin_ia32_kanddi:
11760 return EmitX86MaskLogic(*this, Instruction::And, Ops);
11761 case X86::BI__builtin_ia32_kandnqi:
11762 case X86::BI__builtin_ia32_kandnhi:
11763 case X86::BI__builtin_ia32_kandnsi:
11764 case X86::BI__builtin_ia32_kandndi:
11765 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
11766 case X86::BI__builtin_ia32_korqi:
11767 case X86::BI__builtin_ia32_korhi:
11768 case X86::BI__builtin_ia32_korsi:
11769 case X86::BI__builtin_ia32_kordi:
11770 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
11771 case X86::BI__builtin_ia32_kxnorqi:
11772 case X86::BI__builtin_ia32_kxnorhi:
11773 case X86::BI__builtin_ia32_kxnorsi:
11774 case X86::BI__builtin_ia32_kxnordi:
11775 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
11776 case X86::BI__builtin_ia32_kxorqi:
11777 case X86::BI__builtin_ia32_kxorhi:
11778 case X86::BI__builtin_ia32_kxorsi:
11779 case X86::BI__builtin_ia32_kxordi:
11780 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
11781 case X86::BI__builtin_ia32_knotqi:
11782 case X86::BI__builtin_ia32_knothi:
11783 case X86::BI__builtin_ia32_knotsi:
11784 case X86::BI__builtin_ia32_knotdi: {
11785 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11786 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
11787 return Builder.CreateBitCast(Builder.CreateNot(Res),
11788 Ops[0]->getType());
11789 }
11790 case X86::BI__builtin_ia32_kmovb:
11791 case X86::BI__builtin_ia32_kmovw:
11792 case X86::BI__builtin_ia32_kmovd:
11793 case X86::BI__builtin_ia32_kmovq: {
11794 // Bitcast to vXi1 type and then back to integer. This gets the mask
11795 // register type into the IR, but might be optimized out depending on
11796 // what's around it.
11797 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11798 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
11799 return Builder.CreateBitCast(Res, Ops[0]->getType());
11800 }
11802 case X86::BI__builtin_ia32_kunpckdi:
11803 case X86::BI__builtin_ia32_kunpcksi:
11804 case X86::BI__builtin_ia32_kunpckhi: {
11805 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11806 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
11807 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
11808 uint32_t Indices[64];
11809 for (unsigned i = 0; i != NumElts; ++i)
11810 Indices[i] = i;
11812 // First extract half of each vector. This gives better codegen than
11813 // doing it in a single shuffle.
11814 LHS = Builder.CreateShuffleVector(LHS, LHS,
11815 makeArrayRef(Indices, NumElts / 2));
11816 RHS = Builder.CreateShuffleVector(RHS, RHS,
11817 makeArrayRef(Indices, NumElts / 2));
11818 // Concat the vectors.
11819 // NOTE: Operands are swapped to match the intrinsic definition.
11820 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
11821 makeArrayRef(Indices, NumElts));
11822 return Builder.CreateBitCast(Res, Ops[0]->getType());
11823 }
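// Worked example (illustrative): for kunpckhi on 16-bit masks this
// computes ((Ops[0] & 0xff) << 8) | (Ops[1] & 0xff); the operand swap
// above is what places the low half of Ops[1] in the low bits.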
11825 case X86::BI__builtin_ia32_vplzcntd_128:
11826 case X86::BI__builtin_ia32_vplzcntd_256:
11827 case X86::BI__builtin_ia32_vplzcntd_512:
11828 case X86::BI__builtin_ia32_vplzcntq_128:
11829 case X86::BI__builtin_ia32_vplzcntq_256:
11830 case X86::BI__builtin_ia32_vplzcntq_512: {
11831 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
11832 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
11833 }
11834 case X86::BI__builtin_ia32_sqrtss:
11835 case X86::BI__builtin_ia32_sqrtsd: {
11836 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
11837 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
11838 A = Builder.CreateCall(F, {A});
11839 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
11840 }
11841 case X86::BI__builtin_ia32_sqrtsd_round_mask:
11842 case X86::BI__builtin_ia32_sqrtss_round_mask: {
11843 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
11844 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
11845 // otherwise keep the intrinsic.
11846 if (CC != 4) {
11847 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
11848 Intrinsic::x86_avx512_mask_sqrt_sd :
11849 Intrinsic::x86_avx512_mask_sqrt_ss;
11850 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
11851 }
11852 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11853 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
11854 A = Builder.CreateCall(F, A);
11855 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11856 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
11857 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
11858 }
11859 case X86::BI__builtin_ia32_sqrtpd256:
11860 case X86::BI__builtin_ia32_sqrtpd:
11861 case X86::BI__builtin_ia32_sqrtps256:
11862 case X86::BI__builtin_ia32_sqrtps:
11863 case X86::BI__builtin_ia32_sqrtps512:
11864 case X86::BI__builtin_ia32_sqrtpd512: {
11865 if (Ops.size() == 2) {
11866 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11867 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
11868 // otherwise keep the intrinsic.
11869 if (CC != 4) {
11870 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
11871 Intrinsic::x86_avx512_sqrt_ps_512 :
11872 Intrinsic::x86_avx512_sqrt_pd_512;
11873 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
11874 }
11875 }
11876 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
11877 return Builder.CreateCall(F, Ops[0]);
11878 }
11879 case X86::BI__builtin_ia32_pabsb128:
11880 case X86::BI__builtin_ia32_pabsw128:
11881 case X86::BI__builtin_ia32_pabsd128:
11882 case X86::BI__builtin_ia32_pabsb256:
11883 case X86::BI__builtin_ia32_pabsw256:
11884 case X86::BI__builtin_ia32_pabsd256:
11885 case X86::BI__builtin_ia32_pabsq128:
11886 case X86::BI__builtin_ia32_pabsq256:
11887 case X86::BI__builtin_ia32_pabsb512:
11888 case X86::BI__builtin_ia32_pabsw512:
11889 case X86::BI__builtin_ia32_pabsd512:
11890 case X86::BI__builtin_ia32_pabsq512:
11891 return EmitX86Abs(*this, Ops);
11893 case X86::BI__builtin_ia32_pmaxsb128:
11894 case X86::BI__builtin_ia32_pmaxsw128:
11895 case X86::BI__builtin_ia32_pmaxsd128:
11896 case X86::BI__builtin_ia32_pmaxsq128:
11897 case X86::BI__builtin_ia32_pmaxsb256:
11898 case X86::BI__builtin_ia32_pmaxsw256:
11899 case X86::BI__builtin_ia32_pmaxsd256:
11900 case X86::BI__builtin_ia32_pmaxsq256:
11901 case X86::BI__builtin_ia32_pmaxsb512:
11902 case X86::BI__builtin_ia32_pmaxsw512:
11903 case X86::BI__builtin_ia32_pmaxsd512:
11904 case X86::BI__builtin_ia32_pmaxsq512:
11905 return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
11906 case X86::BI__builtin_ia32_pmaxub128:
11907 case X86::BI__builtin_ia32_pmaxuw128:
11908 case X86::BI__builtin_ia32_pmaxud128:
11909 case X86::BI__builtin_ia32_pmaxuq128:
11910 case X86::BI__builtin_ia32_pmaxub256:
11911 case X86::BI__builtin_ia32_pmaxuw256:
11912 case X86::BI__builtin_ia32_pmaxud256:
11913 case X86::BI__builtin_ia32_pmaxuq256:
11914 case X86::BI__builtin_ia32_pmaxub512:
11915 case X86::BI__builtin_ia32_pmaxuw512:
11916 case X86::BI__builtin_ia32_pmaxud512:
11917 case X86::BI__builtin_ia32_pmaxuq512:
11918 return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
11919 case X86::BI__builtin_ia32_pminsb128:
11920 case X86::BI__builtin_ia32_pminsw128:
11921 case X86::BI__builtin_ia32_pminsd128:
11922 case X86::BI__builtin_ia32_pminsq128:
11923 case X86::BI__builtin_ia32_pminsb256:
11924 case X86::BI__builtin_ia32_pminsw256:
11925 case X86::BI__builtin_ia32_pminsd256:
11926 case X86::BI__builtin_ia32_pminsq256:
11927 case X86::BI__builtin_ia32_pminsb512:
11928 case X86::BI__builtin_ia32_pminsw512:
11929 case X86::BI__builtin_ia32_pminsd512:
11930 case X86::BI__builtin_ia32_pminsq512:
11931 return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
11932 case X86::BI__builtin_ia32_pminub128:
11933 case X86::BI__builtin_ia32_pminuw128:
11934 case X86::BI__builtin_ia32_pminud128:
11935 case X86::BI__builtin_ia32_pminuq128:
11936 case X86::BI__builtin_ia32_pminub256:
11937 case X86::BI__builtin_ia32_pminuw256:
11938 case X86::BI__builtin_ia32_pminud256:
11939 case X86::BI__builtin_ia32_pminuq256:
11940 case X86::BI__builtin_ia32_pminub512:
11941 case X86::BI__builtin_ia32_pminuw512:
11942 case X86::BI__builtin_ia32_pminud512:
11943 case X86::BI__builtin_ia32_pminuq512:
11944 return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
11946 case X86::BI__builtin_ia32_pmuludq128:
11947 case X86::BI__builtin_ia32_pmuludq256:
11948 case X86::BI__builtin_ia32_pmuludq512:
11949 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
11951 case X86::BI__builtin_ia32_pmuldq128:
11952 case X86::BI__builtin_ia32_pmuldq256:
11953 case X86::BI__builtin_ia32_pmuldq512:
11954 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
11956 case X86::BI__builtin_ia32_pternlogd512_mask:
11957 case X86::BI__builtin_ia32_pternlogq512_mask:
11958 case X86::BI__builtin_ia32_pternlogd128_mask:
11959 case X86::BI__builtin_ia32_pternlogd256_mask:
11960 case X86::BI__builtin_ia32_pternlogq128_mask:
11961 case X86::BI__builtin_ia32_pternlogq256_mask:
11962 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
11964 case X86::BI__builtin_ia32_pternlogd512_maskz:
11965 case X86::BI__builtin_ia32_pternlogq512_maskz:
11966 case X86::BI__builtin_ia32_pternlogd128_maskz:
11967 case X86::BI__builtin_ia32_pternlogd256_maskz:
11968 case X86::BI__builtin_ia32_pternlogq128_maskz:
11969 case X86::BI__builtin_ia32_pternlogq256_maskz:
11970 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
11972 case X86::BI__builtin_ia32_vpshldd128:
11973 case X86::BI__builtin_ia32_vpshldd256:
11974 case X86::BI__builtin_ia32_vpshldd512:
11975 case X86::BI__builtin_ia32_vpshldq128:
11976 case X86::BI__builtin_ia32_vpshldq256:
11977 case X86::BI__builtin_ia32_vpshldq512:
11978 case X86::BI__builtin_ia32_vpshldw128:
11979 case X86::BI__builtin_ia32_vpshldw256:
11980 case X86::BI__builtin_ia32_vpshldw512:
11981 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
11983 case X86::BI__builtin_ia32_vpshrdd128:
11984 case X86::BI__builtin_ia32_vpshrdd256:
11985 case X86::BI__builtin_ia32_vpshrdd512:
11986 case X86::BI__builtin_ia32_vpshrdq128:
11987 case X86::BI__builtin_ia32_vpshrdq256:
11988 case X86::BI__builtin_ia32_vpshrdq512:
11989 case X86::BI__builtin_ia32_vpshrdw128:
11990 case X86::BI__builtin_ia32_vpshrdw256:
11991 case X86::BI__builtin_ia32_vpshrdw512:
11992 // Ops 0 and 1 are swapped.
11993 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
11995 case X86::BI__builtin_ia32_vpshldvd128:
11996 case X86::BI__builtin_ia32_vpshldvd256:
11997 case X86::BI__builtin_ia32_vpshldvd512:
11998 case X86::BI__builtin_ia32_vpshldvq128:
11999 case X86::BI__builtin_ia32_vpshldvq256:
12000 case X86::BI__builtin_ia32_vpshldvq512:
12001 case X86::BI__builtin_ia32_vpshldvw128:
12002 case X86::BI__builtin_ia32_vpshldvw256:
12003 case X86::BI__builtin_ia32_vpshldvw512:
12004 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
12006 case X86::BI__builtin_ia32_vpshrdvd128:
12007 case X86::BI__builtin_ia32_vpshrdvd256:
12008 case X86::BI__builtin_ia32_vpshrdvd512:
12009 case X86::BI__builtin_ia32_vpshrdvq128:
12010 case X86::BI__builtin_ia32_vpshrdvq256:
12011 case X86::BI__builtin_ia32_vpshrdvq512:
12012 case X86::BI__builtin_ia32_vpshrdvw128:
12013 case X86::BI__builtin_ia32_vpshrdvw256:
12014 case X86::BI__builtin_ia32_vpshrdvw512:
12015 // Ops 0 and 1 are swapped.
12016 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
12019 case X86::BI__builtin_ia32_pswapdsf:
12020 case X86::BI__builtin_ia32_pswapdsi: {
12021 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
12022 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
12023 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
12024 return Builder.CreateCall(F, Ops, "pswapd");
12025 }
12026 case X86::BI__builtin_ia32_rdrand16_step:
12027 case X86::BI__builtin_ia32_rdrand32_step:
12028 case X86::BI__builtin_ia32_rdrand64_step:
12029 case X86::BI__builtin_ia32_rdseed16_step:
12030 case X86::BI__builtin_ia32_rdseed32_step:
12031 case X86::BI__builtin_ia32_rdseed64_step: {
12032 Intrinsic::ID ID;
12033 switch (BuiltinID) {
12034 default: llvm_unreachable("Unsupported intrinsic!");
12035 case X86::BI__builtin_ia32_rdrand16_step:
12036 ID = Intrinsic::x86_rdrand_16;
12037 break;
12038 case X86::BI__builtin_ia32_rdrand32_step:
12039 ID = Intrinsic::x86_rdrand_32;
12040 break;
12041 case X86::BI__builtin_ia32_rdrand64_step:
12042 ID = Intrinsic::x86_rdrand_64;
12043 break;
12044 case X86::BI__builtin_ia32_rdseed16_step:
12045 ID = Intrinsic::x86_rdseed_16;
12046 break;
12047 case X86::BI__builtin_ia32_rdseed32_step:
12048 ID = Intrinsic::x86_rdseed_32;
12049 break;
12050 case X86::BI__builtin_ia32_rdseed64_step:
12051 ID = Intrinsic::x86_rdseed_64;
12052 break;
12053 }
12055 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
12056 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
12057 Ops[0]);
12058 return Builder.CreateExtractValue(Call, 1);
12059 }
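// Usage sketch (illustrative): unsigned int R;
// int Ok = __builtin_ia32_rdrand32_step(&R);
// The random value is stored through the pointer argument and the
// carry flag is returned; Ok is 1 only if R holds valid entropy.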
12060 case X86::BI__builtin_ia32_addcarryx_u32:
12061 case X86::BI__builtin_ia32_addcarryx_u64:
12062 case X86::BI__builtin_ia32_subborrow_u32:
12063 case X86::BI__builtin_ia32_subborrow_u64: {
12064 Intrinsic::ID IID;
12065 switch (BuiltinID) {
12066 default: llvm_unreachable("Unsupported intrinsic!");
12067 case X86::BI__builtin_ia32_addcarryx_u32:
12068 IID = Intrinsic::x86_addcarry_32;
12069 break;
12070 case X86::BI__builtin_ia32_addcarryx_u64:
12071 IID = Intrinsic::x86_addcarry_64;
12072 break;
12073 case X86::BI__builtin_ia32_subborrow_u32:
12074 IID = Intrinsic::x86_subborrow_32;
12075 break;
12076 case X86::BI__builtin_ia32_subborrow_u64:
12077 IID = Intrinsic::x86_subborrow_64;
12078 break;
12079 }
12081 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
12082 { Ops[0], Ops[1], Ops[2] });
12083 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
12084 Ops[3]);
12085 return Builder.CreateExtractValue(Call, 0);
12086 }
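// Usage sketch (illustrative): unsigned int Sum;
// unsigned char C = __builtin_ia32_addcarryx_u32(CarryIn, A, B, &Sum);
// Element 1 of the intrinsic's aggregate result (the sum) is stored
// through Ops[3]; element 0 (the carry-out) is the return value.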
12088 case X86::BI__builtin_ia32_fpclassps128_mask:
12089 case X86::BI__builtin_ia32_fpclassps256_mask:
12090 case X86::BI__builtin_ia32_fpclassps512_mask:
12091 case X86::BI__builtin_ia32_fpclasspd128_mask:
12092 case X86::BI__builtin_ia32_fpclasspd256_mask:
12093 case X86::BI__builtin_ia32_fpclasspd512_mask: {
12094 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
12095 Value *MaskIn = Ops[2];
12096 Ops.erase(&Ops[2]);
12098 Intrinsic::ID ID;
12099 switch (BuiltinID) {
12100 default: llvm_unreachable("Unsupported intrinsic!");
12101 case X86::BI__builtin_ia32_fpclassps128_mask:
12102 ID = Intrinsic::x86_avx512_fpclass_ps_128;
12103 break;
12104 case X86::BI__builtin_ia32_fpclassps256_mask:
12105 ID = Intrinsic::x86_avx512_fpclass_ps_256;
12106 break;
12107 case X86::BI__builtin_ia32_fpclassps512_mask:
12108 ID = Intrinsic::x86_avx512_fpclass_ps_512;
12109 break;
12110 case X86::BI__builtin_ia32_fpclasspd128_mask:
12111 ID = Intrinsic::x86_avx512_fpclass_pd_128;
12112 break;
12113 case X86::BI__builtin_ia32_fpclasspd256_mask:
12114 ID = Intrinsic::x86_avx512_fpclass_pd_256;
12115 break;
12116 case X86::BI__builtin_ia32_fpclasspd512_mask:
12117 ID = Intrinsic::x86_avx512_fpclass_pd_512;
12118 break;
12119 }
12121 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12122 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
12123 }
12125 case X86::BI__builtin_ia32_vp2intersect_q_512:
12126 case X86::BI__builtin_ia32_vp2intersect_q_256:
12127 case X86::BI__builtin_ia32_vp2intersect_q_128:
12128 case X86::BI__builtin_ia32_vp2intersect_d_512:
12129 case X86::BI__builtin_ia32_vp2intersect_d_256:
12130 case X86::BI__builtin_ia32_vp2intersect_d_128: {
12131 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
12133 Intrinsic::ID ID;
12134 switch (BuiltinID) {
12135 default: llvm_unreachable("Unsupported intrinsic!");
12136 case X86::BI__builtin_ia32_vp2intersect_q_512:
12137 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
12138 break;
12139 case X86::BI__builtin_ia32_vp2intersect_q_256:
12140 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
12141 break;
12142 case X86::BI__builtin_ia32_vp2intersect_q_128:
12143 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
12144 break;
12145 case X86::BI__builtin_ia32_vp2intersect_d_512:
12146 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
12147 break;
12148 case X86::BI__builtin_ia32_vp2intersect_d_256:
12149 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
12150 break;
12151 case X86::BI__builtin_ia32_vp2intersect_d_128:
12152 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
12153 break;
12154 }
12156 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
12157 Value *Result = Builder.CreateExtractValue(Call, 0);
12158 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
12159 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
12161 Result = Builder.CreateExtractValue(Call, 1);
12162 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
12163 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
12164 }
12166 case X86::BI__builtin_ia32_vpmultishiftqb128:
12167 case X86::BI__builtin_ia32_vpmultishiftqb256:
12168 case X86::BI__builtin_ia32_vpmultishiftqb512: {
12169 Intrinsic::ID ID;
12170 switch (BuiltinID) {
12171 default: llvm_unreachable("Unsupported intrinsic!");
12172 case X86::BI__builtin_ia32_vpmultishiftqb128:
12173 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
12174 break;
12175 case X86::BI__builtin_ia32_vpmultishiftqb256:
12176 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
12177 break;
12178 case X86::BI__builtin_ia32_vpmultishiftqb512:
12179 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
12180 break;
12181 }
12183 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12184 }
12186 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
12187 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
12188 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
12189 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
12190 Value *MaskIn = Ops[2];
12191 Ops.erase(&Ops[2]);
12193 Intrinsic::ID ID;
12194 switch (BuiltinID) {
12195 default: llvm_unreachable("Unsupported intrinsic!");
12196 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
12197 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
12198 break;
12199 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
12200 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
12201 break;
12202 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
12203 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
12204 break;
12205 }
12207 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12208 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
12209 }
12211 // packed comparison intrinsics
12212 case X86::BI__builtin_ia32_cmpeqps:
12213 case X86::BI__builtin_ia32_cmpeqpd:
12214 return getVectorFCmpIR(CmpInst::FCMP_OEQ);
12215 case X86::BI__builtin_ia32_cmpltps:
12216 case X86::BI__builtin_ia32_cmpltpd:
12217 return getVectorFCmpIR(CmpInst::FCMP_OLT);
12218 case X86::BI__builtin_ia32_cmpleps:
12219 case X86::BI__builtin_ia32_cmplepd:
12220 return getVectorFCmpIR(CmpInst::FCMP_OLE);
12221 case X86::BI__builtin_ia32_cmpunordps:
12222 case X86::BI__builtin_ia32_cmpunordpd:
12223 return getVectorFCmpIR(CmpInst::FCMP_UNO);
12224 case X86::BI__builtin_ia32_cmpneqps:
12225 case X86::BI__builtin_ia32_cmpneqpd:
12226 return getVectorFCmpIR(CmpInst::FCMP_UNE);
12227 case X86::BI__builtin_ia32_cmpnltps:
12228 case X86::BI__builtin_ia32_cmpnltpd:
12229 return getVectorFCmpIR(CmpInst::FCMP_UGE);
12230 case X86::BI__builtin_ia32_cmpnleps:
12231 case X86::BI__builtin_ia32_cmpnlepd:
12232 return getVectorFCmpIR(CmpInst::FCMP_UGT);
12233 case X86::BI__builtin_ia32_cmpordps:
12234 case X86::BI__builtin_ia32_cmpordpd:
12235 return getVectorFCmpIR(CmpInst::FCMP_ORD);
12236 case X86::BI__builtin_ia32_cmpps:
12237 case X86::BI__builtin_ia32_cmpps256:
12238 case X86::BI__builtin_ia32_cmppd:
12239 case X86::BI__builtin_ia32_cmppd256:
12240 case X86::BI__builtin_ia32_cmpps128_mask:
12241 case X86::BI__builtin_ia32_cmpps256_mask:
12242 case X86::BI__builtin_ia32_cmpps512_mask:
12243 case X86::BI__builtin_ia32_cmppd128_mask:
12244 case X86::BI__builtin_ia32_cmppd256_mask:
12245 case X86::BI__builtin_ia32_cmppd512_mask: {
12246 // Lowering vector comparisons to fcmp instructions, while
12247 // ignoring the requested signaling behaviour and
12248 // the requested rounding mode.
12249 // This is only possible as long as FENV_ACCESS is not implemented.
12250 // See also: https://reviews.llvm.org/D45616
12252 // The third argument is the comparison condition, an integer in the
12253 // range [0, 31].
12254 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
12256 // Lowering to IR fcmp instruction.
12257 // Ignoring requested signaling behaviour,
12258 // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
12259 FCmpInst::Predicate Pred;
12260 switch (CC) {
12261 case 0x00: Pred = FCmpInst::FCMP_OEQ; break;
12262 case 0x01: Pred = FCmpInst::FCMP_OLT; break;
12263 case 0x02: Pred = FCmpInst::FCMP_OLE; break;
12264 case 0x03: Pred = FCmpInst::FCMP_UNO; break;
12265 case 0x04: Pred = FCmpInst::FCMP_UNE; break;
12266 case 0x05: Pred = FCmpInst::FCMP_UGE; break;
12267 case 0x06: Pred = FCmpInst::FCMP_UGT; break;
12268 case 0x07: Pred = FCmpInst::FCMP_ORD; break;
12269 case 0x08: Pred = FCmpInst::FCMP_UEQ; break;
12270 case 0x09: Pred = FCmpInst::FCMP_ULT; break;
12271 case 0x0a: Pred = FCmpInst::FCMP_ULE; break;
12272 case 0x0b: Pred = FCmpInst::FCMP_FALSE; break;
12273 case 0x0c: Pred = FCmpInst::FCMP_ONE; break;
12274 case 0x0d: Pred = FCmpInst::FCMP_OGE; break;
12275 case 0x0e: Pred = FCmpInst::FCMP_OGT; break;
12276 case 0x0f: Pred = FCmpInst::FCMP_TRUE; break;
12277 case 0x10: Pred = FCmpInst::FCMP_OEQ; break;
12278 case 0x11: Pred = FCmpInst::FCMP_OLT; break;
12279 case 0x12: Pred = FCmpInst::FCMP_OLE; break;
12280 case 0x13: Pred = FCmpInst::FCMP_UNO; break;
12281 case 0x14: Pred = FCmpInst::FCMP_UNE; break;
12282 case 0x15: Pred = FCmpInst::FCMP_UGE; break;
12283 case 0x16: Pred = FCmpInst::FCMP_UGT; break;
12284 case 0x17: Pred = FCmpInst::FCMP_ORD; break;
12285 case 0x18: Pred = FCmpInst::FCMP_UEQ; break;
12286 case 0x19: Pred = FCmpInst::FCMP_ULT; break;
12287 case 0x1a: Pred = FCmpInst::FCMP_ULE; break;
12288 case 0x1b: Pred = FCmpInst::FCMP_FALSE; break;
12289 case 0x1c: Pred = FCmpInst::FCMP_ONE; break;
12290 case 0x1d: Pred = FCmpInst::FCMP_OGE; break;
12291 case 0x1e: Pred = FCmpInst::FCMP_OGT; break;
12292 case 0x1f: Pred = FCmpInst::FCMP_TRUE; break;
12293 default: llvm_unreachable("Unhandled CC");
12294 }
12296 // Builtins without the _mask suffix return a vector of integers
12297 // of the same width as the input vectors
12298 switch (BuiltinID) {
12299 case X86::BI__builtin_ia32_cmpps512_mask:
12300 case X86::BI__builtin_ia32_cmppd512_mask:
12301 case X86::BI__builtin_ia32_cmpps128_mask:
12302 case X86::BI__builtin_ia32_cmpps256_mask:
12303 case X86::BI__builtin_ia32_cmppd128_mask:
12304 case X86::BI__builtin_ia32_cmppd256_mask: {
12305 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
12306 Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
12307 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
12308 }
12309 default:
12310 return getVectorFCmpIR(Pred);
12311 }
12312 }
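// Worked example (illustrative): _CMP_GT_OS (0x0e) and _CMP_GT_OQ
// (0x1e) differ only in the signaling bit, so both rows of the table
// above map to FCMP_OGT.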
12314 // SSE scalar comparison intrinsics
12315 case X86::BI__builtin_ia32_cmpeqss:
12316 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
12317 case X86::BI__builtin_ia32_cmpltss:
12318 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
12319 case X86::BI__builtin_ia32_cmpless:
12320 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
12321 case X86::BI__builtin_ia32_cmpunordss:
12322 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
12323 case X86::BI__builtin_ia32_cmpneqss:
12324 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
12325 case X86::BI__builtin_ia32_cmpnltss:
12326 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
12327 case X86::BI__builtin_ia32_cmpnless:
12328 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
12329 case X86::BI__builtin_ia32_cmpordss:
12330 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
12331 case X86::BI__builtin_ia32_cmpeqsd:
12332 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
12333 case X86::BI__builtin_ia32_cmpltsd:
12334 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
12335 case X86::BI__builtin_ia32_cmplesd:
12336 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
12337 case X86::BI__builtin_ia32_cmpunordsd:
12338 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
12339 case X86::BI__builtin_ia32_cmpneqsd:
12340 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
12341 case X86::BI__builtin_ia32_cmpnltsd:
12342 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
12343 case X86::BI__builtin_ia32_cmpnlesd:
12344 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
12345 case X86::BI__builtin_ia32_cmpordsd:
12346 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
12348 // AVX512 bf16 intrinsics
12349 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
12350 Ops[2] = getMaskVecValue(*this, Ops[2],
12351 Ops[0]->getType()->getVectorNumElements());
12352 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
12353 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
12354 }
12355 case X86::BI__builtin_ia32_cvtsbf162ss_32:
12356 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
12358 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
12359 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
12360 Intrinsic::ID IID;
12361 switch (BuiltinID) {
12362 default: llvm_unreachable("Unsupported intrinsic!");
12363 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
12364 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
12365 break;
12366 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
12367 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
12368 break;
12369 }
12370 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
12371 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
12372 }
12374 case X86::BI__emul:
12375 case X86::BI__emulu: {
12376 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
12377 bool isSigned = (BuiltinID == X86::BI__emul);
12378 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
12379 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
12380 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
12381 }
12382 case X86::BI__mulh:
12383 case X86::BI__umulh:
12384 case X86::BI_mul128:
12385 case X86::BI_umul128: {
12386 llvm::Type *ResType = ConvertType(E->getType());
12387 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
12389 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
12390 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
12391 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
12393 Value *MulResult, *HigherBits;
12394 if (IsSigned) {
12395 MulResult = Builder.CreateNSWMul(LHS, RHS);
12396 HigherBits = Builder.CreateAShr(MulResult, 64);
12397 } else {
12398 MulResult = Builder.CreateNUWMul(LHS, RHS);
12399 HigherBits = Builder.CreateLShr(MulResult, 64);
12400 }
12401 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
12403 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
12404 return HigherBits;
12406 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
12407 Builder.CreateStore(HigherBits, HighBitsAddress);
12408 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
12409 }
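// Worked example (illustrative): __umulh(1ULL << 32, 1ULL << 32)
// widens both operands to i128, multiplies to 1 << 64, and the logical
// shift right by 64 returns 1 as the high half of the product.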
12411 case X86::BI__faststorefence: {
12412 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
12413 llvm::SyncScope::System);
12414 }
12415 case X86::BI__shiftleft128:
12416 case X86::BI__shiftright128: {
12417 // FIXME: Once fshl/fshr no longer add an unneeded and and cmov, do this:
12418 // llvm::Function *F = CGM.getIntrinsic(
12419 //   BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
12420 //   Int64Ty);
12421 // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
12422 // return Builder.CreateCall(F, Ops);
12423 llvm::Type *Int128Ty = Builder.getInt128Ty();
12424 Value *HighPart128 =
12425 Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64);
12426 Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty);
12427 Value *Val = Builder.CreateOr(HighPart128, LowPart128);
12428 Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
12429 llvm::ConstantInt::get(Int128Ty, 0x3f));
12430 Value *Res;
12431 if (BuiltinID == X86::BI__shiftleft128)
12432 Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
12433 else
12434 Res = Builder.CreateLShr(Val, Amt);
12435 return Builder.CreateTrunc(Res, Int64Ty);
12436 }
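// Worked example (illustrative): __shiftleft128(Lo, Hi, 8) forms the
// i128 value Hi:Lo, shifts it left by 8 (the amount is masked to six
// bits), and returns the upper 64 bits: (Hi << 8) | (Lo >> 56).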
12437 case X86::BI_ReadWriteBarrier:
12438 case X86::BI_ReadBarrier:
12439 case X86::BI_WriteBarrier: {
12440 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
12441 llvm::SyncScope::SingleThread);
12442 }
12443 case X86::BI_BitScanForward:
12444 case X86::BI_BitScanForward64:
12445 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
12446 case X86::BI_BitScanReverse:
12447 case X86::BI_BitScanReverse64:
12448 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
12450 case X86::BI_InterlockedAnd64:
12451 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
12452 case X86::BI_InterlockedExchange64:
12453 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
12454 case X86::BI_InterlockedExchangeAdd64:
12455 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
12456 case X86::BI_InterlockedExchangeSub64:
12457 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
12458 case X86::BI_InterlockedOr64:
12459 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
12460 case X86::BI_InterlockedXor64:
12461 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
12462 case X86::BI_InterlockedDecrement64:
12463 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
12464 case X86::BI_InterlockedIncrement64:
12465 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
12466 case X86::BI_InterlockedCompareExchange128: {
12467 // InterlockedCompareExchange128 doesn't directly refer to 128bit ints,
12468 // instead it takes pointers to 64bit ints for Destination and
12469 // ComparandResult, and exchange is taken as two 64bit ints (high & low).
12470 // The previous value is written to ComparandResult, and success is
12471 // returned.
12473 llvm::Type *Int128Ty = Builder.getInt128Ty();
12474 llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
12476 Value *Destination =
12477 Builder.CreateBitCast(Ops[0], Int128PtrTy);
12478 Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
12479 Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
12480 Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
12481 getContext().toCharUnitsFromBits(128));
12483 Value *Exchange = Builder.CreateOr(
12484 Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
12485 ExchangeLow128);
12487 Value *Comparand = Builder.CreateLoad(ComparandResult);
12489 AtomicCmpXchgInst *CXI =
12490 Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
12491 AtomicOrdering::SequentiallyConsistent,
12492 AtomicOrdering::SequentiallyConsistent);
12493 CXI->setVolatile(true);
12495 // Write the result back to the inout pointer.
12496 Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);
12498 // Get the success boolean and zero extend it to i8.
12499 Value *Success = Builder.CreateExtractValue(CXI, 1);
12500 return Builder.CreateZExt(Success, ConvertType(E->getType()));
12501 }
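// Usage sketch (illustrative): __int64 Cmp[2] = {OldLo, OldHi};
// unsigned char Ok =
//     _InterlockedCompareExchange128(Dest, NewHi, NewLo, Cmp);
// On failure, Cmp is overwritten with the value observed at Dest.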
12503 case X86::BI_AddressOfReturnAddress: {
12504 Function *F =
12505 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
12506 return Builder.CreateCall(F);
12507 }
12508 case X86::BI__stosb: {
12509 // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
12510 // instruction, but it will create a memset that won't be optimized away.
12511 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align::None(), true);
12512 }
12513 case X86::BI__ud2:
12514 // llvm.trap makes a ud2a instruction on x86.
12515 return EmitTrapCall(Intrinsic::trap);
12516 case X86::BI__int2c: {
12517 // This syscall signals a driver assertion failure in x86 NT kernels.
12518 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
12519 llvm::InlineAsm *IA =
12520 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
12521 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
12522 getLLVMContext(), llvm::AttributeList::FunctionIndex,
12523 llvm::Attribute::NoReturn);
12524 llvm::CallInst *CI = Builder.CreateCall(IA);
12525 CI->setAttributes(NoReturnAttr);
12526 return CI;
12527 }
12528 case X86::BI__readfsbyte:
12529 case X86::BI__readfsword:
12530 case X86::BI__readfsdword:
12531 case X86::BI__readfsqword: {
12532 llvm::Type *IntTy = ConvertType(E->getType());
12533 Value *Ptr =
12534 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
12535 LoadInst *Load = Builder.CreateAlignedLoad(
12536 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
12537 Load->setVolatile(true);
12538 return Load;
12539 }
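// Note (assumed backend convention): address space 257 above is the
// x86 FS segment; the __readgs* cases below use address space 256,
// the GS segment.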
12540 case X86::BI__readgsbyte:
12541 case X86::BI__readgsword:
12542 case X86::BI__readgsdword:
12543 case X86::BI__readgsqword: {
12544 llvm::Type *IntTy = ConvertType(E->getType());
12545 Value *Ptr =
12546 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
12547 LoadInst *Load = Builder.CreateAlignedLoad(
12548 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
12549 Load->setVolatile(true);
12550 return Load;
12551 }
12552 case X86::BI__builtin_ia32_paddsb512:
12553 case X86::BI__builtin_ia32_paddsw512:
12554 case X86::BI__builtin_ia32_paddsb256:
12555 case X86::BI__builtin_ia32_paddsw256:
12556 case X86::BI__builtin_ia32_paddsb128:
12557 case X86::BI__builtin_ia32_paddsw128:
12558 return EmitX86AddSubSatExpr(*this, Ops, true, true);
12559 case X86::BI__builtin_ia32_paddusb512:
12560 case X86::BI__builtin_ia32_paddusw512:
12561 case X86::BI__builtin_ia32_paddusb256:
12562 case X86::BI__builtin_ia32_paddusw256:
12563 case X86::BI__builtin_ia32_paddusb128:
12564 case X86::BI__builtin_ia32_paddusw128:
12565 return EmitX86AddSubSatExpr(*this, Ops, false, true);
12566 case X86::BI__builtin_ia32_psubsb512:
12567 case X86::BI__builtin_ia32_psubsw512:
12568 case X86::BI__builtin_ia32_psubsb256:
12569 case X86::BI__builtin_ia32_psubsw256:
12570 case X86::BI__builtin_ia32_psubsb128:
12571 case X86::BI__builtin_ia32_psubsw128:
12572 return EmitX86AddSubSatExpr(*this, Ops, true, false);
12573 case X86::BI__builtin_ia32_psubusb512:
12574 case X86::BI__builtin_ia32_psubusw512:
12575 case X86::BI__builtin_ia32_psubusb256:
12576 case X86::BI__builtin_ia32_psubusw256:
12577 case X86::BI__builtin_ia32_psubusb128:
12578 case X86::BI__builtin_ia32_psubusw128:
12579 return EmitX86AddSubSatExpr(*this, Ops, false, false);
12580 }
12581 }
12583 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
12584 const CallExpr *E) {
12585 SmallVector<Value*, 4> Ops;
12587 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
12588 Ops.push_back(EmitScalarExpr(E->getArg(i)));
12590 Intrinsic::ID ID = Intrinsic::not_intrinsic;
12592 switch (BuiltinID) {
12593 default: return nullptr;
12595 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
12596 // call __builtin_readcyclecounter.
12597 case PPC::BI__builtin_ppc_get_timebase:
12598 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
12600 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
12601 case PPC::BI__builtin_altivec_lvx:
12602 case PPC::BI__builtin_altivec_lvxl:
12603 case PPC::BI__builtin_altivec_lvebx:
12604 case PPC::BI__builtin_altivec_lvehx:
12605 case PPC::BI__builtin_altivec_lvewx:
12606 case PPC::BI__builtin_altivec_lvsl:
12607 case PPC::BI__builtin_altivec_lvsr:
12608 case PPC::BI__builtin_vsx_lxvd2x:
12609 case PPC::BI__builtin_vsx_lxvw4x:
12610 case PPC::BI__builtin_vsx_lxvd2x_be:
12611 case PPC::BI__builtin_vsx_lxvw4x_be:
12612 case PPC::BI__builtin_vsx_lxvl:
12613 case PPC::BI__builtin_vsx_lxvll:
12614 {
12615 if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
12616 BuiltinID == PPC::BI__builtin_vsx_lxvll) {
12617 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
12618 } else {
12619 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
12620 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
12621 Ops.pop_back();
12622 }
12624 switch (BuiltinID) {
12625 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
12626 case PPC::BI__builtin_altivec_lvx:
12627 ID = Intrinsic::ppc_altivec_lvx;
12628 break;
12629 case PPC::BI__builtin_altivec_lvxl:
12630 ID = Intrinsic::ppc_altivec_lvxl;
12631 break;
12632 case PPC::BI__builtin_altivec_lvebx:
12633 ID = Intrinsic::ppc_altivec_lvebx;
12634 break;
12635 case PPC::BI__builtin_altivec_lvehx:
12636 ID = Intrinsic::ppc_altivec_lvehx;
12637 break;
12638 case PPC::BI__builtin_altivec_lvewx:
12639 ID = Intrinsic::ppc_altivec_lvewx;
12640 break;
12641 case PPC::BI__builtin_altivec_lvsl:
12642 ID = Intrinsic::ppc_altivec_lvsl;
12643 break;
12644 case PPC::BI__builtin_altivec_lvsr:
12645 ID = Intrinsic::ppc_altivec_lvsr;
12646 break;
12647 case PPC::BI__builtin_vsx_lxvd2x:
12648 ID = Intrinsic::ppc_vsx_lxvd2x;
12649 break;
12650 case PPC::BI__builtin_vsx_lxvw4x:
12651 ID = Intrinsic::ppc_vsx_lxvw4x;
12652 break;
12653 case PPC::BI__builtin_vsx_lxvd2x_be:
12654 ID = Intrinsic::ppc_vsx_lxvd2x_be;
12655 break;
12656 case PPC::BI__builtin_vsx_lxvw4x_be:
12657 ID = Intrinsic::ppc_vsx_lxvw4x_be;
12658 break;
12659 case PPC::BI__builtin_vsx_lxvl:
12660 ID = Intrinsic::ppc_vsx_lxvl;
12661 break;
12662 case PPC::BI__builtin_vsx_lxvll:
12663 ID = Intrinsic::ppc_vsx_lxvll;
12664 break;
12665 }
12666 llvm::Function *F = CGM.getIntrinsic(ID);
12667 return Builder.CreateCall(F, Ops, "");
12670 // vec_st, vec_xst_be
12671 case PPC::BI__builtin_altivec_stvx:
12672 case PPC::BI__builtin_altivec_stvxl:
12673 case PPC::BI__builtin_altivec_stvebx:
12674 case PPC::BI__builtin_altivec_stvehx:
12675 case PPC::BI__builtin_altivec_stvewx:
12676 case PPC::BI__builtin_vsx_stxvd2x:
12677 case PPC::BI__builtin_vsx_stxvw4x:
12678 case PPC::BI__builtin_vsx_stxvd2x_be:
12679 case PPC::BI__builtin_vsx_stxvw4x_be:
12680 case PPC::BI__builtin_vsx_stxvl:
12681 case PPC::BI__builtin_vsx_stxvll:
12682 {
12683 if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
12684 BuiltinID == PPC::BI__builtin_vsx_stxvll) {
12685 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
12686 } else {
12687 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
12688 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
12689 Ops.pop_back();
12690 }
12692 switch (BuiltinID) {
12693 default: llvm_unreachable("Unsupported st intrinsic!");
12694 case PPC::BI__builtin_altivec_stvx:
12695 ID = Intrinsic::ppc_altivec_stvx;
12696 break;
12697 case PPC::BI__builtin_altivec_stvxl:
12698 ID = Intrinsic::ppc_altivec_stvxl;
12699 break;
12700 case PPC::BI__builtin_altivec_stvebx:
12701 ID = Intrinsic::ppc_altivec_stvebx;
12702 break;
12703 case PPC::BI__builtin_altivec_stvehx:
12704 ID = Intrinsic::ppc_altivec_stvehx;
12705 break;
12706 case PPC::BI__builtin_altivec_stvewx:
12707 ID = Intrinsic::ppc_altivec_stvewx;
12708 break;
12709 case PPC::BI__builtin_vsx_stxvd2x:
12710 ID = Intrinsic::ppc_vsx_stxvd2x;
12711 break;
12712 case PPC::BI__builtin_vsx_stxvw4x:
12713 ID = Intrinsic::ppc_vsx_stxvw4x;
12714 break;
12715 case PPC::BI__builtin_vsx_stxvd2x_be:
12716 ID = Intrinsic::ppc_vsx_stxvd2x_be;
12717 break;
12718 case PPC::BI__builtin_vsx_stxvw4x_be:
12719 ID = Intrinsic::ppc_vsx_stxvw4x_be;
12720 break;
12721 case PPC::BI__builtin_vsx_stxvl:
12722 ID = Intrinsic::ppc_vsx_stxvl;
12723 break;
12724 case PPC::BI__builtin_vsx_stxvll:
12725 ID = Intrinsic::ppc_vsx_stxvll;
12726 break;
12727 }
12728 llvm::Function *F = CGM.getIntrinsic(ID);
12729 return Builder.CreateCall(F, Ops, "");
12730 }
12732 case PPC::BI__builtin_vsx_xvsqrtsp:
12733 case PPC::BI__builtin_vsx_xvsqrtdp: {
12734 llvm::Type *ResultType = ConvertType(E->getType());
12735 Value *X = EmitScalarExpr(E->getArg(0));
12736 ID = Intrinsic::sqrt;
12737 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
12738 return Builder.CreateCall(F, X);
12739 }
12740 // Count leading zeros
12741 case PPC::BI__builtin_altivec_vclzb:
12742 case PPC::BI__builtin_altivec_vclzh:
12743 case PPC::BI__builtin_altivec_vclzw:
12744 case PPC::BI__builtin_altivec_vclzd: {
12745 llvm::Type *ResultType = ConvertType(E->getType());
12746 Value *X = EmitScalarExpr(E->getArg(0));
12747 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
12748 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
12749 return Builder.CreateCall(F, {X, Undef});
12750 }
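// The i1 'false' operand here (and in the vctz case below) is llvm.ctlz's /
// llvm.cttz's is-zero-undef flag; passing false keeps the intrinsic fully
// defined at zero (it returns the element width in bits), matching the
// vclz*/vctz* instruction semantics.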
12751 case PPC::BI__builtin_altivec_vctzb:
12752 case PPC::BI__builtin_altivec_vctzh:
12753 case PPC::BI__builtin_altivec_vctzw:
12754 case PPC::BI__builtin_altivec_vctzd: {
12755 llvm::Type *ResultType = ConvertType(E->getType());
12756 Value *X = EmitScalarExpr(E->getArg(0));
12757 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
12758 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
12759 return Builder.CreateCall(F, {X, Undef});
12760 }
12761 case PPC::BI__builtin_altivec_vpopcntb:
12762 case PPC::BI__builtin_altivec_vpopcnth:
12763 case PPC::BI__builtin_altivec_vpopcntw:
12764 case PPC::BI__builtin_altivec_vpopcntd: {
12765 llvm::Type *ResultType = ConvertType(E->getType());
12766 Value *X = EmitScalarExpr(E->getArg(0));
12767 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
12768 return Builder.CreateCall(F, X);
12769 }
12771 case PPC::BI__builtin_vsx_xvcpsgnsp:
12772 case PPC::BI__builtin_vsx_xvcpsgndp: {
12773 llvm::Type *ResultType = ConvertType(E->getType());
12774 Value *X = EmitScalarExpr(E->getArg(0));
12775 Value *Y = EmitScalarExpr(E->getArg(1));
12776 ID = Intrinsic::copysign;
12777 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
12778 return Builder.CreateCall(F, {X, Y});
12779 }
12780 // Rounding/truncation
12781 case PPC::BI__builtin_vsx_xvrspip:
12782 case PPC::BI__builtin_vsx_xvrdpip:
12783 case PPC::BI__builtin_vsx_xvrdpim:
12784 case PPC::BI__builtin_vsx_xvrspim:
12785 case PPC::BI__builtin_vsx_xvrdpi:
12786 case PPC::BI__builtin_vsx_xvrspi:
12787 case PPC::BI__builtin_vsx_xvrdpic:
12788 case PPC::BI__builtin_vsx_xvrspic:
12789 case PPC::BI__builtin_vsx_xvrdpiz:
12790 case PPC::BI__builtin_vsx_xvrspiz: {
12791 llvm::Type *ResultType = ConvertType(E->getType());
12792 Value *X = EmitScalarExpr(E->getArg(0));
12793 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
12794 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
12795 ID = Intrinsic::floor;
12796 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
12797 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
12798 ID = Intrinsic::round;
12799 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
12800 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
12801 ID = Intrinsic::nearbyint;
12802 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
12803 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
12804 ID = Intrinsic::ceil;
12805 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
12806 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
12807 ID = Intrinsic::trunc;
12808 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
12809 return Builder.CreateCall(F, X);
12810 }
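// In summary, the suffix -> intrinsic mapping above is:
//   xvr*im -> llvm.floor, xvr*i -> llvm.round, xvr*ic -> llvm.nearbyint,
//   xvr*ip -> llvm.ceil, xvr*iz -> llvm.trunc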
12813 case PPC::BI__builtin_vsx_xvabsdp:
12814 case PPC::BI__builtin_vsx_xvabssp: {
12815 llvm::Type *ResultType = ConvertType(E->getType());
12816 Value *X = EmitScalarExpr(E->getArg(0));
12817 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
12818 return Builder.CreateCall(F, X);
12819 }
12821 // FMA variations
12822 case PPC::BI__builtin_vsx_xvmaddadp:
12823 case PPC::BI__builtin_vsx_xvmaddasp:
12824 case PPC::BI__builtin_vsx_xvnmaddadp:
12825 case PPC::BI__builtin_vsx_xvnmaddasp:
12826 case PPC::BI__builtin_vsx_xvmsubadp:
12827 case PPC::BI__builtin_vsx_xvmsubasp:
12828 case PPC::BI__builtin_vsx_xvnmsubadp:
12829 case PPC::BI__builtin_vsx_xvnmsubasp: {
12830 llvm::Type *ResultType = ConvertType(E->getType());
12831 Value *X = EmitScalarExpr(E->getArg(0));
12832 Value *Y = EmitScalarExpr(E->getArg(1));
12833 Value *Z = EmitScalarExpr(E->getArg(2));
12834 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
12835 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
12836 switch (BuiltinID) {
12837 case PPC::BI__builtin_vsx_xvmaddadp:
12838 case PPC::BI__builtin_vsx_xvmaddasp:
12839 return Builder.CreateCall(F, {X, Y, Z});
12840 case PPC::BI__builtin_vsx_xvnmaddadp:
12841 case PPC::BI__builtin_vsx_xvnmaddasp:
12842 return Builder.CreateFSub(Zero,
12843 Builder.CreateCall(F, {X, Y, Z}), "sub");
12844 case PPC::BI__builtin_vsx_xvmsubadp:
12845 case PPC::BI__builtin_vsx_xvmsubasp:
12846 return Builder.CreateCall(F,
12847 {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
12848 case PPC::BI__builtin_vsx_xvnmsubadp:
12849 case PPC::BI__builtin_vsx_xvnmsubasp:
12850 Value *FsubRes =
12851 Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
12852 return Builder.CreateFSub(Zero, FsubRes, "sub");
12853 }
12854 llvm_unreachable("Unknown FMA operation");
12855 return nullptr; // Suppress no-return warning
12856 }
12858 case PPC::BI__builtin_vsx_insertword: {
12859 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
12861 // Third argument is a compile time constant int. It must be clamped
12862 // to the range [0, 12].
12863 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
12864 assert(ArgCI &&
12865 "Third arg to xxinsertw intrinsic must be constant integer");
12866 const int64_t MaxIndex = 12;
12867 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
12869 // The builtin semantics don't exactly match the xxinsertw instruction's
12870 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
12871 // word from the first argument, and inserts it in the second argument. The
12872 // instruction extracts the word from its second input register and inserts
12873 // it into its first input register, so swap the first and second arguments.
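// For example, with Index = 4 on a little-endian target: the operands are
// swapped, the doublewords of the destination vector are reversed below,
// and the xxinsertw immediate becomes 12 - 4 = 8.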
12874 std::swap(Ops[0], Ops[1]);
12876 // Need to cast the second argument from a vector of unsigned int to a
12877 // vector of long long.
12878 Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
12880 if (getTarget().isLittleEndian()) {
12881 // Create a shuffle mask of (1, 0)
12882 Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
12883 ConstantInt::get(Int32Ty, 0)
12884 };
12885 Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
12887 // Reverse the double words in the vector we will extract from.
12888 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
12889 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask);
12891 // Reverse the index.
12892 Index = MaxIndex - Index;
12893 }
12895 // Intrinsic expects the first arg to be a vector of int.
12896 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
12897 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
12898 return Builder.CreateCall(F, Ops);
12899 }
12901 case PPC::BI__builtin_vsx_extractuword: {
12902 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
12904 // Intrinsic expects the first argument to be a vector of doublewords.
12905 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
12907 // The second argument is a compile time constant int that needs to
12908 // be clamped to the range [0, 12].
12909 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
12910 assert(ArgCI &&
12911 "Second Arg to xxextractuw intrinsic must be a constant integer!");
12912 const int64_t MaxIndex = 12;
12913 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
12915 if (getTarget().isLittleEndian()) {
12916 // Reverse the index.
12917 Index = MaxIndex - Index;
12918 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
12920 // Emit the call, then reverse the double words of the results vector.
12921 Value *Call = Builder.CreateCall(F, Ops);
12923 // Create a shuffle mask of (1, 0)
12924 Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
12925 ConstantInt::get(Int32Ty, 0)
12926 };
12927 Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
12929 Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask);
12930 return ShuffleCall;
12931 }
12932 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
12933 return Builder.CreateCall(F, Ops);
12934 }
12937 case PPC::BI__builtin_vsx_xxpermdi: {
12938 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
12939 assert(ArgCI && "Third arg must be constant integer!");
12941 unsigned Index = ArgCI->getZExtValue();
12942 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
12943 Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
12945 // Account for endianness by treating this as just a shuffle. So we use the
12946 // same indices for both LE and BE in order to produce expected results in
12947 // both cases.
12948 unsigned ElemIdx0 = (Index & 2) >> 1;
12949 unsigned ElemIdx1 = 2 + (Index & 1);
12951 Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0),
12952 ConstantInt::get(Int32Ty, ElemIdx1)};
12953 Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
12955 Value *ShuffleCall =
12956 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
12957 QualType BIRetType = E->getType();
12958 auto RetTy = ConvertType(BIRetType);
12959 return Builder.CreateBitCast(ShuffleCall, RetTy);
12960 }
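// For example, Index = 3 selects doubleword 1 of Ops[0] ((3 & 2) >> 1 = 1)
// and doubleword 1 of Ops[1] (2 + (3 & 1) = 3 in concatenated numbering),
// i.e. a shuffle mask of <1, 3>.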
12962 case PPC::BI__builtin_vsx_xxsldwi: {
12963 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
12964 assert(ArgCI && "Third argument must be a compile time constant");
12965 unsigned Index = ArgCI->getZExtValue() & 0x3;
12966 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
12967 Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4));
12969 // Create a shuffle mask
12970 unsigned ElemIdx0;
12971 unsigned ElemIdx1;
12972 unsigned ElemIdx2;
12973 unsigned ElemIdx3;
12974 if (getTarget().isLittleEndian()) {
12975 // Little endian element N comes from element 8+N-Index of the
12976 // concatenated wide vector (of course, using modulo arithmetic on
12977 // the total number of elements).
12978 ElemIdx0 = (8 - Index) % 8;
12979 ElemIdx1 = (9 - Index) % 8;
12980 ElemIdx2 = (10 - Index) % 8;
12981 ElemIdx3 = (11 - Index) % 8;
12982 } else {
12983 // Big endian ElemIdx<N> = Index + N
12984 ElemIdx0 = Index;
12985 ElemIdx1 = Index + 1;
12986 ElemIdx2 = Index + 2;
12987 ElemIdx3 = Index + 3;
12988 }
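// For example, with Index = 1 this produces the mask <7, 0, 1, 2> on
// little-endian targets and <1, 2, 3, 4> on big-endian targets.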
12990 Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0),
12991 ConstantInt::get(Int32Ty, ElemIdx1),
12992 ConstantInt::get(Int32Ty, ElemIdx2),
12993 ConstantInt::get(Int32Ty, ElemIdx3)};
12995 Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
12996 Value *ShuffleCall =
12997 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
12998 QualType BIRetType = E->getType();
12999 auto RetTy = ConvertType(BIRetType);
13000 return Builder.CreateBitCast(ShuffleCall, RetTy);
13001 }
13003 case PPC::BI__builtin_pack_vector_int128: {
13004 bool isLittleEndian = getTarget().isLittleEndian();
13005 Value *UndefValue =
13006 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), 2));
13007 Value *Res = Builder.CreateInsertElement(
13008 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
13009 Res = Builder.CreateInsertElement(Res, Ops[1],
13010 (uint64_t)(isLittleEndian ? 0 : 1));
13011 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
13012 }
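// The insert order above means Ops[0] always lands in the most-significant
// doubleword of the resulting __int128, regardless of endianness (element 1
// is the high half on little-endian, element 0 on big-endian).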
13014 case PPC::BI__builtin_unpack_vector_int128: {
13015 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
13016 Value *Unpacked = Builder.CreateBitCast(
13017 Ops[0], llvm::VectorType::get(ConvertType(E->getType()), 2));
13019 if (getTarget().isLittleEndian())
13020 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
13022 return Builder.CreateExtractElement(Unpacked, Index);
13023 }
13024 }
13025 }
13027 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
13028 const CallExpr *E) {
13029 switch (BuiltinID) {
13030 case AMDGPU::BI__builtin_amdgcn_div_scale:
13031 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
13032 // Translate from the intrinsic's struct return to the builtin's out
13033 // argument.
13035 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
13037 llvm::Value *X = EmitScalarExpr(E->getArg(0));
13038 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
13039 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
13041 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
13042 X->getType());
13044 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
13046 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
13047 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
13049 llvm::Type *RealFlagType
13050 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
13052 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
13053 Builder.CreateStore(FlagExt, FlagOutPtr);
13054 return Result;
13055 }
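// In other words, a call such as __builtin_amdgcn_div_scale(x, y, z, &flag)
// lowers to llvm.amdgcn.div.scale, which returns a {result, i1} pair; the
// i1 is zero-extended and stored through the fourth argument, and the
// numeric result becomes the builtin's return value.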
13056 case AMDGPU::BI__builtin_amdgcn_div_fmas:
13057 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
13058 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
13059 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
13060 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
13061 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
13063 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
13064 Src0->getType());
13065 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
13066 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
13067 }
13069 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
13070 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
13071 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
13072 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
13073 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
13074 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
13075 llvm::SmallVector<llvm::Value *, 6> Args;
13076 for (unsigned I = 0; I != E->getNumArgs(); ++I)
13077 Args.push_back(EmitScalarExpr(E->getArg(I)));
13078 assert(Args.size() == 5 || Args.size() == 6);
13079 if (Args.size() == 5)
13080 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
13081 llvm::Function *F =
13082 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
13083 return Builder.CreateCall(F, Args);
13084 }
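// __builtin_amdgcn_mov_dpp takes five arguments, so an undef "old value"
// operand is prepended above; both builtins then lower to the six-operand
// llvm.amdgcn.update.dpp intrinsic.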
13085 case AMDGPU::BI__builtin_amdgcn_div_fixup:
13086 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
13087 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
13088 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
13089 case AMDGPU::BI__builtin_amdgcn_trig_preop:
13090 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
13091 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
13092 case AMDGPU::BI__builtin_amdgcn_rcp:
13093 case AMDGPU::BI__builtin_amdgcn_rcpf:
13094 case AMDGPU::BI__builtin_amdgcn_rcph:
13095 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
13096 case AMDGPU::BI__builtin_amdgcn_rsq:
13097 case AMDGPU::BI__builtin_amdgcn_rsqf:
13098 case AMDGPU::BI__builtin_amdgcn_rsqh:
13099 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
13100 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
13101 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
13102 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
13103 case AMDGPU::BI__builtin_amdgcn_sinf:
13104 case AMDGPU::BI__builtin_amdgcn_sinh:
13105 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
13106 case AMDGPU::BI__builtin_amdgcn_cosf:
13107 case AMDGPU::BI__builtin_amdgcn_cosh:
13108 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
13109 case AMDGPU::BI__builtin_amdgcn_log_clampf:
13110 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
13111 case AMDGPU::BI__builtin_amdgcn_ldexp:
13112 case AMDGPU::BI__builtin_amdgcn_ldexpf:
13113 case AMDGPU::BI__builtin_amdgcn_ldexph:
13114 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
13115 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
13116 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
13117 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
13118 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
13119 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
13120 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
13121 Value *Src0 = EmitScalarExpr(E->getArg(0));
13122 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
13123 { Builder.getInt32Ty(), Src0->getType() });
13124 return Builder.CreateCall(F, Src0);
13125 }
13126 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
13127 Value *Src0 = EmitScalarExpr(E->getArg(0));
13128 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
13129 { Builder.getInt16Ty(), Src0->getType() });
13130 return Builder.CreateCall(F, Src0);
13131 }
13132 case AMDGPU::BI__builtin_amdgcn_fract:
13133 case AMDGPU::BI__builtin_amdgcn_fractf:
13134 case AMDGPU::BI__builtin_amdgcn_fracth:
13135 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
13136 case AMDGPU::BI__builtin_amdgcn_lerp:
13137 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
13138 case AMDGPU::BI__builtin_amdgcn_ubfe:
13139 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
13140 case AMDGPU::BI__builtin_amdgcn_sbfe:
13141 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
13142 case AMDGPU::BI__builtin_amdgcn_uicmp:
13143 case AMDGPU::BI__builtin_amdgcn_uicmpl:
13144 case AMDGPU::BI__builtin_amdgcn_sicmp:
13145 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
13146 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
13147 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
13148 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
13150 // FIXME-GFX10: How should 32 bit mask be handled?
13151 Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
13152 { Builder.getInt64Ty(), Src0->getType() });
13153 return Builder.CreateCall(F, { Src0, Src1, Src2 });
13154 }
13155 case AMDGPU::BI__builtin_amdgcn_fcmp:
13156 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
13157 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
13158 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
13159 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
13161 // FIXME-GFX10: How should 32 bit mask be handled?
13162 Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
13163 { Builder.getInt64Ty(), Src0->getType() });
13164 return Builder.CreateCall(F, { Src0, Src1, Src2 });
13165 }
13166 case AMDGPU::BI__builtin_amdgcn_class:
13167 case AMDGPU::BI__builtin_amdgcn_classf:
13168 case AMDGPU::BI__builtin_amdgcn_classh:
13169 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
13170 case AMDGPU::BI__builtin_amdgcn_fmed3f:
13171 case AMDGPU::BI__builtin_amdgcn_fmed3h:
13172 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
13173 case AMDGPU::BI__builtin_amdgcn_ds_append:
13174 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
13175 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
13176 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
13177 Value *Src0 = EmitScalarExpr(E->getArg(0));
13178 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
13179 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
13180 }
13181 case AMDGPU::BI__builtin_amdgcn_read_exec: {
13182 CallInst *CI = cast<CallInst>(
13183 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
13184 CI->setConvergent();
13185 return CI;
13186 }
13187 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
13188 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
13189 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
13190 "exec_lo" : "exec_hi";
13191 CallInst *CI = cast<CallInst>(
13192 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName));
13193 CI->setConvergent();
13194 return CI;
13195 }
13197 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
13198 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
13199 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
13200 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
13201 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
13202 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
13205 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
13206 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
13207 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
13208 case AMDGPU::BI__builtin_r600_read_tidig_x:
13209 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
13210 case AMDGPU::BI__builtin_r600_read_tidig_y:
13211 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
13212 case AMDGPU::BI__builtin_r600_read_tidig_z:
13213 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
13214 default:
13215 return nullptr;
13216 }
13217 }
13219 /// Handle a SystemZ function in which the final argument is a pointer
13220 /// to an int that receives the post-instruction CC value. At the LLVM level
13221 /// this is represented as a function that returns a {result, cc} pair.
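/// For example, a call such as __builtin_s390_vpkshs(a, b, &cc) lowers to
/// llvm.s390.vpkshs: element 1 of the returned pair is stored through the
/// 'cc' pointer and element 0 is returned as the builtin's result.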
13222 static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
13223 unsigned IntrinsicID,
13224 const CallExpr *E) {
13225 unsigned NumArgs = E->getNumArgs() - 1;
13226 SmallVector<Value *, 8> Args(NumArgs);
13227 for (unsigned I = 0; I < NumArgs; ++I)
13228 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
13229 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
13230 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
13231 Value *Call = CGF.Builder.CreateCall(F, Args);
13232 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
13233 CGF.Builder.CreateStore(CC, CCPtr);
13234 return CGF.Builder.CreateExtractValue(Call, 0);
13235 }
13237 Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
13238 const CallExpr *E) {
13239 switch (BuiltinID) {
13240 case SystemZ::BI__builtin_tbegin: {
13241 Value *TDB = EmitScalarExpr(E->getArg(0));
13242 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
13243 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
13244 return Builder.CreateCall(F, {TDB, Control});
13245 }
13246 case SystemZ::BI__builtin_tbegin_nofloat: {
13247 Value *TDB = EmitScalarExpr(E->getArg(0));
13248 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
13249 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
13250 return Builder.CreateCall(F, {TDB, Control});
13251 }
13252 case SystemZ::BI__builtin_tbeginc: {
13253 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
13254 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
13255 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
13256 return Builder.CreateCall(F, {TDB, Control});
13257 }
13258 case SystemZ::BI__builtin_tabort: {
13259 Value *Data = EmitScalarExpr(E->getArg(0));
13260 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
13261 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
13262 }
13263 case SystemZ::BI__builtin_non_tx_store: {
13264 Value *Address = EmitScalarExpr(E->getArg(0));
13265 Value *Data = EmitScalarExpr(E->getArg(1));
13266 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
13267 return Builder.CreateCall(F, {Data, Address});
13268 }
13270 // Vector builtins. Note that most vector builtins are mapped automatically
13271 // to target-specific LLVM intrinsics. The ones handled specially here can
13272 // be represented via standard LLVM IR, which is preferable to enable common
13273 // LLVM optimizations.
13275 case SystemZ::BI__builtin_s390_vpopctb:
13276 case SystemZ::BI__builtin_s390_vpopcth:
13277 case SystemZ::BI__builtin_s390_vpopctf:
13278 case SystemZ::BI__builtin_s390_vpopctg: {
13279 llvm::Type *ResultType = ConvertType(E->getType());
13280 Value *X = EmitScalarExpr(E->getArg(0));
13281 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
13282 return Builder.CreateCall(F, X);
13283 }
13285 case SystemZ::BI__builtin_s390_vclzb:
13286 case SystemZ::BI__builtin_s390_vclzh:
13287 case SystemZ::BI__builtin_s390_vclzf:
13288 case SystemZ::BI__builtin_s390_vclzg: {
13289 llvm::Type *ResultType = ConvertType(E->getType());
13290 Value *X = EmitScalarExpr(E->getArg(0));
13291 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
13292 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
13293 return Builder.CreateCall(F, {X, Undef});
13294 }
13296 case SystemZ::BI__builtin_s390_vctzb:
13297 case SystemZ::BI__builtin_s390_vctzh:
13298 case SystemZ::BI__builtin_s390_vctzf:
13299 case SystemZ::BI__builtin_s390_vctzg: {
13300 llvm::Type *ResultType = ConvertType(E->getType());
13301 Value *X = EmitScalarExpr(E->getArg(0));
13302 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
13303 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
13304 return Builder.CreateCall(F, {X, Undef});
13305 }
13307 case SystemZ::BI__builtin_s390_vfsqsb:
13308 case SystemZ::BI__builtin_s390_vfsqdb: {
13309 llvm::Type *ResultType = ConvertType(E->getType());
13310 Value *X = EmitScalarExpr(E->getArg(0));
13311 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
13312 return Builder.CreateCall(F, X);
13313 }
13314 case SystemZ::BI__builtin_s390_vfmasb:
13315 case SystemZ::BI__builtin_s390_vfmadb: {
13316 llvm::Type *ResultType = ConvertType(E->getType());
13317 Value *X = EmitScalarExpr(E->getArg(0));
13318 Value *Y = EmitScalarExpr(E->getArg(1));
13319 Value *Z = EmitScalarExpr(E->getArg(2));
13320 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13321 return Builder.CreateCall(F, {X, Y, Z});
13322 }
13323 case SystemZ::BI__builtin_s390_vfmssb:
13324 case SystemZ::BI__builtin_s390_vfmsdb: {
13325 llvm::Type *ResultType = ConvertType(E->getType());
13326 Value *X = EmitScalarExpr(E->getArg(0));
13327 Value *Y = EmitScalarExpr(E->getArg(1));
13328 Value *Z = EmitScalarExpr(E->getArg(2));
13329 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13330 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
13331 }
13332 case SystemZ::BI__builtin_s390_vfnmasb:
13333 case SystemZ::BI__builtin_s390_vfnmadb: {
13334 llvm::Type *ResultType = ConvertType(E->getType());
13335 Value *X = EmitScalarExpr(E->getArg(0));
13336 Value *Y = EmitScalarExpr(E->getArg(1));
13337 Value *Z = EmitScalarExpr(E->getArg(2));
13338 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13339 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
13340 }
13341 case SystemZ::BI__builtin_s390_vfnmssb:
13342 case SystemZ::BI__builtin_s390_vfnmsdb: {
13343 llvm::Type *ResultType = ConvertType(E->getType());
13344 Value *X = EmitScalarExpr(E->getArg(0));
13345 Value *Y = EmitScalarExpr(E->getArg(1));
13346 Value *Z = EmitScalarExpr(E->getArg(2));
13347 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13348 Value *NegZ = Builder.CreateFNeg(Z, "neg");
13349 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
13350 }
13351 case SystemZ::BI__builtin_s390_vflpsb:
13352 case SystemZ::BI__builtin_s390_vflpdb: {
13353 llvm::Type *ResultType = ConvertType(E->getType());
13354 Value *X = EmitScalarExpr(E->getArg(0));
13355 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
13356 return Builder.CreateCall(F, X);
13357 }
13358 case SystemZ::BI__builtin_s390_vflnsb:
13359 case SystemZ::BI__builtin_s390_vflndb: {
13360 llvm::Type *ResultType = ConvertType(E->getType());
13361 Value *X = EmitScalarExpr(E->getArg(0));
13362 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
13363 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
13364 }
13365 case SystemZ::BI__builtin_s390_vfisb:
13366 case SystemZ::BI__builtin_s390_vfidb: {
13367 llvm::Type *ResultType = ConvertType(E->getType());
13368 Value *X = EmitScalarExpr(E->getArg(0));
13369 // Constant-fold the M4 and M5 mask arguments.
13370 llvm::APSInt M4, M5;
13371 bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
13372 bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
13373 assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
13374 (void)IsConstM4; (void)IsConstM5;
13375 // Check whether this instance can be represented via a LLVM standard
13376 // intrinsic. We only support some combinations of M4 and M5.
13377 Intrinsic::ID ID = Intrinsic::not_intrinsic;
13378 switch (M4.getZExtValue()) {
13379 default: break;
13380 case 0:  // IEEE-inexact exception allowed
13381 switch (M5.getZExtValue()) {
13382 default: break;
13383 case 0: ID = Intrinsic::rint; break;
13384 }
13385 break;
13386 case 4:  // IEEE-inexact exception suppressed
13387 switch (M5.getZExtValue()) {
13388 default: break;
13389 case 0: ID = Intrinsic::nearbyint; break;
13390 case 1: ID = Intrinsic::round; break;
13391 case 5: ID = Intrinsic::trunc; break;
13392 case 6: ID = Intrinsic::ceil; break;
13393 case 7: ID = Intrinsic::floor; break;
13394 }
13395 break;
13396 }
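// For example, __builtin_s390_vfidb(v, 4, 6) (inexact suppressed, round
// toward +infinity) lowers to llvm.ceil on the vector type, while
// unsupported mask combinations fall through to the target intrinsic below.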
13397 if (ID != Intrinsic::not_intrinsic) {
13398 Function *F = CGM.getIntrinsic(ID, ResultType);
13399 return Builder.CreateCall(F, X);
13400 }
13401 switch (BuiltinID) {
13402 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
13403 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
13404 default: llvm_unreachable("Unknown BuiltinID");
13405 }
13406 Function *F = CGM.getIntrinsic(ID);
13407 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
13408 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
13409 return Builder.CreateCall(F, {X, M4Value, M5Value});
13410 }
13411 case SystemZ::BI__builtin_s390_vfmaxsb:
13412 case SystemZ::BI__builtin_s390_vfmaxdb: {
13413 llvm::Type *ResultType = ConvertType(E->getType());
13414 Value *X = EmitScalarExpr(E->getArg(0));
13415 Value *Y = EmitScalarExpr(E->getArg(1));
13416 // Constant-fold the M4 mask argument.
13417 llvm::APSInt M4;
13418 bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
13419 assert(IsConstM4 && "Constant arg isn't actually constant?");
13420 (void)IsConstM4;
13421 // Check whether this instance can be represented via a LLVM standard
13422 // intrinsic. We only support some values of M4.
13423 Intrinsic::ID ID = Intrinsic::not_intrinsic;
13424 switch (M4.getZExtValue()) {
13425 default: break;
13426 case 4: ID = Intrinsic::maxnum; break;
13427 }
13428 if (ID != Intrinsic::not_intrinsic) {
13429 Function *F = CGM.getIntrinsic(ID, ResultType);
13430 return Builder.CreateCall(F, {X, Y});
13431 }
13432 switch (BuiltinID) {
13433 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
13434 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
13435 default: llvm_unreachable("Unknown BuiltinID");
13436 }
13437 Function *F = CGM.getIntrinsic(ID);
13438 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
13439 return Builder.CreateCall(F, {X, Y, M4Value});
13440 }
13441 case SystemZ::BI__builtin_s390_vfminsb:
13442 case SystemZ::BI__builtin_s390_vfmindb: {
13443 llvm::Type *ResultType = ConvertType(E->getType());
13444 Value *X = EmitScalarExpr(E->getArg(0));
13445 Value *Y = EmitScalarExpr(E->getArg(1));
13446 // Constant-fold the M4 mask argument.
13447 llvm::APSInt M4;
13448 bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
13449 assert(IsConstM4 && "Constant arg isn't actually constant?");
13450 (void)IsConstM4;
13451 // Check whether this instance can be represented via a LLVM standard
13452 // intrinsic. We only support some values of M4.
13453 Intrinsic::ID ID = Intrinsic::not_intrinsic;
13454 switch (M4.getZExtValue()) {
13455 default: break;
13456 case 4: ID = Intrinsic::minnum; break;
13457 }
13458 if (ID != Intrinsic::not_intrinsic) {
13459 Function *F = CGM.getIntrinsic(ID, ResultType);
13460 return Builder.CreateCall(F, {X, Y});
13461 }
13462 switch (BuiltinID) {
13463 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
13464 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
13465 default: llvm_unreachable("Unknown BuiltinID");
13466 }
13467 Function *F = CGM.getIntrinsic(ID);
13468 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
13469 return Builder.CreateCall(F, {X, Y, M4Value});
13470 }
13472 case SystemZ::BI__builtin_s390_vlbrh:
13473 case SystemZ::BI__builtin_s390_vlbrf:
13474 case SystemZ::BI__builtin_s390_vlbrg: {
13475 llvm::Type *ResultType = ConvertType(E->getType());
13476 Value *X = EmitScalarExpr(E->getArg(0));
13477 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
13478 return Builder.CreateCall(F, X);
13479 }
13481 // Vector intrinsics that output the post-instruction CC value.
13483 #define INTRINSIC_WITH_CC(NAME) \
13484 case SystemZ::BI__builtin_##NAME: \
13485 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
13487 INTRINSIC_WITH_CC(s390_vpkshs);
13488 INTRINSIC_WITH_CC(s390_vpksfs);
13489 INTRINSIC_WITH_CC(s390_vpksgs);
13491 INTRINSIC_WITH_CC(s390_vpklshs);
13492 INTRINSIC_WITH_CC(s390_vpklsfs);
13493 INTRINSIC_WITH_CC(s390_vpklsgs);
13495 INTRINSIC_WITH_CC(s390_vceqbs);
13496 INTRINSIC_WITH_CC(s390_vceqhs);
13497 INTRINSIC_WITH_CC(s390_vceqfs);
13498 INTRINSIC_WITH_CC(s390_vceqgs);
13500 INTRINSIC_WITH_CC(s390_vchbs);
13501 INTRINSIC_WITH_CC(s390_vchhs);
13502 INTRINSIC_WITH_CC(s390_vchfs);
13503 INTRINSIC_WITH_CC(s390_vchgs);
13505 INTRINSIC_WITH_CC(s390_vchlbs);
13506 INTRINSIC_WITH_CC(s390_vchlhs);
13507 INTRINSIC_WITH_CC(s390_vchlfs);
13508 INTRINSIC_WITH_CC(s390_vchlgs);
13510 INTRINSIC_WITH_CC(s390_vfaebs);
13511 INTRINSIC_WITH_CC(s390_vfaehs);
13512 INTRINSIC_WITH_CC(s390_vfaefs);
13514 INTRINSIC_WITH_CC(s390_vfaezbs);
13515 INTRINSIC_WITH_CC(s390_vfaezhs);
13516 INTRINSIC_WITH_CC(s390_vfaezfs);
13518 INTRINSIC_WITH_CC(s390_vfeebs);
13519 INTRINSIC_WITH_CC(s390_vfeehs);
13520 INTRINSIC_WITH_CC(s390_vfeefs);
13522 INTRINSIC_WITH_CC(s390_vfeezbs);
13523 INTRINSIC_WITH_CC(s390_vfeezhs);
13524 INTRINSIC_WITH_CC(s390_vfeezfs);
13526 INTRINSIC_WITH_CC(s390_vfenebs);
13527 INTRINSIC_WITH_CC(s390_vfenehs);
13528 INTRINSIC_WITH_CC(s390_vfenefs);
13530 INTRINSIC_WITH_CC(s390_vfenezbs);
13531 INTRINSIC_WITH_CC(s390_vfenezhs);
13532 INTRINSIC_WITH_CC(s390_vfenezfs);
13534 INTRINSIC_WITH_CC(s390_vistrbs);
13535 INTRINSIC_WITH_CC(s390_vistrhs);
13536 INTRINSIC_WITH_CC(s390_vistrfs);
13538 INTRINSIC_WITH_CC(s390_vstrcbs);
13539 INTRINSIC_WITH_CC(s390_vstrchs);
13540 INTRINSIC_WITH_CC(s390_vstrcfs);
13542 INTRINSIC_WITH_CC(s390_vstrczbs);
13543 INTRINSIC_WITH_CC(s390_vstrczhs);
13544 INTRINSIC_WITH_CC(s390_vstrczfs);
13546 INTRINSIC_WITH_CC(s390_vfcesbs);
13547 INTRINSIC_WITH_CC(s390_vfcedbs);
13548 INTRINSIC_WITH_CC(s390_vfchsbs);
13549 INTRINSIC_WITH_CC(s390_vfchdbs);
13550 INTRINSIC_WITH_CC(s390_vfchesbs);
13551 INTRINSIC_WITH_CC(s390_vfchedbs);
13553 INTRINSIC_WITH_CC(s390_vftcisb);
13554 INTRINSIC_WITH_CC(s390_vftcidb);
13556 INTRINSIC_WITH_CC(s390_vstrsb);
13557 INTRINSIC_WITH_CC(s390_vstrsh);
13558 INTRINSIC_WITH_CC(s390_vstrsf);
13560 INTRINSIC_WITH_CC(s390_vstrszb);
13561 INTRINSIC_WITH_CC(s390_vstrszh);
13562 INTRINSIC_WITH_CC(s390_vstrszf);
13564 #undef INTRINSIC_WITH_CC
13566 default:
13567 return nullptr;
13568 }
13569 }
13571 namespace {
13572 // Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
13573 struct NVPTXMmaLdstInfo {
13574 unsigned NumResults; // Number of elements to load/store
13575 // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
13576 unsigned IID_col;
13577 unsigned IID_row;
13578 };
13580 #define MMA_INTR(geom_op_type, layout) \
13581 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
13582 #define MMA_LDST(n, geom_op_type) \
13583 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
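// For example, MMA_LDST(8, m16n16k16_load_a_f16) expands to
//   {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//       Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride}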
13585 static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
13586 switch (BuiltinID) {
13587 // FP MMA loads
13588 case NVPTX::BI__hmma_m16n16k16_ld_a:
13589 return MMA_LDST(8, m16n16k16_load_a_f16);
13590 case NVPTX::BI__hmma_m16n16k16_ld_b:
13591 return MMA_LDST(8, m16n16k16_load_b_f16);
13592 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
13593 return MMA_LDST(4, m16n16k16_load_c_f16);
13594 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
13595 return MMA_LDST(8, m16n16k16_load_c_f32);
13596 case NVPTX::BI__hmma_m32n8k16_ld_a:
13597 return MMA_LDST(8, m32n8k16_load_a_f16);
13598 case NVPTX::BI__hmma_m32n8k16_ld_b:
13599 return MMA_LDST(8, m32n8k16_load_b_f16);
13600 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
13601 return MMA_LDST(4, m32n8k16_load_c_f16);
13602 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
13603 return MMA_LDST(8, m32n8k16_load_c_f32);
13604 case NVPTX::BI__hmma_m8n32k16_ld_a:
13605 return MMA_LDST(8, m8n32k16_load_a_f16);
13606 case NVPTX::BI__hmma_m8n32k16_ld_b:
13607 return MMA_LDST(8, m8n32k16_load_b_f16);
13608 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
13609 return MMA_LDST(4, m8n32k16_load_c_f16);
13610 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
13611 return MMA_LDST(8, m8n32k16_load_c_f32);
13613 // Integer MMA loads
13614 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
13615 return MMA_LDST(2, m16n16k16_load_a_s8);
13616 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
13617 return MMA_LDST(2, m16n16k16_load_a_u8);
13618 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
13619 return MMA_LDST(2, m16n16k16_load_b_s8);
13620 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
13621 return MMA_LDST(2, m16n16k16_load_b_u8);
13622 case NVPTX::BI__imma_m16n16k16_ld_c:
13623 return MMA_LDST(8, m16n16k16_load_c_s32);
13624 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
13625 return MMA_LDST(4, m32n8k16_load_a_s8);
13626 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
13627 return MMA_LDST(4, m32n8k16_load_a_u8);
13628 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
13629 return MMA_LDST(1, m32n8k16_load_b_s8);
13630 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
13631 return MMA_LDST(1, m32n8k16_load_b_u8);
13632 case NVPTX::BI__imma_m32n8k16_ld_c:
13633 return MMA_LDST(8, m32n8k16_load_c_s32);
13634 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
13635 return MMA_LDST(1, m8n32k16_load_a_s8);
13636 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
13637 return MMA_LDST(1, m8n32k16_load_a_u8);
13638 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
13639 return MMA_LDST(4, m8n32k16_load_b_s8);
13640 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
13641 return MMA_LDST(4, m8n32k16_load_b_u8);
13642 case NVPTX::BI__imma_m8n32k16_ld_c:
13643 return MMA_LDST(8, m8n32k16_load_c_s32);
13645 // Sub-integer MMA loads.
13646 // Only row/col layout is supported by A/B fragments.
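// E.g., __imma_m8n8k32_ld_a_s4 below only provides a row-major intrinsic
// (IID_col is 0); callers treat an intrinsic ID of 0 as an unsupported
// layout and bail out with nullptr.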
13647 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
13648 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
13649 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
13650 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
13651 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
13652 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
13653 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
13654 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
13655 case NVPTX::BI__imma_m8n8k32_ld_c:
13656 return MMA_LDST(2, m8n8k32_load_c_s32);
13657 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
13658 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
13659 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
13660 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
13661 case NVPTX::BI__bmma_m8n8k128_ld_c:
13662 return MMA_LDST(2, m8n8k128_load_c_s32);
13664 // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
13665 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
13666 // use fragment C for both loads and stores.
13668 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
13669 return MMA_LDST(4, m16n16k16_store_d_f16);
13670 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
13671 return MMA_LDST(8, m16n16k16_store_d_f32);
13672 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
13673 return MMA_LDST(4, m32n8k16_store_d_f16);
13674 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
13675 return MMA_LDST(8, m32n8k16_store_d_f32);
13676 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
13677 return MMA_LDST(4, m8n32k16_store_d_f16);
13678 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
13679 return MMA_LDST(8, m8n32k16_store_d_f32);
13681 // Integer and sub-integer MMA stores.
13682 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
13683 // name, integer loads/stores use LLVM's i32.
13684 case NVPTX::BI__imma_m16n16k16_st_c_i32:
13685 return MMA_LDST(8, m16n16k16_store_d_s32);
13686 case NVPTX::BI__imma_m32n8k16_st_c_i32:
13687 return MMA_LDST(8, m32n8k16_store_d_s32);
13688 case NVPTX::BI__imma_m8n32k16_st_c_i32:
13689 return MMA_LDST(8, m8n32k16_store_d_s32);
13690 case NVPTX::BI__imma_m8n8k32_st_c_i32:
13691 return MMA_LDST(2, m8n8k32_store_d_s32);
13692 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
13693 return MMA_LDST(2, m8n8k128_store_d_s32);
13695 default:
13696 llvm_unreachable("Unknown MMA builtin");
13697 }
13698 }
13700 #undef MMA_LDST
13701 #undef MMA_INTR
13703 struct NVPTXMmaInfo {
13704 unsigned NumEltsA;
13705 unsigned NumEltsB;
13706 unsigned NumEltsC;
13707 unsigned NumEltsD;
13708 std::array<unsigned, 8> Variants;
13710 unsigned getMMAIntrinsic(int Layout, bool Satf) {
13711 unsigned Index = Layout * 2 + Satf;
13712 if (Index >= Variants.size())
13713 return 0;
13714 return Variants[Index];
13715 }
13716 };
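// The Variants table is indexed as Layout * 2 + Satf; e.g., Layout 1
// (row-major A, column-major B) with Satf set selects Variants[3], the
// row_col satfinite entry per the MMA_VARIANTS ordering below.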
13718 // Returns an intrinsic that matches Layout and Satf for valid combinations of
13719 // Layout and Satf, 0 otherwise.
13720 static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
13721 // clang-format off
13722 #define MMA_VARIANTS(geom, type) {{ \
13723 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
13724 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
13725 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
13726 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
13727 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
13728 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
13729 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
13730 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
13731 }}
13732 // Sub-integer MMA only supports row.col layout.
13733 #define MMA_VARIANTS_I4(geom, type) {{ \
13734 0, \
13735 0, \
13736 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
13737 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
13738 0, \
13739 0, \
13740 0, \
13741 0 \
13742 }}
13743 // b1 MMA does not support .satfinite.
13744 #define MMA_VARIANTS_B1(geom, type) {{ \
13745 0, \
13746 0, \
13747 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
13748 0, \
13749 0, \
13750 0, \
13751 0, \
13752 0 \
13753 }}
13755 switch (BuiltinID) {
13757 // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while
13758 // the NumEltsN fields of the return value are ordered as A,B,C,D.
13759 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
13760 return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
13761 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
13762 return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
13763 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
13764 return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
13765 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
13766 return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
13767 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
13768 return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
13769 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
13770 return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
13771 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
13772 return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
13773 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
13774 return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
13775 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
13776 return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
13777 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
13778 return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
13779 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
13780 return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
13781 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
13782 return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
13785 case NVPTX::BI__imma_m16n16k16_mma_s8:
13786 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
13787 case NVPTX::BI__imma_m16n16k16_mma_u8:
13788 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
13789 case NVPTX::BI__imma_m32n8k16_mma_s8:
13790 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
13791 case NVPTX::BI__imma_m32n8k16_mma_u8:
13792 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
13793 case NVPTX::BI__imma_m8n32k16_mma_s8:
13794 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
13795 case NVPTX::BI__imma_m8n32k16_mma_u8:
13796 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
13799 case NVPTX::BI__imma_m8n8k32_mma_s4:
13800 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
13801 case NVPTX::BI__imma_m8n8k32_mma_u4:
13802 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
13803 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
13804 return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
13805 default:
13806 llvm_unreachable("Unexpected builtin ID.");
13807 }
13808 #undef MMA_VARIANTS
13809 #undef MMA_VARIANTS_I4
13810 #undef MMA_VARIANTS_B1
13811 }
13812 // clang-format on
13813 } // namespace
13815 Value *
13816 CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
13817 auto MakeLdg = [&](unsigned IntrinsicID) {
13818 Value *Ptr = EmitScalarExpr(E->getArg(0));
13819 clang::CharUnits Align =
13820 getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
13821 return Builder.CreateCall(
13822 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
13823 Ptr->getType()}),
13824 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
13825 };
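// E.g., __nvvm_ldg_i becomes a call to llvm.nvvm.ldg.global.i overloaded on
// {i32, i32*}, with the pointee's natural alignment in bytes passed as the
// second operand.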
13826 auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
13827 Value *Ptr = EmitScalarExpr(E->getArg(0));
13828 return Builder.CreateCall(
13829 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
13830 Ptr->getType()}),
13831 {Ptr, EmitScalarExpr(E->getArg(1))});
13832 };
13833 switch (BuiltinID) {
13834 case NVPTX::BI__nvvm_atom_add_gen_i:
13835 case NVPTX::BI__nvvm_atom_add_gen_l:
13836 case NVPTX::BI__nvvm_atom_add_gen_ll:
13837 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
13839 case NVPTX::BI__nvvm_atom_sub_gen_i:
13840 case NVPTX::BI__nvvm_atom_sub_gen_l:
13841 case NVPTX::BI__nvvm_atom_sub_gen_ll:
13842 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
13844 case NVPTX::BI__nvvm_atom_and_gen_i:
13845 case NVPTX::BI__nvvm_atom_and_gen_l:
13846 case NVPTX::BI__nvvm_atom_and_gen_ll:
13847 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
13849 case NVPTX::BI__nvvm_atom_or_gen_i:
13850 case NVPTX::BI__nvvm_atom_or_gen_l:
13851 case NVPTX::BI__nvvm_atom_or_gen_ll:
13852 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
13854 case NVPTX::BI__nvvm_atom_xor_gen_i:
13855 case NVPTX::BI__nvvm_atom_xor_gen_l:
13856 case NVPTX::BI__nvvm_atom_xor_gen_ll:
13857 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
13859 case NVPTX::BI__nvvm_atom_xchg_gen_i:
13860 case NVPTX::BI__nvvm_atom_xchg_gen_l:
13861 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
13862 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
13864 case NVPTX::BI__nvvm_atom_max_gen_i:
13865 case NVPTX::BI__nvvm_atom_max_gen_l:
13866 case NVPTX::BI__nvvm_atom_max_gen_ll:
13867 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
13869 case NVPTX::BI__nvvm_atom_max_gen_ui:
13870 case NVPTX::BI__nvvm_atom_max_gen_ul:
13871 case NVPTX::BI__nvvm_atom_max_gen_ull:
13872 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
13874 case NVPTX::BI__nvvm_atom_min_gen_i:
13875 case NVPTX::BI__nvvm_atom_min_gen_l:
13876 case NVPTX::BI__nvvm_atom_min_gen_ll:
13877 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
13879 case NVPTX::BI__nvvm_atom_min_gen_ui:
13880 case NVPTX::BI__nvvm_atom_min_gen_ul:
13881 case NVPTX::BI__nvvm_atom_min_gen_ull:
13882 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
13884 case NVPTX::BI__nvvm_atom_cas_gen_i:
13885 case NVPTX::BI__nvvm_atom_cas_gen_l:
13886 case NVPTX::BI__nvvm_atom_cas_gen_ll:
13887 // __nvvm_atom_cas_gen_* should return the old value rather than the
13888 // success flag.
13889 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
13891 case NVPTX::BI__nvvm_atom_add_gen_f:
13892 case NVPTX::BI__nvvm_atom_add_gen_d: {
13893 Value *Ptr = EmitScalarExpr(E->getArg(0));
13894 Value *Val = EmitScalarExpr(E->getArg(1));
13895 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
13896 AtomicOrdering::SequentiallyConsistent);
13897 }
13899 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
13900 Value *Ptr = EmitScalarExpr(E->getArg(0));
13901 Value *Val = EmitScalarExpr(E->getArg(1));
13902 Function *FnALI32 =
13903 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
13904 return Builder.CreateCall(FnALI32, {Ptr, Val});
13905 }
13907 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
13908 Value *Ptr = EmitScalarExpr(E->getArg(0));
13909 Value *Val = EmitScalarExpr(E->getArg(1));
13910 Function *FnALD32 =
13911 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
13912 return Builder.CreateCall(FnALD32, {Ptr, Val});
13913 }
13915 case NVPTX::BI__nvvm_ldg_c:
13916 case NVPTX::BI__nvvm_ldg_c2:
13917 case NVPTX::BI__nvvm_ldg_c4:
13918 case NVPTX::BI__nvvm_ldg_s:
13919 case NVPTX::BI__nvvm_ldg_s2:
13920 case NVPTX::BI__nvvm_ldg_s4:
13921 case NVPTX::BI__nvvm_ldg_i:
13922 case NVPTX::BI__nvvm_ldg_i2:
13923 case NVPTX::BI__nvvm_ldg_i4:
13924 case NVPTX::BI__nvvm_ldg_l:
13925 case NVPTX::BI__nvvm_ldg_ll:
13926 case NVPTX::BI__nvvm_ldg_ll2:
13927 case NVPTX::BI__nvvm_ldg_uc:
13928 case NVPTX::BI__nvvm_ldg_uc2:
13929 case NVPTX::BI__nvvm_ldg_uc4:
13930 case NVPTX::BI__nvvm_ldg_us:
13931 case NVPTX::BI__nvvm_ldg_us2:
13932 case NVPTX::BI__nvvm_ldg_us4:
13933 case NVPTX::BI__nvvm_ldg_ui:
13934 case NVPTX::BI__nvvm_ldg_ui2:
13935 case NVPTX::BI__nvvm_ldg_ui4:
13936 case NVPTX::BI__nvvm_ldg_ul:
13937 case NVPTX::BI__nvvm_ldg_ull:
13938 case NVPTX::BI__nvvm_ldg_ull2:
13939 // PTX Interoperability section 2.2: "For a vector with an even number of
13940 // elements, its alignment is set to number of elements times the alignment
13941 // of its member: n*alignof(t)."
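// E.g., a __nvvm_ldg_f2 load of a two-element float vector is given
// alignment 2 * alignof(float) = 8, and an f4 load alignment 16.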
13942 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
13943 case NVPTX::BI__nvvm_ldg_f:
13944 case NVPTX::BI__nvvm_ldg_f2:
13945 case NVPTX::BI__nvvm_ldg_f4:
13946 case NVPTX::BI__nvvm_ldg_d:
13947 case NVPTX::BI__nvvm_ldg_d2:
13948 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
13950 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
13951 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
13952 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
13953 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
13954 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
13955 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
13956 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
13957 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
13958 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
13959 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
13960 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
13961 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
13962 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
13963 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
13964 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
13965 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
13966 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
13967 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
13968 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
13969 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
13970 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
13971 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
13972 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
13973 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
13974 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
13975 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
13976 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
13977 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
13978 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
13979 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
13980 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
13981 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
13982 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
13983 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
13984 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
13985 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
13986 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
13987 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
13988 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
13989 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
13990 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
13991 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
13992 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
13993 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
13994 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
13995 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
13996 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
13997 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
13998 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
13999 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
14000 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
14001 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
14002 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
14003 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
14004 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
14005 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
14006 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
14007 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
14008 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
14009 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
14010 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
14011 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
14012 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
14013 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
14014 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
14015 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
14016 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
14017 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
14018 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
14019 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
14020 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
14021 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
14022 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
14023 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
14024 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
14025 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
14026 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
14027 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
14028 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
14029 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
14030 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
14031 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
14032 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
14033 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
14034 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
14035 Value *Ptr = EmitScalarExpr(E->getArg(0));
14036 return Builder.CreateCall(
14037 CGM.getIntrinsic(
14038 Intrinsic::nvvm_atomic_cas_gen_i_cta,
14039 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
14040 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
14041 }
14042 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
14043 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
14044 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
14045 Value *Ptr = EmitScalarExpr(E->getArg(0));
14046 return Builder.CreateCall(
14047 CGM.getIntrinsic(
14048 Intrinsic::nvvm_atomic_cas_gen_i_sys,
14049 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
14050 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
14051 }
14052 case NVPTX::BI__nvvm_match_all_sync_i32p:
14053 case NVPTX::BI__nvvm_match_all_sync_i64p: {
14054 Value *Mask = EmitScalarExpr(E->getArg(0));
14055 Value *Val = EmitScalarExpr(E->getArg(1));
14056 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
14057 Value *ResultPair = Builder.CreateCall(
14058 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
14059 ? Intrinsic::nvvm_match_all_sync_i32p
14060 : Intrinsic::nvvm_match_all_sync_i64p),
14061 {Mask, Val});
14062 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
14063 PredOutPtr.getElementType());
14064 Builder.CreateStore(Pred, PredOutPtr);
14065 return Builder.CreateExtractValue(ResultPair, 0);
14066 }
  // FP MMA loads.
  case NVPTX::BI__hmma_m16n16k16_ld_a:
  case NVPTX::BI__hmma_m16n16k16_ld_b:
  case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
  case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
  case NVPTX::BI__hmma_m32n8k16_ld_a:
  case NVPTX::BI__hmma_m32n8k16_ld_b:
  case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
  case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
  case NVPTX::BI__hmma_m8n32k16_ld_a:
  case NVPTX::BI__hmma_m8n32k16_ld_b:
  case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
  case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
  // Integer MMA loads.
  case NVPTX::BI__imma_m16n16k16_ld_a_s8:
  case NVPTX::BI__imma_m16n16k16_ld_a_u8:
  case NVPTX::BI__imma_m16n16k16_ld_b_s8:
  case NVPTX::BI__imma_m16n16k16_ld_b_u8:
  case NVPTX::BI__imma_m16n16k16_ld_c:
  case NVPTX::BI__imma_m32n8k16_ld_a_s8:
  case NVPTX::BI__imma_m32n8k16_ld_a_u8:
  case NVPTX::BI__imma_m32n8k16_ld_b_s8:
  case NVPTX::BI__imma_m32n8k16_ld_b_u8:
  case NVPTX::BI__imma_m32n8k16_ld_c:
  case NVPTX::BI__imma_m8n32k16_ld_a_s8:
  case NVPTX::BI__imma_m8n32k16_ld_a_u8:
  case NVPTX::BI__imma_m8n32k16_ld_b_s8:
  case NVPTX::BI__imma_m8n32k16_ld_b_u8:
  case NVPTX::BI__imma_m8n32k16_ld_c:
  // Sub-integer MMA loads.
  case NVPTX::BI__imma_m8n8k32_ld_a_s4:
  case NVPTX::BI__imma_m8n8k32_ld_a_u4:
  case NVPTX::BI__imma_m8n8k32_ld_b_s4:
  case NVPTX::BI__imma_m8n8k32_ld_b_u4:
  case NVPTX::BI__imma_m8n8k32_ld_c:
  case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
  case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
  case NVPTX::BI__bmma_m8n8k128_ld_c: {
    Address Dst = EmitPointerWithAlignment(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *Ldm = EmitScalarExpr(E->getArg(2));
    llvm::APSInt isColMajorArg;
    if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
      return nullptr;
    bool isColMajor = isColMajorArg.getSExtValue();
    NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
    unsigned IID = isColMajor ? II.IID_col : II.IID_row;
    if (IID == 0)
      return nullptr;

    Value *Result =
        Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});

    // Save returned values.
    assert(II.NumResults);
    if (II.NumResults == 1) {
      Builder.CreateAlignedStore(Result, Dst.getPointer(),
                                 CharUnits::fromQuantity(4));
    } else {
      for (unsigned i = 0; i < II.NumResults; ++i) {
        Builder.CreateAlignedStore(
            Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
                                  Dst.getElementType()),
            Builder.CreateGEP(Dst.getPointer(),
                              llvm::ConstantInt::get(IntTy, i)),
            CharUnits::fromQuantity(4));
      }
    }
    return Result;
  }
  case NVPTX::BI__hmma_m16n16k16_st_c_f16:
  case NVPTX::BI__hmma_m16n16k16_st_c_f32:
  case NVPTX::BI__hmma_m32n8k16_st_c_f16:
  case NVPTX::BI__hmma_m32n8k16_st_c_f32:
  case NVPTX::BI__hmma_m8n32k16_st_c_f16:
  case NVPTX::BI__hmma_m8n32k16_st_c_f32:
  case NVPTX::BI__imma_m16n16k16_st_c_i32:
  case NVPTX::BI__imma_m32n8k16_st_c_i32:
  case NVPTX::BI__imma_m8n32k16_st_c_i32:
  case NVPTX::BI__imma_m8n8k32_st_c_i32:
  case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
    Value *Dst = EmitScalarExpr(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *Ldm = EmitScalarExpr(E->getArg(2));
    llvm::APSInt isColMajorArg;
    if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
      return nullptr;
    bool isColMajor = isColMajorArg.getSExtValue();
    NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
    unsigned IID = isColMajor ? II.IID_col : II.IID_row;
    if (IID == 0)
      return nullptr;
    Function *Intrinsic =
        CGM.getIntrinsic(IID, Dst->getType());
    llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
    SmallVector<Value *, 10> Values = {Dst};
    for (unsigned i = 0; i < II.NumResults; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, ParamType));
    }
    Values.push_back(Ldm);
    Value *Result = Builder.CreateCall(Intrinsic, Values);
    return Result;
  }
  // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
  // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
  case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
  case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
  case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
  case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
  case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
  case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
  case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
  case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
  case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
  case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
  case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
  case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
  case NVPTX::BI__imma_m16n16k16_mma_s8:
  case NVPTX::BI__imma_m16n16k16_mma_u8:
  case NVPTX::BI__imma_m32n8k16_mma_s8:
  case NVPTX::BI__imma_m32n8k16_mma_u8:
  case NVPTX::BI__imma_m8n32k16_mma_s8:
  case NVPTX::BI__imma_m8n32k16_mma_u8:
  case NVPTX::BI__imma_m8n8k32_mma_s4:
  case NVPTX::BI__imma_m8n8k32_mma_u4:
  case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
    Address Dst = EmitPointerWithAlignment(E->getArg(0));
    Address SrcA = EmitPointerWithAlignment(E->getArg(1));
    Address SrcB = EmitPointerWithAlignment(E->getArg(2));
    Address SrcC = EmitPointerWithAlignment(E->getArg(3));
    llvm::APSInt LayoutArg;
    if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext()))
      return nullptr;
    int Layout = LayoutArg.getSExtValue();
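    // Illustrative reading (an assumption based on getNVPTXMmaInfo's variant
    // ordering): Layout packs the row/column-major choice for the A and B
    // fragments, with 0 = row/row, 1 = row/col, 2 = col/row, 3 = col/col.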
    if (Layout < 0 || Layout > 3)
      return nullptr;
    llvm::APSInt SatfArg;
    if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
      SatfArg = 0; // .b1 does not have satf argument.
    else if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
      return nullptr;
    bool Satf = SatfArg.getSExtValue();
    NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
    unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
    if (IID == 0) // Unsupported combination of Layout/Satf.
      return nullptr;

    SmallVector<Value *, 24> Values;
    Function *Intrinsic = CGM.getIntrinsic(IID);
    llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
    // Load A
    for (unsigned i = 0; i < MI.NumEltsA; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Builder.CreateGEP(SrcA.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, AType));
    }
    // Load B
    llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
    for (unsigned i = 0; i < MI.NumEltsB; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Builder.CreateGEP(SrcB.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, BType));
    }
    // Load C
    llvm::Type *CType =
        Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
    for (unsigned i = 0; i < MI.NumEltsC; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Builder.CreateGEP(SrcC.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, CType));
    }
    Value *Result = Builder.CreateCall(Intrinsic, Values);
    llvm::Type *DType = Dst.getElementType();
    for (unsigned i = 0; i < MI.NumEltsD; ++i)
      Builder.CreateAlignedStore(
          Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
          Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
    return Result;
  }
  default:
    return nullptr;
  }
}

namespace {
struct BuiltinAlignArgs {
  llvm::Value *Src = nullptr;
  llvm::Type *SrcType = nullptr;
  llvm::Value *Alignment = nullptr;
  llvm::Value *Mask = nullptr;
  llvm::IntegerType *IntType = nullptr;

  BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
    QualType AstType = E->getArg(0)->getType();
    if (AstType->isArrayType())
      Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    else
      Src = CGF.EmitScalarExpr(E->getArg(0));
    SrcType = Src->getType();
    if (SrcType->isPointerTy()) {
      IntType = IntegerType::get(
          CGF.getLLVMContext(),
          CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
    } else {
      assert(SrcType->isIntegerTy());
      IntType = cast<llvm::IntegerType>(SrcType);
    }
    Alignment = CGF.EmitScalarExpr(E->getArg(1));
    Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
    auto *One = llvm::ConstantInt::get(IntType, 1);
    Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
  }
};
} // namespace
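// For example (illustrative): for __builtin_align_up(p, 32) on a target whose
// pointer index width is 64 bits, IntType is i64, Alignment is i64 32, and
// Mask is i64 31, i.e. alignment - 1.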
/// Generate (x & (y-1)) == 0.
RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
  BuiltinAlignArgs Args(E, *this);
  llvm::Value *SrcAddress = Args.Src;
  if (Args.SrcType->isPointerTy())
    SrcAddress =
        Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
  return RValue::get(Builder.CreateICmpEQ(
      Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
      llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
}
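// A minimal sketch of the IR this produces for __builtin_is_aligned(p, 16)
// with 64-bit pointers (value names match the Create* calls above):
//   %src_addr   = ptrtoint i8* %p to i64
//   %set_bits   = and i64 %src_addr, 15
//   %is_aligned = icmp eq i64 %set_bits, 0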
/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
/// TODO: actually use ptrmask once most optimization passes know about it.
RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
  BuiltinAlignArgs Args(E, *this);
  llvm::Value *SrcAddr = Args.Src;
  if (Args.Src->getType()->isPointerTy())
    SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
  llvm::Value *SrcForMask = SrcAddr;
  if (AlignUp) {
    // When aligning up we have to first add the mask to ensure we go over the
    // next alignment value and then align down to the next valid multiple.
    // By adding the mask, we ensure that align_up on an already aligned
    // value will not change the value.
    SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
  }
  // Invert the mask to only clear the lower bits.
  llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
  llvm::Value *Result =
      Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
  if (Args.Src->getType()->isPointerTy()) {
    /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
    // Result = Builder.CreateIntrinsic(
    //   Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
    //   {SrcForMask, NegatedMask}, nullptr, "aligned_result");
    Result->setName("aligned_intptr");
    llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
    // The result must point to the same underlying allocation. This means we
    // can use an inbounds GEP to enable better optimization.
    Value *Base = EmitCastToVoidPtr(Args.Src);
    if (getLangOpts().isSignedOverflowDefined())
      Result = Builder.CreateGEP(Base, Difference, "aligned_result");
    else
      Result = EmitCheckedInBoundsGEP(Base, Difference,
                                      /*SignedIndices=*/true,
                                      /*isSubtraction=*/!AlignUp,
                                      E->getExprLoc(), "aligned_result");
    Result = Builder.CreatePointerCast(Result, Args.SrcType);
    // Emit an alignment assumption to ensure that the new alignment is
    // propagated to loads/stores, etc.
    EmitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
  }
  assert(Result->getType() == Args.SrcType);
  return RValue::get(Result);
}
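// A minimal sketch of the integer (non-pointer) path for
// __builtin_align_up(x, 64) on an i64 value, using the value names above:
//   %mask           = sub i64 %alignment, 1    ; 63
//   %over_boundary  = add i64 %x, %mask
//   %inverted_mask  = xor i64 %mask, -1
//   %aligned_result = and i64 %over_boundary, %inverted_mask
// Aligning down simply omits the add.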
Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
                                                   const CallExpr *E) {
  switch (BuiltinID) {
  case WebAssembly::BI__builtin_wasm_memory_size: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *I = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
    return Builder.CreateCall(Callee, I);
  }
  case WebAssembly::BI__builtin_wasm_memory_grow: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Args[] = {
      EmitScalarExpr(E->getArg(0)),
      EmitScalarExpr(E->getArg(1))
    };
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
    return Builder.CreateCall(Callee, Args);
  }
  case WebAssembly::BI__builtin_wasm_memory_init: {
    llvm::APSInt SegConst;
    if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
      llvm_unreachable("Constant arg isn't actually constant?");
    llvm::APSInt MemConst;
    if (!E->getArg(1)->isIntegerConstantExpr(MemConst, getContext()))
      llvm_unreachable("Constant arg isn't actually constant?");
    if (!MemConst.isNullValue())
      ErrorUnsupported(E, "non-zero memory index");
    Value *Args[] = {llvm::ConstantInt::get(getLLVMContext(), SegConst),
                     llvm::ConstantInt::get(getLLVMContext(), MemConst),
                     EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)),
                     EmitScalarExpr(E->getArg(4))};
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_init);
    return Builder.CreateCall(Callee, Args);
  }
  case WebAssembly::BI__builtin_wasm_data_drop: {
    llvm::APSInt SegConst;
    if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
      llvm_unreachable("Constant arg isn't actually constant?");
    Value *Arg = llvm::ConstantInt::get(getLLVMContext(), SegConst);
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_data_drop);
    return Builder.CreateCall(Callee, {Arg});
  }
  case WebAssembly::BI__builtin_wasm_tls_size: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_tls_align: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_tls_base: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_throw: {
    Value *Tag = EmitScalarExpr(E->getArg(0));
    Value *Obj = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
    return Builder.CreateCall(Callee, {Tag, Obj});
  }
  case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Expected = EmitScalarExpr(E->getArg(1));
    Value *Timeout = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
    return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
  }
  case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Expected = EmitScalarExpr(E->getArg(1));
    Value *Timeout = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
    return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
  }
  case WebAssembly::BI__builtin_wasm_atomic_notify: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Count = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
    return Builder.CreateCall(Callee, {Addr, Count});
  }
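  // Illustrative semantics, per the wasm threads proposal: atomic.wait
  // returns 0 ("ok"), 1 ("not-equal"), or 2 ("timed-out"), and atomic.notify
  // returns the number of waiters that were woken up.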
  case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_min_f32:
  case WebAssembly::BI__builtin_wasm_min_f64:
  case WebAssembly::BI__builtin_wasm_min_f32x4:
  case WebAssembly::BI__builtin_wasm_min_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_max_f32:
  case WebAssembly::BI__builtin_wasm_max_f64:
  case WebAssembly::BI__builtin_wasm_max_f32x4:
  case WebAssembly::BI__builtin_wasm_max_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    Value *Indices = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
    return Builder.CreateCall(Callee, {Src, Indices});
  }
  case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
  case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
  case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
  case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
  case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
  case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
  case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
  case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
    llvm::APSInt LaneConst;
    if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
      llvm_unreachable("Constant arg isn't actually constant?");
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
    Value *Extract = Builder.CreateExtractElement(Vec, Lane);
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
    case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
      return Builder.CreateSExt(Extract, ConvertType(E->getType()));
    case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
    case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
      return Builder.CreateZExt(Extract, ConvertType(E->getType()));
    case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
    case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
    case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
    case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
      return Extract;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
  }
  case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
  case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
  case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
  case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
  case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
  case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
    llvm::APSInt LaneConst;
    if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
      llvm_unreachable("Constant arg isn't actually constant?");
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
    Value *Val = EmitScalarExpr(E->getArg(2));
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
    case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
      llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
      Value *Trunc = Builder.CreateTrunc(Val, ElemType);
      return Builder.CreateInsertElement(Vec, Trunc, Lane);
    }
    case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
    case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
    case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
    case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
      return Builder.CreateInsertElement(Vec, Val, Lane);
    default:
      llvm_unreachable("unexpected builtin ID");
    }
  }
  case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
  case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
  case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
  case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
    case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
      IntNo = Intrinsic::sadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
    case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
      IntNo = Intrinsic::uadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
      IntNo = Intrinsic::wasm_sub_saturate_signed;
      break;
    case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
      IntNo = Intrinsic::wasm_sub_saturate_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
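  // Note the asymmetry above: saturating vector addition maps onto the
  // generic llvm.sadd.sat / llvm.uadd.sat intrinsics, while saturating
  // subtraction still goes through WebAssembly-specific intrinsics.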
  case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
  case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_bitselect: {
    Value *V1 = EmitScalarExpr(E->getArg(0));
    Value *V2 = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {V1, V2, C});
  }
  case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_any_true_i8x16:
  case WebAssembly::BI__builtin_wasm_any_true_i16x8:
  case WebAssembly::BI__builtin_wasm_any_true_i32x4:
  case WebAssembly::BI__builtin_wasm_any_true_i64x2:
  case WebAssembly::BI__builtin_wasm_all_true_i8x16:
  case WebAssembly::BI__builtin_wasm_all_true_i16x8:
  case WebAssembly::BI__builtin_wasm_all_true_i32x4:
  case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_any_true_i8x16:
    case WebAssembly::BI__builtin_wasm_any_true_i16x8:
    case WebAssembly::BI__builtin_wasm_any_true_i32x4:
    case WebAssembly::BI__builtin_wasm_any_true_i64x2:
      IntNo = Intrinsic::wasm_anytrue;
      break;
    case WebAssembly::BI__builtin_wasm_all_true_i8x16:
    case WebAssembly::BI__builtin_wasm_all_true_i16x8:
    case WebAssembly::BI__builtin_wasm_all_true_i32x4:
    case WebAssembly::BI__builtin_wasm_all_true_i64x2:
      IntNo = Intrinsic::wasm_alltrue;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_abs_f32x4:
  case WebAssembly::BI__builtin_wasm_abs_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
  case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_qfma_f32x4:
  case WebAssembly::BI__builtin_wasm_qfms_f32x4:
  case WebAssembly::BI__builtin_wasm_qfma_f64x2:
  case WebAssembly::BI__builtin_wasm_qfms_f64x2: {
    Value *A = EmitScalarExpr(E->getArg(0));
    Value *B = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_qfma_f32x4:
    case WebAssembly::BI__builtin_wasm_qfma_f64x2:
      IntNo = Intrinsic::wasm_qfma;
      break;
    case WebAssembly::BI__builtin_wasm_qfms_f32x4:
    case WebAssembly::BI__builtin_wasm_qfms_f64x2:
      IntNo = Intrinsic::wasm_qfms;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
    return Builder.CreateCall(Callee, {A, B, C});
  }
  case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
  case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
    Value *Low = EmitScalarExpr(E->getArg(0));
    Value *High = EmitScalarExpr(E->getArg(1));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_signed;
      break;
    case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee =
        CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
    return Builder.CreateCall(Callee, {Low, High});
  }
  case WebAssembly::BI__builtin_wasm_widen_low_s_i16x8_i8x16:
  case WebAssembly::BI__builtin_wasm_widen_high_s_i16x8_i8x16:
  case WebAssembly::BI__builtin_wasm_widen_low_u_i16x8_i8x16:
  case WebAssembly::BI__builtin_wasm_widen_high_u_i16x8_i8x16:
  case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i16x8:
  case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i16x8:
  case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i16x8:
  case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i16x8: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_widen_low_s_i16x8_i8x16:
    case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i16x8:
      IntNo = Intrinsic::wasm_widen_low_signed;
      break;
    case WebAssembly::BI__builtin_wasm_widen_high_s_i16x8_i8x16:
    case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i16x8:
      IntNo = Intrinsic::wasm_widen_high_signed;
      break;
    case WebAssembly::BI__builtin_wasm_widen_low_u_i16x8_i8x16:
    case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i16x8:
      IntNo = Intrinsic::wasm_widen_low_unsigned;
      break;
    case WebAssembly::BI__builtin_wasm_widen_high_u_i16x8_i8x16:
    case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i16x8:
      IntNo = Intrinsic::wasm_widen_high_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee =
        CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Vec->getType()});
    return Builder.CreateCall(Callee, Vec);
  }
  default:
    return nullptr;
  }
}
Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  SmallVector<llvm::Value *, 4> Ops;
  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  auto MakeCircLd = [&](unsigned IntID, bool HasImm) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address BP = EmitPointerWithAlignment(E->getArg(0));
    BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
                 BP.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // Operands are Base, Increment, Modifier, Start.
    if (HasImm)
      Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
              EmitScalarExpr(E->getArg(3)) };
    else
      Ops = { Base, EmitScalarExpr(E->getArg(1)),
              EmitScalarExpr(E->getArg(2)) };

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1);
    llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
                                            NewBase->getType()->getPointerTo());
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    // The intrinsic generates two results. The new value for the base pointer
    // needs to be stored.
    Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  };

  auto MakeCircSt = [&](unsigned IntID, bool HasImm) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address BP = EmitPointerWithAlignment(E->getArg(0));
    BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
                 BP.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // Operands are Base, Increment, Modifier, Value, Start.
    if (HasImm)
      Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
              EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) };
    else
      Ops = { Base, EmitScalarExpr(E->getArg(1)),
              EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) };

    llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
                                            NewBase->getType()->getPointerTo());
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    // The intrinsic generates one result, which is the new value for the base
    // pointer. It needs to be stored.
    return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
  };

  // Handle the conversion of bit-reverse load intrinsics to bit code.
  // The intrinsic call after this function only reads from memory; the write
  // to memory is handled by the store instruction emitted below.
  auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) {
    // The intrinsic generates one result, which is the new value for the base
    // pointer. It needs to be returned. The result of the load instruction is
    // passed to the intrinsic by address, so the value needs to be stored.
    llvm::Value *BaseAddress =
        Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);

    // Expressions like &(*pt++) will be incremented per evaluation.
    // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression
    // per call.
    Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
    DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
                       DestAddr.getAlignment());
    llvm::Value *DestAddress = DestAddr.getPointer();

    // Operands are Base, Dest, Modifier.
    // The intrinsic format in LLVM IR is defined as
    // { ValueType, i8* } (i8*, i32).
    Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))};

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    // The value needs to be stored as the variable is passed by reference.
    llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);

    // The store needs to be truncated to fit the destination type.
    // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
    // to be handled with stores of the respective destination type.
    DestVal = Builder.CreateTrunc(DestVal, DestTy);

    llvm::Value *DestForStore =
        Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
    Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
    // The updated value of the base pointer is returned.
    return Builder.CreateExtractValue(Result, 1);
  };
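  // Hypothetical usage sketch (the exact prototypes live in the Hexagon
  // builtin headers, so treat this as an assumption):
  //   unsigned char v;
  //   base = __builtin_brev_ldub(base, &v, mod);
  // i.e. the loaded value is written through the second argument while the
  // builtin's own result is the updated base pointer.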
  switch (BuiltinID) {
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: {
    Address Dest = EmitPointerWithAlignment(E->getArg(2));
    unsigned Size;
    if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vaddcarry) {
      Size = 512;
      ID = Intrinsic::hexagon_V6_vaddcarry;
    } else {
      Size = 1024;
      ID = Intrinsic::hexagon_V6_vaddcarry_128B;
    }
    Dest = Builder.CreateBitCast(Dest,
        llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
    LoadInst *QLd = Builder.CreateLoad(Dest);
    Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
    llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
    llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
                                              Vprd->getType()->getPointerTo(0));
    Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
    Address Dest = EmitPointerWithAlignment(E->getArg(2));
    unsigned Size;
    if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vsubcarry) {
      Size = 512;
      ID = Intrinsic::hexagon_V6_vsubcarry;
    } else {
      Size = 1024;
      ID = Intrinsic::hexagon_V6_vsubcarry_128B;
    }
    Dest = Builder.CreateBitCast(Dest,
        llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
    LoadInst *QLd = Builder.CreateLoad(Dest);
    Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
    llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
    llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
                                              Vprd->getType()->getPointerTo(0));
    Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_brev_ldub:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_ldb:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_lduh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldw:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
  case Hexagon::BI__builtin_brev_ldd:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);