//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;
static int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
  return std::min(High, std::max(Low, Value));
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             unsigned AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
}
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.getName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
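
// Illustrative example (not part of the original source): for
// Builtin::BI__builtin_fabsf, BuiltinInfo.getName() returns "__builtin_fabsf";
// skipping the 10-character "__builtin_" prefix yields the library name
// "fabsf", so the module ends up with a declaration like:
//   declare float @fabsf(float)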
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
      llvm::IntegerType::get(CGF.getLLVMContext(),
                             CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}
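
// Illustrative example (not part of the original source): given 'int x;', a
// call such as __sync_fetch_and_add(&x, 1) reaches this helper with
// Kind == AtomicRMWInst::Add and lowers to roughly:
//   %old = atomicrmw add i32* %x.addr, i32 1 seq_cst
// The returned value is the pre-operation contents of the location, converted
// back to the source type via EmitFromInt.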
static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Value *Address = CGF.EmitScalarExpr(E->getArg(1));

  // Convert the type of the pointer to a pointer to the stored type.
  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  Value *BC = CGF.Builder.CreateBitCast(
      Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
  LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Address = CGF.EmitScalarExpr(E->getArg(0));

  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
      llvm::IntegerType::get(CGF.getLLVMContext(),
                             CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                     llvm::ConstantInt::get(IntType, -1));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
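
// Illustrative example (not part of the original source): __sync_add_and_fetch
// wants the *post*-operation value, but atomicrmw returns the old value, so
// this helper re-applies Op to reconstruct it:
//   %old = atomicrmw add i32* %p, i32 %v seq_cst
//   %new = add i32 %old, %v
// The Invert flag covers __sync_nand_and_fetch, where the recomputed value
// must additionally be xor'ed with -1 to produce ~(%old & %v).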
/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
/// invoke the function EmitAtomicCmpXchgForMSIntrin.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  Value *Args[3];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}
/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.
static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  auto *Result = CGF.Builder.CreateAtomicCmpXchg(
                   Destination, Comparand, Exchange,
                   SuccessOrdering, FailureOrdering);
  Result->setVolatile(true);
  return CGF.Builder.CreateExtractValue(Result, 0);
}
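
// Illustrative example (not part of the original source): for
//   long r = _InterlockedCompareExchange(&Dest, Exch, Comp);
// the emitted instruction swaps the last two source arguments into LLVM's
// expected order:
//   %pair = cmpxchg volatile i32* %Dest, i32 %Comp, i32 %Exch seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
// and %old (the previous contents of *Destination) is the call's result.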
static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Add,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Sub,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0);
}

// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
                                unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1 });
}

// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}

// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *emitFPToIntRoundBuiltin(CodeGenFunction &CGF,
                                      const CallExpr *E,
                                      unsigned IntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID,
                                     {ResultType, Src0->getType()});
  return CGF.Builder.CreateCall(F, Src0);
}
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}
/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
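
// Illustrative example (not part of the original source): the overflow
// intrinsics return a {result, overflow-bit} pair, so a call with
// IntrinsicID == llvm.uadd.with.overflow on i32 operands expands to:
//   %pair  = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum   = extractvalue { i32, i1 } %pair, 0
//   %carry = extractvalue { i32, i1 } %pair, 1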
static Value *emitRangedBuiltin(CodeGenFunction &CGF,
                                unsigned IntrinsicID,
                                int low, int high) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
  llvm::Instruction *Call = CGF.Builder.CreateCall(F);
  Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
  return Call;
}

namespace {
struct WidthAndSignedness {
  unsigned Width;
  bool Signed;
};
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}
// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
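
// Illustrative worked example (not part of the original source): for the pair
// {signed 32-bit, unsigned 32-bit}, Signed becomes true, and the unsigned
// operand then needs one extra bit (32 + 1), so the encompassing type is a
// signed 33-bit integer: every value of both int and unsigned int fits in it.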
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  llvm::Type *DestType = Int8PtrTy;
  if (ArgValue->getType() != DestType)
    ArgValue =
        Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());

  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
///   it)
/// - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate E for side-effects. In either case, we shouldn't lower to
  // @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
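
// Illustrative example (not part of the original source):
// __builtin_object_size(p, 0) on an i8* that can't be constant-folded lowers
// to:
//   %size = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false, i1 true,
//                                              i1 false)
// where the i1 flags are min/max selection, null-is-unknown-size, and the
// dynamic (__builtin_dynamic_object_size) mode, respectively.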
namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace
BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
    // Main portable variants.
  case Builtin::BI_bittest:
    return {TestOnly, Unlocked, false};
  case Builtin::BI_bittestandcomplement:
    return {Complement, Unlocked, false};
  case Builtin::BI_bittestandreset:
    return {Reset, Unlocked, false};
  case Builtin::BI_bittestandset:
    return {Set, Unlocked, false};
  case Builtin::BI_interlockedbittestandreset:
    return {Reset, Sequential, false};
  case Builtin::BI_interlockedbittestandset:
    return {Set, Sequential, false};

    // X86-specific 64-bit variants.
  case Builtin::BI_bittest64:
    return {TestOnly, Unlocked, true};
  case Builtin::BI_bittestandcomplement64:
    return {Complement, Unlocked, true};
  case Builtin::BI_bittestandreset64:
    return {Reset, Unlocked, true};
  case Builtin::BI_bittestandset64:
    return {Set, Unlocked, true};
  case Builtin::BI_interlockedbittestandreset64:
    return {Reset, Sequential, true};
  case Builtin::BI_interlockedbittestandset64:
    return {Set, Sequential, true};

    // ARM/AArch64-specific ordering variants.
  case Builtin::BI_interlockedbittestandset_acq:
    return {Set, Acquire, false};
  case Builtin::BI_interlockedbittestandset_rel:
    return {Set, Release, false};
  case Builtin::BI_interlockedbittestandset_nf:
    return {Set, NoFence, false};
  case Builtin::BI_interlockedbittestandreset_acq:
    return {Reset, Acquire, false};
  case Builtin::BI_interlockedbittestandreset_rel:
    return {Reset, Release, false};
  case Builtin::BI_interlockedbittestandreset_nf:
    return {Reset, NoFence, false};
  }
  llvm_unreachable("expected only bittest intrinsics");
}
static char bitActionToX86BTCode(BitTest::ActionKind A) {
  switch (A) {
  case BitTest::TestOnly:   return '\0';
  case BitTest::Complement: return 'c';
  case BitTest::Reset:      return 'r';
  case BitTest::Set:        return 's';
  }
  llvm_unreachable("invalid action");
}
static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {
  char Action = bitActionToX86BTCode(BT.Action);
  char SizeSuffix = BT.Is64Bit ? 'q' : 'l';

  // Build the assembly.
  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  if (BT.Interlocking != BitTest::Unlocked)
    AsmOS << "lock ";
  AsmOS << "bt";
  if (Action)
    AsmOS << Action;
  AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";

  // Build the constraints. FIXME: We should support immediates when possible.
  std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(),
      CGF.getContext().getTypeSize(E->getArg(1)->getType()));
  llvm::Type *IntPtrType = IntType->getPointerTo();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
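
// Illustrative example (not part of the original source): for
// _interlockedbittestandset64, Action is 's', SizeSuffix is 'q', and the
// interlocked variant prepends the lock prefix, so the inline-asm string is:
//   "lock btsq $2, ($1)\n\tsetc ${0:b}"
// i.e. an atomic bit-test-and-set on the 64-bit word, with the carry flag
// (the old bit) materialized into the i8 result via setc.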
static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
  switch (I) {
  case BitTest::Unlocked:   return llvm::AtomicOrdering::NotAtomic;
  case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
  case BitTest::Acquire:    return llvm::AtomicOrdering::Acquire;
  case BitTest::Release:    return llvm::AtomicOrdering::Release;
  case BitTest::NoFence:    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("invalid interlocking");
}
/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  llvm::Triple::ArchType Arch = CGF.getTarget().getTriple().getArch();
  if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64)
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to form
  // a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
                                                 ByteIndex, "bittest.byteaddr"),
                   CharUnits::One());
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics.
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
                                          Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
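
// Illustrative IR sketch (not part of the original source): on a non-x86
// target, the non-interlocked _bittestandset(p, n) takes the generic path
// above and emits roughly:
//   %byteidx  = ashr i32 %n, 3                          ; bittest.byteidx
//   %byteaddr = getelementptr inbounds i8, i8* %p, i32 %byteidx
//   %lo       = and i8 %n.trunc, 7
//   %mask     = shl i8 1, %lo                           ; bittest.mask
//   %old      = load i8, i8* %byteaddr                  ; bittest.byte
//   %new      = or i8 %old, %mask
//   store i8 %new, i8* %byteaddr
//   %shr      = lshr i8 %old, %lo                       ; bittest.shr
//   %res      = and i8 %shr, 1                          ; bittest.res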
namespace {
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}

/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {
  llvm::Value *Arg1 = nullptr;
  llvm::Type *Arg1Ty = nullptr;
  StringRef Name;
  bool IsVarArg = false;
  if (SJKind == MSVCSetJmpKind::_setjmp3) {
    Name = "_setjmp3";
    Arg1Ty = CGF.Int32Ty;
    Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
    IsVarArg = true;
  } else {
    Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
    Arg1Ty = CGF.Int8PtrTy;
    if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
    } else
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
          llvm::ConstantInt::get(CGF.Int32Ty, 0));
  }

  // Mark the call site and declaration with ReturnsTwice.
  llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
  llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
      llvm::Attribute::ReturnsTwice);
  llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
      ReturnsTwiceAttr, /*Local=*/true);

  llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
      CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
  llvm::Value *Args[] = {Buf, Arg1};
  llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
  CB->setAttributes(ReturnsTwiceAttr);
  return RValue::get(CB);
}
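
// Illustrative example (not part of the original source): on 32-bit x86,
// setjmp(buf) becomes a variadic call to the CRT's _setjmp3 with a zero
// extra-argument count:
//   %r = call i32 (i8*, i32, ...) @_setjmp3(i8* %buf, i32 0)  ; returns_twice
// while on other architectures the frame address (or, on AArch64, the result
// of @llvm.sponentry) is passed as the second argument to _setjmp/_setjmpex.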
// Many of MSVC's builtins are available on x64, ARM, and AArch64; to avoid
// repeating code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
  _BitScanForward,
  _BitScanReverse,
  _InterlockedAnd,
  _InterlockedDecrement,
  _InterlockedExchange,
  _InterlockedExchangeAdd,
  _InterlockedExchangeSub,
  _InterlockedIncrement,
  _InterlockedOr,
  _InterlockedXor,
  _InterlockedExchangeAdd_acq,
  _InterlockedExchangeAdd_rel,
  _InterlockedExchangeAdd_nf,
  _InterlockedExchange_acq,
  _InterlockedExchange_rel,
  _InterlockedExchange_nf,
  _InterlockedCompareExchange_acq,
  _InterlockedCompareExchange_rel,
  _InterlockedCompareExchange_nf,
  _InterlockedOr_acq,
  _InterlockedOr_rel,
  _InterlockedOr_nf,
  _InterlockedXor_acq,
  _InterlockedXor_rel,
  _InterlockedXor_nf,
  _InterlockedAnd_acq,
  _InterlockedAnd_rel,
  _InterlockedAnd_nf,
  _InterlockedIncrement_acq,
  _InterlockedIncrement_rel,
  _InterlockedIncrement_nf,
  _InterlockedDecrement_acq,
  _InterlockedDecrement_rel,
  _InterlockedDecrement_nf,
  __fastfail,
};
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
                                            const CallExpr *E) {
  switch (BuiltinID) {
  case MSVCIntrin::_BitScanForward:
  case MSVCIntrin::_BitScanReverse: {
    Value *ArgValue = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = ArgValue->getType();
    llvm::Type *IndexType =
        EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
    llvm::Type *ResultType = ConvertType(E->getType());

    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
    Value *ResZero = llvm::Constant::getNullValue(ResultType);
    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);

    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");

    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ResZero, Begin);

    Builder.SetInsertPoint(NotZero);
    Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));

    if (BuiltinID == MSVCIntrin::_BitScanForward) {
      Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Builder.CreateStore(ZeroCount, IndexAddress, false);
    } else {
      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);

      Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
      Builder.CreateStore(Index, IndexAddress, false);
    }
    Builder.CreateBr(End);
    Result->addIncoming(ResOne, NotZero);

    Builder.SetInsertPoint(End);
    return Result;
  }
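
  // Illustrative example (not part of the original source):
  // _BitScanForward(&idx, 8) stores 3 (the index of the lowest set bit, via
  // cttz) and yields 1; a zero input branches straight to bitscan_end and
  // yields 0, with *idx left untouched.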
  case MSVCIntrin::_InterlockedAnd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
  case MSVCIntrin::_InterlockedExchange:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
  case MSVCIntrin::_InterlockedExchangeAdd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
  case MSVCIntrin::_InterlockedExchangeSub:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
  case MSVCIntrin::_InterlockedOr:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
  case MSVCIntrin::_InterlockedXor:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
  case MSVCIntrin::_InterlockedExchangeAdd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchangeAdd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchangeAdd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedExchange_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchange_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchange_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange_acq:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange_rel:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange_nf:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedOr_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedOr_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedOr_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedXor_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedXor_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedXor_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedAnd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedAnd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedAnd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedIncrement_acq:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedIncrement_rel:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedIncrement_nf:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedDecrement_acq:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedDecrement_rel:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedDecrement_nf:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);

  case MSVCIntrin::_InterlockedDecrement:
    return EmitAtomicDecrementValue(*this, E);
  case MSVCIntrin::_InterlockedIncrement:
    return EmitAtomicIncrementValue(*this, E);

  case MSVCIntrin::__fastfail: {
    // Request immediate process termination from the kernel. The instruction
    // sequences to do this are documented on MSDN:
    // https://msdn.microsoft.com/en-us/library/dn774154.aspx
    llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
    StringRef Asm, Constraints;
    switch (ISA) {
    default:
      ErrorUnsupported(E, "__fastfail call for this architecture");
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      Asm = "int $$0x29";
      Constraints = "{cx}";
      break;
    case llvm::Triple::thumb:
      Asm = "udf #251";
      Constraints = "{r0}";
      break;
    case llvm::Triple::aarch64:
      Asm = "brk #0xF003";
      Constraints = "{w0}";
      break;
    }
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
    llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
        getLLVMContext(), llvm::AttributeList::FunctionIndex,
        llvm::Attribute::NoReturn);
    llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
    CI->setAttributes(NoReturnAttr);
    return CI;
  }
  }
  llvm_unreachable("Incorrect MSVC intrinsic!");
}
namespace {
// ARC cleanup for __builtin_os_log_format
struct CallObjCArcUse final : EHScopeStack::Cleanup {
  CallObjCArcUse(llvm::Value *object) : object(object) {}
  llvm::Value *object;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitARCIntrinsicUse(object);
  }
};
}

Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
                                                 BuiltinCheckKind Kind) {
  assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
          && "Unsupported builtin check kind");

  Value *ArgValue = EmitScalarExpr(E);
  if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
    return ArgValue;

  SanitizerScope SanScope(this);
  Value *Cond = Builder.CreateICmpNE(
      ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
  EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
            SanitizerHandler::InvalidBuiltin,
            {EmitCheckSourceLocation(E->getExprLoc()),
             llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
            None);
  return ArgValue;
}
/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
  QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
  return C.getCanonicalType(UnsignedTy);
}

llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
    const analyze_os_log::OSLogBufferLayout &Layout,
    CharUnits BufferAlignment) {
  ASTContext &Ctx = getContext();

  llvm::SmallString<64> Name;
  {
    raw_svector_ostream OS(Name);
    OS << "__os_log_helper";
    OS << "_" << BufferAlignment.getQuantity();
    OS << "_" << int(Layout.getSummaryByte());
    OS << "_" << int(Layout.getNumArgsByte());
    for (const auto &Item : Layout.Items)
      OS << "_" << int(Item.getSizeByte()) << "_"
         << int(Item.getDescriptorByte());
  }

  if (llvm::Function *F = CGM.getModule().getFunction(Name))
    return F;

  llvm::SmallVector<QualType, 4> ArgTys;
  FunctionArgList Args;
  Args.push_back(ImplicitParamDecl::Create(
      Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
      ImplicitParamDecl::Other));
  ArgTys.emplace_back(Ctx.VoidPtrTy);

  for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
    char Size = Layout.Items[I].getSizeByte();
    if (!Size)
      continue;

    QualType ArgTy = getOSLogArgType(Ctx, Size);
    Args.push_back(ImplicitParamDecl::Create(
        Ctx, nullptr, SourceLocation(),
        &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
        ImplicitParamDecl::Other));
    ArgTys.emplace_back(ArgTy);
  }

  QualType ReturnTy = Ctx.VoidTy;
  QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});

  // The helper function has linkonce_odr linkage to enable the linker to merge
  // identical functions. To ensure the merging always happens, 'noinline' is
  // attached to the function when compiling with -Oz.
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
  llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Fn = llvm::Function::Create(
      FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
  Fn->setDoesNotThrow();

  // Attach 'noinline' at -Oz.
  if (CGM.getCodeGenOpts().OptimizeSize == 2)
    Fn->addFnAttr(llvm::Attribute::NoInline);

  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  IdentifierInfo *II = &Ctx.Idents.get(Name);
  FunctionDecl *FD = FunctionDecl::Create(
      Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_PrivateExtern, false, false);

  StartFunction(FD, ReturnTy, Fn, FI, Args);

  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  CharUnits Offset;
  Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
                  BufferAlignment);
  Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
  Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));

  unsigned I = 1;
  for (const auto &Item : Layout.Items) {
    Builder.CreateStore(
        Builder.getInt8(Item.getDescriptorByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
    Builder.CreateStore(
        Builder.getInt8(Item.getSizeByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));

    CharUnits Size = Item.size();
    if (!Size.getQuantity())
      continue;

    Address Arg = GetAddrOfLocalVar(Args[I]);
    Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
    Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
                                 "argDataCast");
    Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
    Offset += Size;
    ++I;
  }

  FinishFunction();

  return Fn;
}
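
// Illustrative example (not part of the original source): a format buffer
// with 8-byte alignment, summary byte 2, two arguments, and two 4-byte items
// with descriptor byte 0 would produce the helper name
//   __os_log_helper_8_2_2_4_0_4_0
// so that identical layouts in different TUs fold to one linkonce_odr body.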
RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
  assert(E.getNumArgs() >= 2 &&
         "__builtin_os_log_format takes at least 2 arguments");
  ASTContext &Ctx = getContext();
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
  Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
  llvm::SmallVector<llvm::Value *, 4> RetainableOperands;

  // Ignore argument 1, the format string. It is not currently used.
  CallArgList Args;
  Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);

  for (const auto &Item : Layout.Items) {
    int Size = Item.getSizeByte();
    if (!Size)
      continue;

    llvm::Value *ArgVal;

    if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
      uint64_t Val = 0;
      for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
        Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
      ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
    } else if (const Expr *TheExpr = Item.getExpr()) {
      ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);

      // Check if this is a retainable type.
      if (TheExpr->getType()->isObjCRetainableType()) {
        assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
               "Only scalar can be a ObjC retainable type");
        // Check if the object is constant, if not, save it in
        // RetainableOperands.
        if (!isa<Constant>(ArgVal))
          RetainableOperands.push_back(ArgVal);
      }
    } else {
      ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
    }

    unsigned ArgValSize =
        CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
    llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
                                                     ArgValSize);
    ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
    CanQualType ArgTy = getOSLogArgType(Ctx, Size);
    // If ArgVal has type x86_fp80, zero-extend ArgVal.
    ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
    Args.add(RValue::get(ArgVal), ArgTy);
  }

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
  llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
      Layout, BufAddr.getAlignment());
  EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);

  // Push a clang.arc.use cleanup for each object in RetainableOperands. The
  // cleanup will cause the use to appear after the final log call, keeping
  // the object valid while it's held in the log buffer. Note that if there's
  // a release cleanup on the object, it will already be active; since
  // cleanups are emitted in reverse order, the use will occur before the
  // object is released.
  if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
      CGM.getCodeGenOpts().OptimizationLevel != 0)
    for (llvm::Value *Object : RetainableOperands)
      pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object);

  return RValue::get(BufAddr.getPointer());
}
/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
                                       WidthAndSignedness Op1Info,
                                       WidthAndSignedness Op2Info,
                                       WidthAndSignedness ResultInfo) {
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
         Op1Info.Signed != Op2Info.Signed;
}
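
// Illustrative example (not part of the original source):
//   __builtin_mul_overflow(int, unsigned, int *)
// qualifies: the operand signs differ and max(32, 32) >= 32, so codegen can
// use a single umul.with.overflow on the absolute value instead of widening
// both operands to the 33-bit encompassing type used by the generic path.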
/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
                             WidthAndSignedness Op1Info, const clang::Expr *Op2,
                             WidthAndSignedness Op2Info,
                             const clang::Expr *ResultArg, QualType ResultQTy,
                             WidthAndSignedness ResultInfo) {
  assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
                                    Op2Info, ResultInfo) &&
         "Not a mixed-sign multiplication we can specialize");

  // Emit the signed and unsigned operands.
  const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
  const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
  llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
  llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
  unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
  unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;

  // One of the operands may be smaller than the other. If so, [s|z]ext it.
  if (SignedOpWidth < UnsignedOpWidth)
    Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
  if (UnsignedOpWidth < SignedOpWidth)
    Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");

  llvm::Type *OpTy = Signed->getType();
  llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  llvm::Type *ResTy = ResultPtr.getElementType();
  unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);

  // Take the absolute value of the signed operand.
  llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
  llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
  llvm::Value *AbsSigned =
      CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);

  // Perform a checked unsigned multiplication.
  llvm::Value *UnsignedOverflow;
  llvm::Value *UnsignedResult =
      EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
                            Unsigned, UnsignedOverflow);

  llvm::Value *Overflow, *Result;
  if (ResultInfo.Signed) {
    // Signed overflow occurs if the result is greater than INT_MAX or lesser
    // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
    auto IntMax =
        llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
    llvm::Value *MaxResult =
        CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
                              CGF.Builder.CreateZExt(IsNegative, OpTy));
    llvm::Value *SignedOverflow =
        CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);

    // Prepare the signed result (possibly by negating it).
    llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
    llvm::Value *SignedResult =
        CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
    Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
  } else {
    // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
    llvm::Value *Underflow = CGF.Builder.CreateAnd(
        IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
    if (ResultInfo.Width < OpWidth) {
      auto IntMax =
          llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
      llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
          UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
      Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
    }

    // Negate the product if it would be negative in infinite precision.
    Result = CGF.Builder.CreateSelect(
        IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);

    Result = CGF.Builder.CreateTrunc(Result, ResTy);
  }
  assert(Overflow && Result && "Missing overflow or result");

  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(Overflow);
}
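
// Illustrative worked example (not part of the original source): for
//   int r; __builtin_mul_overflow(-3, 5u, &r);
// the signed operand's absolute value is 3, umul.with.overflow yields 15 with
// no unsigned overflow, and since IsNegative holds, 15 is checked against
// INT_MAX + 1 (2147483648) before the final select negates it to -15.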
static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
                               Value *&RecordPtr, CharUnits Align,
                               llvm::FunctionCallee Func, int Lvl) {
  ASTContext &Context = CGF.getContext();
  RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
  std::string Pad = std::string(Lvl * 4, ' ');

  Value *GString =
      CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
  Value *Res = CGF.Builder.CreateCall(Func, {GString});

  static llvm::DenseMap<QualType, const char *> Types;
  if (Types.empty()) {
    Types[Context.CharTy] = "%c";
    Types[Context.BoolTy] = "%d";
    Types[Context.SignedCharTy] = "%hhd";
    Types[Context.UnsignedCharTy] = "%hhu";
    Types[Context.IntTy] = "%d";
    Types[Context.UnsignedIntTy] = "%u";
    Types[Context.LongTy] = "%ld";
    Types[Context.UnsignedLongTy] = "%lu";
    Types[Context.LongLongTy] = "%lld";
    Types[Context.UnsignedLongLongTy] = "%llu";
    Types[Context.ShortTy] = "%hd";
    Types[Context.UnsignedShortTy] = "%hu";
    Types[Context.VoidPtrTy] = "%p";
    Types[Context.FloatTy] = "%f";
    Types[Context.DoubleTy] = "%f";
    Types[Context.LongDoubleTy] = "%Lf";
    Types[Context.getPointerType(Context.CharTy)] = "%s";
    Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
  }

  for (const auto *FD : RD->fields()) {
    Value *FieldPtr = RecordPtr;
    if (RD->isUnion())
      FieldPtr = CGF.Builder.CreatePointerCast(
          FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
    else
      FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
                                             FD->getFieldIndex());

    GString = CGF.Builder.CreateGlobalStringPtr(
        llvm::Twine(Pad)
            .concat(FD->getType().getAsString())
            .concat(llvm::Twine(' '))
            .concat(FD->getNameAsString())
            .concat(" : ")
            .str());
    Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
    Res = CGF.Builder.CreateAdd(Res, TmpRes);

    QualType CanonicalType =
        FD->getType().getUnqualifiedType().getCanonicalType();

    // We check whether we are in a recursive type
    if (CanonicalType->isRecordType()) {
      TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
      Res = CGF.Builder.CreateAdd(TmpRes, Res);
      continue;
    }

    // We try to determine the best format to print the current field
    llvm::Twine Format = Types.find(CanonicalType) == Types.end()
                             ? Types[Context.VoidPtrTy]
                             : Types[CanonicalType];

    Address FieldAddress = Address(FieldPtr, Align);
    FieldPtr = CGF.Builder.CreateLoad(FieldAddress);

    // FIXME Need to handle bitfield here
    GString = CGF.Builder.CreateGlobalStringPtr(
        Format.concat(llvm::Twine('\n')).str());
    TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
    Res = CGF.Builder.CreateAdd(Res, TmpRes);
  }

  GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
  Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
  Res = CGF.Builder.CreateAdd(Res, TmpRes);
  return Res;
}
static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
                              llvm::SmallPtrSetImpl<const Decl *> &Seen) {
  if (const auto *Arr = Ctx.getAsArrayType(Ty))
    Ty = Ctx.getBaseElementType(Arr);

  const auto *Record = Ty->getAsCXXRecordDecl();
  if (!Record)
    return false;

  // We've already checked this type, or are in the process of checking it.
  if (!Seen.insert(Record).second)
    return false;

  assert(Record->hasDefinition() &&
         "Incomplete types should already be diagnosed");

  if (Record->isDynamicClass())
    return true;

  for (FieldDecl *F : Record->fields()) {
    if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
      return true;
  }
  return false;
}

/// Determine if the specified type requires laundering by checking if it is a
/// dynamic class type or contains a subobject which is a dynamic class type.
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
  if (!CGM.getCodeGenOpts().StrictVTablePointers)
    return false;
  llvm::SmallPtrSet<const Decl *, 16> Seen;
  return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
}
1516 RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
1517 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
1518 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
1520 // The builtin's shift arg may have a different type than the source arg and
1521 // result, but the LLVM intrinsic uses the same type for all values.
1522 llvm::Type *Ty = Src->getType();
1523 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
1525 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
1526 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGM.getIntrinsic(IID, Ty);
  return RValue::get(Builder.CreateCall(F, {Src, Src, ShiftAmt}));
}
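// Example (illustrative): a rotate-left of an 8-bit value by N is
// fshl(x, x, N) == (x << N) | (x >> (8 - N)), so
// __builtin_rotateleft8(0x81, 1) evaluates to 0x03.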
RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                                        const CallExpr *E,
                                        ReturnValueSlot ReturnValue) {
1534 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
1535 // See if we can constant fold this builtin. If so, don't emit it at all.
1536 Expr::EvalResult Result;
1537 if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
1538 !Result.hasSideEffects()) {
1539 if (Result.Val.isInt())
1540 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
1541 Result.Val.getInt()));
1542 if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }
1547 // There are LLVM math intrinsics/instructions corresponding to math library
1548 // functions except the LLVM op will never set errno while the math library
1549 // might. Also, math builtins have the same semantics as their math library
1550 // twins. Thus, we can transform math library and builtin calls to their
1551 // LLVM counterparts if the call is marked 'const' (known to never set errno).
1552 if (FD->hasAttr<ConstAttr>()) {
1553 switch (BuiltinID) {
1554 case Builtin::BIceil:
1555 case Builtin::BIceilf:
1556 case Builtin::BIceill:
1557 case Builtin::BI__builtin_ceil:
1558 case Builtin::BI__builtin_ceilf:
1559 case Builtin::BI__builtin_ceilf16:
1560 case Builtin::BI__builtin_ceill:
1561 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil));
1563 case Builtin::BIcopysign:
1564 case Builtin::BIcopysignf:
1565 case Builtin::BIcopysignl:
1566 case Builtin::BI__builtin_copysign:
1567 case Builtin::BI__builtin_copysignf:
1568 case Builtin::BI__builtin_copysignf16:
1569 case Builtin::BI__builtin_copysignl:
1570 case Builtin::BI__builtin_copysignf128:
1571 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
1573 case Builtin::BIcos:
1574 case Builtin::BIcosf:
1575 case Builtin::BIcosl:
1576 case Builtin::BI__builtin_cos:
1577 case Builtin::BI__builtin_cosf:
1578 case Builtin::BI__builtin_cosf16:
1579 case Builtin::BI__builtin_cosl:
1580 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::cos));
1582 case Builtin::BIexp:
1583 case Builtin::BIexpf:
1584 case Builtin::BIexpl:
1585 case Builtin::BI__builtin_exp:
1586 case Builtin::BI__builtin_expf:
1587 case Builtin::BI__builtin_expf16:
1588 case Builtin::BI__builtin_expl:
1589 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp));
1591 case Builtin::BIexp2:
1592 case Builtin::BIexp2f:
1593 case Builtin::BIexp2l:
1594 case Builtin::BI__builtin_exp2:
1595 case Builtin::BI__builtin_exp2f:
1596 case Builtin::BI__builtin_exp2f16:
1597 case Builtin::BI__builtin_exp2l:
1598 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp2));
1600 case Builtin::BIfabs:
1601 case Builtin::BIfabsf:
1602 case Builtin::BIfabsl:
1603 case Builtin::BI__builtin_fabs:
1604 case Builtin::BI__builtin_fabsf:
1605 case Builtin::BI__builtin_fabsf16:
1606 case Builtin::BI__builtin_fabsl:
1607 case Builtin::BI__builtin_fabsf128:
1608 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
1610 case Builtin::BIfloor:
1611 case Builtin::BIfloorf:
1612 case Builtin::BIfloorl:
1613 case Builtin::BI__builtin_floor:
1614 case Builtin::BI__builtin_floorf:
1615 case Builtin::BI__builtin_floorf16:
1616 case Builtin::BI__builtin_floorl:
1617 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor));
1619 case Builtin::BIfma:
1620 case Builtin::BIfmaf:
1621 case Builtin::BIfmal:
1622 case Builtin::BI__builtin_fma:
1623 case Builtin::BI__builtin_fmaf:
1624 case Builtin::BI__builtin_fmaf16:
1625 case Builtin::BI__builtin_fmal:
1626 return RValue::get(emitTernaryBuiltin(*this, E, Intrinsic::fma));
1628 case Builtin::BIfmax:
1629 case Builtin::BIfmaxf:
1630 case Builtin::BIfmaxl:
1631 case Builtin::BI__builtin_fmax:
1632 case Builtin::BI__builtin_fmaxf:
1633 case Builtin::BI__builtin_fmaxf16:
1634 case Builtin::BI__builtin_fmaxl:
1635 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum));
1637 case Builtin::BIfmin:
1638 case Builtin::BIfminf:
1639 case Builtin::BIfminl:
1640 case Builtin::BI__builtin_fmin:
1641 case Builtin::BI__builtin_fminf:
1642 case Builtin::BI__builtin_fminf16:
1643 case Builtin::BI__builtin_fminl:
1644 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum));
    // fmod() is a special case: it maps to the frem instruction rather than an
    // LLVM intrinsic.
1648 case Builtin::BIfmod:
1649 case Builtin::BIfmodf:
1650 case Builtin::BIfmodl:
1651 case Builtin::BI__builtin_fmod:
1652 case Builtin::BI__builtin_fmodf:
1653 case Builtin::BI__builtin_fmodf16:
1654 case Builtin::BI__builtin_fmodl: {
1655 Value *Arg1 = EmitScalarExpr(E->getArg(0));
1656 Value *Arg2 = EmitScalarExpr(E->getArg(1));
      return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
    }
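    // Note (illustrative): frem follows fmod's sign convention, keeping the
    // sign of the dividend: fmod(5.5, 2.0) == 1.5 and fmod(-5.5, 2.0) == -1.5.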
1660 case Builtin::BIlog:
1661 case Builtin::BIlogf:
1662 case Builtin::BIlogl:
1663 case Builtin::BI__builtin_log:
1664 case Builtin::BI__builtin_logf:
1665 case Builtin::BI__builtin_logf16:
1666 case Builtin::BI__builtin_logl:
1667 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log));
1669 case Builtin::BIlog10:
1670 case Builtin::BIlog10f:
1671 case Builtin::BIlog10l:
1672 case Builtin::BI__builtin_log10:
1673 case Builtin::BI__builtin_log10f:
1674 case Builtin::BI__builtin_log10f16:
1675 case Builtin::BI__builtin_log10l:
1676 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log10));
1678 case Builtin::BIlog2:
1679 case Builtin::BIlog2f:
1680 case Builtin::BIlog2l:
1681 case Builtin::BI__builtin_log2:
1682 case Builtin::BI__builtin_log2f:
1683 case Builtin::BI__builtin_log2f16:
1684 case Builtin::BI__builtin_log2l:
1685 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log2));
1687 case Builtin::BInearbyint:
1688 case Builtin::BInearbyintf:
1689 case Builtin::BInearbyintl:
1690 case Builtin::BI__builtin_nearbyint:
1691 case Builtin::BI__builtin_nearbyintf:
1692 case Builtin::BI__builtin_nearbyintl:
1693 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint));
1695 case Builtin::BIpow:
1696 case Builtin::BIpowf:
1697 case Builtin::BIpowl:
1698 case Builtin::BI__builtin_pow:
1699 case Builtin::BI__builtin_powf:
1700 case Builtin::BI__builtin_powf16:
1701 case Builtin::BI__builtin_powl:
1702 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::pow));
1704 case Builtin::BIrint:
1705 case Builtin::BIrintf:
1706 case Builtin::BIrintl:
1707 case Builtin::BI__builtin_rint:
1708 case Builtin::BI__builtin_rintf:
1709 case Builtin::BI__builtin_rintf16:
1710 case Builtin::BI__builtin_rintl:
1711 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint));
1713 case Builtin::BIround:
1714 case Builtin::BIroundf:
1715 case Builtin::BIroundl:
1716 case Builtin::BI__builtin_round:
1717 case Builtin::BI__builtin_roundf:
1718 case Builtin::BI__builtin_roundf16:
1719 case Builtin::BI__builtin_roundl:
1720 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round));
1722 case Builtin::BIsin:
1723 case Builtin::BIsinf:
1724 case Builtin::BIsinl:
1725 case Builtin::BI__builtin_sin:
1726 case Builtin::BI__builtin_sinf:
1727 case Builtin::BI__builtin_sinf16:
1728 case Builtin::BI__builtin_sinl:
1729 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sin));
1731 case Builtin::BIsqrt:
1732 case Builtin::BIsqrtf:
1733 case Builtin::BIsqrtl:
1734 case Builtin::BI__builtin_sqrt:
1735 case Builtin::BI__builtin_sqrtf:
1736 case Builtin::BI__builtin_sqrtf16:
1737 case Builtin::BI__builtin_sqrtl:
1738 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sqrt));
1740 case Builtin::BItrunc:
1741 case Builtin::BItruncf:
1742 case Builtin::BItruncl:
1743 case Builtin::BI__builtin_trunc:
1744 case Builtin::BI__builtin_truncf:
1745 case Builtin::BI__builtin_truncf16:
1746 case Builtin::BI__builtin_truncl:
1747 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc));
1749 case Builtin::BIlround:
1750 case Builtin::BIlroundf:
1751 case Builtin::BIlroundl:
1752 case Builtin::BI__builtin_lround:
1753 case Builtin::BI__builtin_lroundf:
1754 case Builtin::BI__builtin_lroundl:
1755 return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lround));
1757 case Builtin::BIllround:
1758 case Builtin::BIllroundf:
1759 case Builtin::BIllroundl:
1760 case Builtin::BI__builtin_llround:
1761 case Builtin::BI__builtin_llroundf:
1762 case Builtin::BI__builtin_llroundl:
1763 return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llround));
1765 case Builtin::BIlrint:
1766 case Builtin::BIlrintf:
1767 case Builtin::BIlrintl:
1768 case Builtin::BI__builtin_lrint:
1769 case Builtin::BI__builtin_lrintf:
1770 case Builtin::BI__builtin_lrintl:
1771 return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lrint));
1773 case Builtin::BIllrint:
1774 case Builtin::BIllrintf:
1775 case Builtin::BIllrintl:
1776 case Builtin::BI__builtin_llrint:
1777 case Builtin::BI__builtin_llrintf:
1778 case Builtin::BI__builtin_llrintl:
      return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llrint));

    default:
      break;
    }
  }

  switch (BuiltinID) {
  default:
    break;
  case Builtin::BI__builtin___CFStringMakeConstantString:
1789 case Builtin::BI__builtin___NSStringMakeConstantString:
1790 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
1791 case Builtin::BI__builtin_stdarg_start:
1792 case Builtin::BI__builtin_va_start:
1793 case Builtin::BI__va_start:
1794 case Builtin::BI__builtin_va_end:
    return RValue::get(
        EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
                           ? EmitScalarExpr(E->getArg(0))
                           : EmitVAListRef(E->getArg(0)).getPointer(),
                       BuiltinID != Builtin::BI__builtin_va_end));
1800 case Builtin::BI__builtin_va_copy: {
1801 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
1802 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
1804 llvm::Type *Type = Int8PtrTy;
1806 DstPtr = Builder.CreateBitCast(DstPtr, Type);
1807 SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
                                          {DstPtr, SrcPtr}));
  }
1811 case Builtin::BI__builtin_abs:
1812 case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    // X < 0 ? -X : X is an IR idiom for integer abs; the negation has 'nsw'
    // because abs of INT_MIN is undefined.
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
    Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
    Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
    Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
    return RValue::get(Result);
  }
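  // Example (illustrative): for __builtin_abs(-5) the icmp slt is true, so the
  // select yields the nsw-negated value 5; __builtin_abs(INT_MIN) is
  // undefined, which is exactly what the 'nsw' flag on the negation encodes.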
1823 case Builtin::BI__builtin_conj:
1824 case Builtin::BI__builtin_conjf:
1825 case Builtin::BI__builtin_conjl: {
1826 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
1827 Value *Real = ComplexVal.first;
1828 Value *Imag = ComplexVal.second;
    Value *Zero =
        Imag->getType()->isFPOrFPVectorTy()
            ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
            : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
1837 case Builtin::BI__builtin_creal:
1838 case Builtin::BI__builtin_crealf:
1839 case Builtin::BI__builtin_creall:
1840 case Builtin::BIcreal:
1841 case Builtin::BIcrealf:
1842 case Builtin::BIcreall: {
1843 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

1847 case Builtin::BI__builtin_dump_struct: {
1848 llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
1849 llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
1850 LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
1852 Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
1853 CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
1855 const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
1856 QualType Arg0Type = Arg0->getType()->getPointeeType();
1858 Value *RecordPtr = EmitScalarExpr(Arg0);
1859 Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
1860 {LLVMFuncType, Func}, 0);
    return RValue::get(Res);
  }

1864 case Builtin::BI__builtin_preserve_access_index: {
    // Only enable the preserved-access-index region when debug info is
    // available, since debug info is needed to preserve user-level access
    // information.
    if (!getDebugInfo()) {
      CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
      return RValue::get(EmitScalarExpr(E->getArg(0)));
    }

    // Nested builtin_preserve_access_index() is not supported.
    if (IsInPreservedAIRegion) {
      CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
      return RValue::get(EmitScalarExpr(E->getArg(0)));
    }

    IsInPreservedAIRegion = true;
    Value *Res = EmitScalarExpr(E->getArg(0));
    IsInPreservedAIRegion = false;
    return RValue::get(Res);
  }

1885 case Builtin::BI__builtin_cimag:
1886 case Builtin::BI__builtin_cimagf:
1887 case Builtin::BI__builtin_cimagl:
1888 case Builtin::BIcimag:
1889 case Builtin::BIcimagf:
1890 case Builtin::BIcimagl: {
1891 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

1895 case Builtin::BI__builtin_clrsb:
1896 case Builtin::BI__builtin_clrsbl:
1897 case Builtin::BI__builtin_clrsbll: {
    // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
1899 Value *ArgValue = EmitScalarExpr(E->getArg(0));
1901 llvm::Type *ArgType = ArgValue->getType();
1902 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1904 llvm::Type *ResultType = ConvertType(E->getType());
1905 Value *Zero = llvm::Constant::getNullValue(ArgType);
1906 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
1907 Value *Inverse = Builder.CreateNot(ArgValue, "not");
1908 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
1909 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
1910 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
    Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                   "cast");
    return RValue::get(Result);
  }
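  // Examples (illustrative, 32-bit int): __builtin_clrsb(0) == 31 and
  // __builtin_clrsb(-1) == 31 (all bits match the sign bit), while
  // __builtin_clrsb(1) == 30 since ctlz(1) == 31.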
1915 case Builtin::BI__builtin_ctzs:
1916 case Builtin::BI__builtin_ctz:
1917 case Builtin::BI__builtin_ctzl:
1918 case Builtin::BI__builtin_ctzll: {
1919 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
1921 llvm::Type *ArgType = ArgValue->getType();
1922 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1924 llvm::Type *ResultType = ConvertType(E->getType());
1925 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
1926 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
1932 case Builtin::BI__builtin_clzs:
1933 case Builtin::BI__builtin_clz:
1934 case Builtin::BI__builtin_clzl:
1935 case Builtin::BI__builtin_clzll: {
1936 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
1938 llvm::Type *ArgType = ArgValue->getType();
1939 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1941 llvm::Type *ResultType = ConvertType(E->getType());
1942 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
1943 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
1949 case Builtin::BI__builtin_ffs:
1950 case Builtin::BI__builtin_ffsl:
1951 case Builtin::BI__builtin_ffsll: {
1952 // ffs(x) -> x ? cttz(x) + 1 : 0
1953 Value *ArgValue = EmitScalarExpr(E->getArg(0));
1955 llvm::Type *ArgType = ArgValue->getType();
1956 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp =
        Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
                          llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
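  // Examples (illustrative): __builtin_ffs(0) == 0 via the iszero select;
  // __builtin_ffs(8) == 4 because cttz(8) == 3.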
1970 case Builtin::BI__builtin_parity:
1971 case Builtin::BI__builtin_parityl:
1972 case Builtin::BI__builtin_parityll: {
1973 // parity(x) -> ctpop(x) & 1
1974 Value *ArgValue = EmitScalarExpr(E->getArg(0));
1976 llvm::Type *ArgType = ArgValue->getType();
1977 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
1979 llvm::Type *ResultType = ConvertType(E->getType());
1980 Value *Tmp = Builder.CreateCall(F, ArgValue);
1981 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
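  // Example (illustrative): __builtin_parity(7) == 1, since ctpop(7) == 3 and
  // 3 & 1 == 1.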
1987 case Builtin::BI__lzcnt16:
1988 case Builtin::BI__lzcnt:
1989 case Builtin::BI__lzcnt64: {
1990 Value *ArgValue = EmitScalarExpr(E->getArg(0));
1992 llvm::Type *ArgType = ArgValue->getType();
1993 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1995 llvm::Type *ResultType = ConvertType(E->getType());
1996 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
2002 case Builtin::BI__popcnt16:
2003 case Builtin::BI__popcnt:
2004 case Builtin::BI__popcnt64:
2005 case Builtin::BI__builtin_popcount:
2006 case Builtin::BI__builtin_popcountl:
2007 case Builtin::BI__builtin_popcountll: {
2008 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2010 llvm::Type *ArgType = ArgValue->getType();
2011 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2013 llvm::Type *ResultType = ConvertType(E->getType());
2014 Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
2020 case Builtin::BI__builtin_unpredictable: {
2021 // Always return the argument of __builtin_unpredictable. LLVM does not
2022 // handle this builtin. Metadata for this builtin should be added directly
2023 // to instructions such as branches or switches that use it.
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
2026 case Builtin::BI__builtin_expect: {
2027 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2028 llvm::Type *ArgType = ArgValue->getType();
2030 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
    // Don't generate llvm.expect on -O0 as the backend won't use it for
    // anything.
    // Note, we still IRGen ExpectedValue because it could have side-effects.
2034 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2035 return RValue::get(ArgValue);
    Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *Result =
        Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
    return RValue::get(Result);
  }
2042 case Builtin::BI__builtin_assume_aligned: {
2043 const Expr *Ptr = E->getArg(0);
2044 Value *PtrValue = EmitScalarExpr(Ptr);
2045 Value *OffsetValue =
2046 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2048 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2049 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
2050 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2051 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2052 llvm::Value::MaximumAlignment);
2054 EmitAlignmentAssumption(PtrValue, Ptr,
2055 /*The expr loc is sufficient.*/ SourceLocation(),
2056 AlignmentCI, OffsetValue);
    return RValue::get(PtrValue);
  }
2059 case Builtin::BI__assume:
2060 case Builtin::BI__builtin_assume: {
2061 if (E->getArg(0)->HasSideEffects(getContext()))
2062 return RValue::get(nullptr);
2064 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2065 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
    return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
  }
2068 case Builtin::BI__builtin_bswap16:
2069 case Builtin::BI__builtin_bswap32:
2070 case Builtin::BI__builtin_bswap64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
  }
2073 case Builtin::BI__builtin_bitreverse8:
2074 case Builtin::BI__builtin_bitreverse16:
2075 case Builtin::BI__builtin_bitreverse32:
2076 case Builtin::BI__builtin_bitreverse64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
  }
2079 case Builtin::BI__builtin_rotateleft8:
2080 case Builtin::BI__builtin_rotateleft16:
2081 case Builtin::BI__builtin_rotateleft32:
2082 case Builtin::BI__builtin_rotateleft64:
2083 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2084 case Builtin::BI_rotl16:
2085 case Builtin::BI_rotl:
2086 case Builtin::BI_lrotl:
2087 case Builtin::BI_rotl64:
2088 return emitRotate(E, false);
2090 case Builtin::BI__builtin_rotateright8:
2091 case Builtin::BI__builtin_rotateright16:
2092 case Builtin::BI__builtin_rotateright32:
2093 case Builtin::BI__builtin_rotateright64:
2094 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2095 case Builtin::BI_rotr16:
2096 case Builtin::BI_rotr:
2097 case Builtin::BI_lrotr:
2098 case Builtin::BI_rotr64:
2099 return emitRotate(E, true);
2101 case Builtin::BI__builtin_constant_p: {
2102 llvm::Type *ResultType = ConvertType(E->getType());
2104 const Expr *Arg = E->getArg(0);
2105 QualType ArgType = Arg->getType();
2106 // FIXME: The allowance for Obj-C pointers and block pointers is historical
2107 // and likely a mistake.
2108 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2109 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
      // Per the GCC documentation, only numeric constants are recognized after
      // inlining.
      return RValue::get(ConstantInt::get(ResultType, 0));
    if (Arg->HasSideEffects(getContext()))
      // The argument is unevaluated, so be conservative if it might have
      // side-effects.
      return RValue::get(ConstantInt::get(ResultType, 0));
2119 Value *ArgValue = EmitScalarExpr(Arg);
2120 if (ArgType->isObjCObjectPointerType()) {
2121 // Convert Objective-C objects to id because we cannot distinguish between
2122 // LLVM types for Obj-C classes as they are opaque.
2123 ArgType = CGM.getContext().getObjCIdType();
      ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
    }

    Function *F =
        CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
2128 Value *Result = Builder.CreateCall(F, ArgValue);
2129 if (Result->getType() != ResultType)
2130 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
    return RValue::get(Result);
  }
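  // Note (illustrative): a literal like __builtin_constant_p(42) is folded by
  // the EvaluateAsRValue path at the top of this function; for runtime values
  // llvm.is.constant defers the answer to the optimizer, which lowers it to 0
  // or 1 after inlining and constant propagation.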
2133 case Builtin::BI__builtin_dynamic_object_size:
2134 case Builtin::BI__builtin_object_size: {
    unsigned Type =
        E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
2137 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2139 // We pass this builtin onto the optimizer so that it can figure out the
2140 // object size in more complex cases.
2141 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2142 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
                                             /*EmittedE=*/nullptr, IsDynamic));
  }
2145 case Builtin::BI__builtin_prefetch: {
2146 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
2148 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2149 llvm::ConstantInt::get(Int32Ty, 0);
2150 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2151 llvm::ConstantInt::get(Int32Ty, 3);
2152 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
2153 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
  }
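  // Operand sketch (illustrative): __builtin_prefetch(p, 0, 3) becomes
  // llvm.prefetch(p, /*rw=*/0, /*locality=*/3, /*data cache=*/1); rw is 0
  // (read) or 1 (write), and locality ranges from 0 (none) to 3 (high).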
2156 case Builtin::BI__builtin_readcyclecounter: {
2157 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
2160 case Builtin::BI__builtin___clear_cache: {
2161 Value *Begin = EmitScalarExpr(E->getArg(0));
2162 Value *End = EmitScalarExpr(E->getArg(1));
2163 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
    return RValue::get(Builder.CreateCall(F, {Begin, End}));
  }
2166 case Builtin::BI__builtin_trap:
2167 return RValue::get(EmitTrapCall(Intrinsic::trap));
2168 case Builtin::BI__debugbreak:
2169 return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
2170 case Builtin::BI__builtin_unreachable: {
2171 EmitUnreachable(E->getExprLoc());
2173 // We do need to preserve an insertion point.
2174 EmitBlock(createBasicBlock("unreachable.cont"));
    return RValue::get(nullptr);
  }

2179 case Builtin::BI__builtin_powi:
2180 case Builtin::BI__builtin_powif:
2181 case Builtin::BI__builtin_powil: {
2182 Value *Base = EmitScalarExpr(E->getArg(0));
2183 Value *Exponent = EmitScalarExpr(E->getArg(1));
2184 llvm::Type *ArgType = Base->getType();
2185 Function *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
  }

2189 case Builtin::BI__builtin_isgreater:
2190 case Builtin::BI__builtin_isgreaterequal:
2191 case Builtin::BI__builtin_isless:
2192 case Builtin::BI__builtin_islessequal:
2193 case Builtin::BI__builtin_islessgreater:
2194 case Builtin::BI__builtin_isunordered: {
2195 // Ordered comparisons: we know the arguments to these are matching scalar
2196 // floating point values.
2197 Value *LHS = EmitScalarExpr(E->getArg(0));
2198 Value *RHS = EmitScalarExpr(E->getArg(1));
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
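  // Note (illustrative): all of these except isunordered use *ordered* fcmp
  // predicates, which are false when either operand is NaN; e.g.
  // __builtin_isgreater(NAN, 1.0) == 0 but __builtin_isunordered(NAN, 1.0) == 1.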
2224 case Builtin::BI__builtin_isnan: {
2225 Value *V = EmitScalarExpr(E->getArg(0));
2226 V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

2230 case Builtin::BIfinite:
2231 case Builtin::BI__finite:
2232 case Builtin::BIfinitef:
2233 case Builtin::BI__finitef:
2234 case Builtin::BIfinitel:
2235 case Builtin::BI__finitel:
2236 case Builtin::BI__builtin_isinf:
2237 case Builtin::BI__builtin_isfinite: {
2238 // isinf(x) --> fabs(x) == infinity
2239 // isfinite(x) --> fabs(x) != infinity
2240 // x != NaN via the ordered compare in either case.
2241 Value *V = EmitScalarExpr(E->getArg(0));
2242 Value *Fabs = EmitFAbs(*this, V);
2243 Constant *Infinity = ConstantFP::getInfinity(V->getType());
    CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
                                  ? CmpInst::FCMP_OEQ
                                  : CmpInst::FCMP_ONE;
    Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
    return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
  }

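  // Examples (illustrative): __builtin_isinf(-INFINITY) == 1 (fabs gives +inf,
  // which compares oeq to infinity); for a NaN input both ordered compares
  // fail, so isinf(NAN) == 0 and isfinite(NAN) == 0.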
2251 case Builtin::BI__builtin_isinf_sign: {
2252 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
2253 Value *Arg = EmitScalarExpr(E->getArg(0));
2254 Value *AbsArg = EmitFAbs(*this, Arg);
2255 Value *IsInf = Builder.CreateFCmpOEQ(
2256 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
2257 Value *IsNeg = EmitSignBit(*this, Arg);
2259 llvm::Type *IntTy = ConvertType(E->getType());
2260 Value *Zero = Constant::getNullValue(IntTy);
2261 Value *One = ConstantInt::get(IntTy, 1);
2262 Value *NegativeOne = ConstantInt::get(IntTy, -1);
2263 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
2264 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
    return RValue::get(Result);
  }

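  // Examples (illustrative): __builtin_isinf_sign(-INFINITY) == -1,
  // __builtin_isinf_sign(INFINITY) == 1, and any finite or NaN argument
  // yields 0.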
2268 case Builtin::BI__builtin_isnormal: {
2269 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
2270 Value *V = EmitScalarExpr(E->getArg(0));
2271 Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
2273 Value *Abs = EmitFAbs(*this, V);
2274 Value *IsLessThanInf =
2275 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
2276 APFloat Smallest = APFloat::getSmallestNormalized(
2277 getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

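  // Example (illustrative): for double, __builtin_isnormal(DBL_MIN) == 1, but
  // __builtin_isnormal(DBL_MIN / 2) == 0 because the halved value is
  // subnormal and compares below the smallest normalized magnitude.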
2286 case Builtin::BI__builtin_flt_rounds: {
2287 Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
2289 llvm::Type *ResultType = ConvertType(E->getType());
2290 Value *Result = Builder.CreateCall(F);
2291 if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }

2297 case Builtin::BI__builtin_fpclassify: {
2298 Value *V = EmitScalarExpr(E->getArg(5));
2299 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
2302 BasicBlock *Begin = Builder.GetInsertBlock();
2303 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
2304 Builder.SetInsertPoint(End);
    PHINode *Result =
        Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                          "fpclassify_result");
2309 // if (V==0) return FP_ZERO
2310 Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
2313 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
2314 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
2315 Builder.CreateCondBr(IsZero, End, NotZero);
2316 Result->addIncoming(ZeroLiteral, Begin);
2318 // if (V != V) return FP_NAN
2319 Builder.SetInsertPoint(NotZero);
2320 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
2321 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
2322 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
2323 Builder.CreateCondBr(IsNan, End, NotNan);
2324 Result->addIncoming(NanLiteral, NotZero);
2326 // if (fabs(V) == infinity) return FP_INFINITY
2327 Builder.SetInsertPoint(NotNan);
2328 Value *VAbs = EmitFAbs(*this, V);
    Value *IsInf =
        Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                              "isinf");
2332 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
2333 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
2334 Builder.CreateCondBr(IsInf, End, NotInf);
2335 Result->addIncoming(InfLiteral, NotNan);
2337 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
2338 Builder.SetInsertPoint(NotInf);
2339 APFloat Smallest = APFloat::getSmallestNormalized(
2340 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
2344 Value *NormalResult =
2345 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
2346 EmitScalarExpr(E->getArg(3)));
2347 Builder.CreateBr(End);
2348 Result->addIncoming(NormalResult, NotInf);
2351 Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

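  // Control-flow sketch (illustrative): the tests run in the order
  // zero -> NaN -> infinity -> normal/subnormal, and each branch routes one of
  // the caller-supplied classification constants (args 0..4) into the phi.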
2355 case Builtin::BIalloca:
2356 case Builtin::BI_alloca:
2357 case Builtin::BI__builtin_alloca: {
2358 Value *Size = EmitScalarExpr(E->getArg(0));
2359 const TargetInfo &TI = getContext().getTargetInfo();
2360 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
2361 unsigned SuitableAlignmentInBytes =
        CGM.getContext()
            .toCharUnitsFromBits(TI.getSuitableAlign())
            .getQuantity();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
2366 AI->setAlignment(MaybeAlign(SuitableAlignmentInBytes));
2367 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
    return RValue::get(AI);
  }

2371 case Builtin::BI__builtin_alloca_with_align: {
2372 Value *Size = EmitScalarExpr(E->getArg(0));
2373 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
2374 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
2375 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
2376 unsigned AlignmentInBytes =
2377 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity();
2378 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
2379 AI->setAlignment(MaybeAlign(AlignmentInBytes));
2380 initializeAlloca(*this, AI, Size, AlignmentInBytes);
    return RValue::get(AI);
  }

2384 case Builtin::BIbzero:
2385 case Builtin::BI__builtin_bzero: {
2386 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2387 Value *SizeVal = EmitScalarExpr(E->getArg(1));
2388 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2389 E->getArg(0)->getExprLoc(), FD, 0);
2390 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
    return RValue::get(nullptr);
  }
2393 case Builtin::BImemcpy:
2394 case Builtin::BI__builtin_memcpy: {
2395 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2396 Address Src = EmitPointerWithAlignment(E->getArg(1));
2397 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2398 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2399 E->getArg(0)->getExprLoc(), FD, 0);
2400 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2401 E->getArg(1)->getExprLoc(), FD, 1);
2402 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }

2406 case Builtin::BI__builtin_char_memchr:
    BuiltinID = Builtin::BI__builtin_memchr;
    break;

2410 case Builtin::BI__builtin___memcpy_chk: {
2411 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
2412 Expr::EvalResult SizeResult, DstSizeResult;
2413 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
2416 llvm::APSInt Size = SizeResult.Val.getInt();
2417 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
2420 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2421 Address Src = EmitPointerWithAlignment(E->getArg(1));
2422 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2423 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }

2427 case Builtin::BI__builtin_objc_memmove_collectable: {
2428 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
2429 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
2430 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2431 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
2432 DestAddr, SrcAddr, SizeVal);
    return RValue::get(DestAddr.getPointer());
  }

2436 case Builtin::BI__builtin___memmove_chk: {
2437 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
2438 Expr::EvalResult SizeResult, DstSizeResult;
2439 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
2442 llvm::APSInt Size = SizeResult.Val.getInt();
2443 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
2446 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2447 Address Src = EmitPointerWithAlignment(E->getArg(1));
2448 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2449 Builder.CreateMemMove(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }

2453 case Builtin::BImemmove:
2454 case Builtin::BI__builtin_memmove: {
2455 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2456 Address Src = EmitPointerWithAlignment(E->getArg(1));
2457 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2458 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2459 E->getArg(0)->getExprLoc(), FD, 0);
2460 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2461 E->getArg(1)->getExprLoc(), FD, 1);
2462 Builder.CreateMemMove(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
2465 case Builtin::BImemset:
2466 case Builtin::BI__builtin_memset: {
2467 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2468 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
2469 Builder.getInt8Ty());
2470 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2471 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2472 E->getArg(0)->getExprLoc(), FD, 0);
2473 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
2476 case Builtin::BI__builtin___memset_chk: {
2477 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
2478 Expr::EvalResult SizeResult, DstSizeResult;
2479 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
2482 llvm::APSInt Size = SizeResult.Val.getInt();
2483 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
2486 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2487 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
2488 Builder.getInt8Ty());
2489 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2490 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
2493 case Builtin::BI__builtin_wmemcmp: {
2494 // The MSVC runtime library does not provide a definition of wmemcmp, so we
2495 // need an inline implementation.
    if (!getTarget().getTriple().isOSMSVCRT())
      break;

2499 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
2501 Value *Dst = EmitScalarExpr(E->getArg(0));
2502 Value *Src = EmitScalarExpr(E->getArg(1));
2503 Value *Size = EmitScalarExpr(E->getArg(2));
2505 BasicBlock *Entry = Builder.GetInsertBlock();
2506 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
2507 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
2508 BasicBlock *Next = createBasicBlock("wmemcmp.next");
2509 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
2510 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(SizeEq0, Exit, CmpGT);

    EmitBlock(CmpGT);
2514 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
2515 DstPhi->addIncoming(Dst, Entry);
2516 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
2517 SrcPhi->addIncoming(Src, Entry);
2518 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
2519 SizePhi->addIncoming(Size, Entry);
2520 CharUnits WCharAlign =
2521 getContext().getTypeAlignInChars(getContext().WCharTy);
2522 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
2523 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
2524 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
    Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);

    EmitBlock(CmpLT);
2528 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
    Builder.CreateCondBr(DstLtSrc, Exit, Next);

    EmitBlock(Next);
2532 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
2533 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
2534 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
2535 Value *NextSizeEq0 =
2536 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
2537 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
2538 DstPhi->addIncoming(NextDst, Next);
2539 SrcPhi->addIncoming(NextSrc, Next);
    SizePhi->addIncoming(NextSize, Next);

    EmitBlock(Exit);
2543 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
2544 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
2545 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
2546 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
2547 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
    return RValue::get(Ret);
  }
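  // Behavior sketch (illustrative): the loop compares one wchar_t per
  // iteration using *unsigned* comparisons (matching MSVC's wmemcmp) and the
  // exit phi yields 1, -1, or 0.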
2550 case Builtin::BI__builtin_dwarf_cfa: {
2551 // The offset in bytes from the first argument to the CFA.
2553 // Why on earth is this in the frontend? Is there any reason at
2554 // all that the backend can't reasonably determine this while
2555 // lowering llvm.eh.dwarf.cfa()?
2557 // TODO: If there's a satisfactory reason, add a target hook for
2558 // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(
        Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, Offset)));
  }
2565 case Builtin::BI__builtin_return_address: {
2566 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
2567 getContext().UnsignedIntTy);
2568 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
2571 case Builtin::BI_ReturnAddress: {
2572 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
  }
2575 case Builtin::BI__builtin_frame_address: {
2576 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
2577 getContext().UnsignedIntTy);
2578 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
2581 case Builtin::BI__builtin_extract_return_addr: {
2582 Value *Address = EmitScalarExpr(E->getArg(0));
2583 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
2586 case Builtin::BI__builtin_frob_return_addr: {
2587 Value *Address = EmitScalarExpr(E->getArg(0));
2588 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
2591 case Builtin::BI__builtin_dwarf_sp_column: {
2592 llvm::IntegerType *Ty
2593 = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
2601 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
2602 Value *Address = EmitScalarExpr(E->getArg(0));
2603 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
2604 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
2607 case Builtin::BI__builtin_eh_return: {
2608 Value *Int = EmitScalarExpr(E->getArg(0));
2609 Value *Ptr = EmitScalarExpr(E->getArg(1));
2611 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
2612 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
2613 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Function *F =
        CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
                                                    : Intrinsic::eh_return_i64);
2617 Builder.CreateCall(F, {Int, Ptr});
2618 Builder.CreateUnreachable();
2620 // We do need to preserve an insertion point.
2621 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
    return RValue::get(nullptr);
  }
2625 case Builtin::BI__builtin_unwind_init: {
2626 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
2629 case Builtin::BI__builtin_extend_pointer: {
2630 // Extends a pointer to the size of an _Unwind_Word, which is
2631 // uint64_t on all platforms. Generally this gets poked into a
2632 // register and eventually used as an address, so if the
2633 // addressing registers are wider than pointers and the platform
2634 // doesn't implicitly ignore high-order bits when doing
2635 // addressing, we need to make sure we zext / sext based on
2636 // the platform's expectations.
2638 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
2640 // Cast the pointer to intptr_t.
2641 Value *Ptr = EmitScalarExpr(E->getArg(0));
2642 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
2644 // If that's 64 bits, we're done.
2645 if (IntPtrTy->getBitWidth() == 64)
2646 return RValue::get(Result);
2648 // Otherwise, ask the codegen data what to do.
2649 if (getTargetHooks().extendPointerWithSExt())
2650 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
2652 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
2654 case Builtin::BI__builtin_setjmp: {
2655 // Buffer is a void**.
2656 Address Buf = EmitPointerWithAlignment(E->getArg(0));
2658 // Store the frame pointer to the setjmp buffer.
2659 Value *FrameAddr = Builder.CreateCall(
2660 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
2661 ConstantInt::get(Int32Ty, 0));
2662 Builder.CreateStore(FrameAddr, Buf);
2664 // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
        Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
2667 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
2668 Builder.CreateStore(StackAddr, StackSaveSlot);
2670 // Call LLVM's EH setjmp, which is lightweight.
2671 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
2672 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
  }
2675 case Builtin::BI__builtin_longjmp: {
2676 Value *Buf = EmitScalarExpr(E->getArg(0));
2677 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2679 // Call LLVM's EH longjmp, which is lightweight.
2680 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
2682 // longjmp doesn't return; mark this as unreachable.
2683 Builder.CreateUnreachable();
2685 // We do need to preserve an insertion point.
2686 EmitBlock(createBasicBlock("longjmp.cont"));
    return RValue::get(nullptr);
  }

2690 case Builtin::BI__builtin_launder: {
2691 const Expr *Arg = E->getArg(0);
2692 QualType ArgTy = Arg->getType()->getPointeeType();
2693 Value *Ptr = EmitScalarExpr(Arg);
2694 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
2695 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
    return RValue::get(Ptr);
  }
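  // Note (illustrative): with -fstrict-vtable-pointers, laundering a pointer
  // to a dynamic class (or to a type containing such a subobject) emits
  // llvm.launder.invariant.group; for any other type the pointer is returned
  // unchanged.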
2699 case Builtin::BI__sync_fetch_and_add:
2700 case Builtin::BI__sync_fetch_and_sub:
2701 case Builtin::BI__sync_fetch_and_or:
2702 case Builtin::BI__sync_fetch_and_and:
2703 case Builtin::BI__sync_fetch_and_xor:
2704 case Builtin::BI__sync_fetch_and_nand:
2705 case Builtin::BI__sync_add_and_fetch:
2706 case Builtin::BI__sync_sub_and_fetch:
2707 case Builtin::BI__sync_and_and_fetch:
2708 case Builtin::BI__sync_or_and_fetch:
2709 case Builtin::BI__sync_xor_and_fetch:
2710 case Builtin::BI__sync_nand_and_fetch:
2711 case Builtin::BI__sync_val_compare_and_swap:
2712 case Builtin::BI__sync_bool_compare_and_swap:
2713 case Builtin::BI__sync_lock_test_and_set:
2714 case Builtin::BI__sync_lock_release:
2715 case Builtin::BI__sync_swap:
2716 llvm_unreachable("Shouldn't make it through sema");
2717 case Builtin::BI__sync_fetch_and_add_1:
2718 case Builtin::BI__sync_fetch_and_add_2:
2719 case Builtin::BI__sync_fetch_and_add_4:
2720 case Builtin::BI__sync_fetch_and_add_8:
2721 case Builtin::BI__sync_fetch_and_add_16:
2722 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
2723 case Builtin::BI__sync_fetch_and_sub_1:
2724 case Builtin::BI__sync_fetch_and_sub_2:
2725 case Builtin::BI__sync_fetch_and_sub_4:
2726 case Builtin::BI__sync_fetch_and_sub_8:
2727 case Builtin::BI__sync_fetch_and_sub_16:
2728 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
2729 case Builtin::BI__sync_fetch_and_or_1:
2730 case Builtin::BI__sync_fetch_and_or_2:
2731 case Builtin::BI__sync_fetch_and_or_4:
2732 case Builtin::BI__sync_fetch_and_or_8:
2733 case Builtin::BI__sync_fetch_and_or_16:
2734 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
2735 case Builtin::BI__sync_fetch_and_and_1:
2736 case Builtin::BI__sync_fetch_and_and_2:
2737 case Builtin::BI__sync_fetch_and_and_4:
2738 case Builtin::BI__sync_fetch_and_and_8:
2739 case Builtin::BI__sync_fetch_and_and_16:
2740 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
2741 case Builtin::BI__sync_fetch_and_xor_1:
2742 case Builtin::BI__sync_fetch_and_xor_2:
2743 case Builtin::BI__sync_fetch_and_xor_4:
2744 case Builtin::BI__sync_fetch_and_xor_8:
2745 case Builtin::BI__sync_fetch_and_xor_16:
2746 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
2747 case Builtin::BI__sync_fetch_and_nand_1:
2748 case Builtin::BI__sync_fetch_and_nand_2:
2749 case Builtin::BI__sync_fetch_and_nand_4:
2750 case Builtin::BI__sync_fetch_and_nand_8:
2751 case Builtin::BI__sync_fetch_and_nand_16:
2752 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
2754 // Clang extensions: not overloaded yet.
2755 case Builtin::BI__sync_fetch_and_min:
2756 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
2757 case Builtin::BI__sync_fetch_and_max:
2758 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
2759 case Builtin::BI__sync_fetch_and_umin:
2760 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
2761 case Builtin::BI__sync_fetch_and_umax:
2762 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
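  // Example (illustrative): __sync_fetch_and_add(&x, 1) lowers to
  // "atomicrmw add ... seq_cst" and yields the *old* value, while the
  // __sync_*_and_fetch forms below re-apply the operation to the atomic
  // result to yield the *new* value.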
2764 case Builtin::BI__sync_add_and_fetch_1:
2765 case Builtin::BI__sync_add_and_fetch_2:
2766 case Builtin::BI__sync_add_and_fetch_4:
2767 case Builtin::BI__sync_add_and_fetch_8:
2768 case Builtin::BI__sync_add_and_fetch_16:
2769 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
2770 llvm::Instruction::Add);
2771 case Builtin::BI__sync_sub_and_fetch_1:
2772 case Builtin::BI__sync_sub_and_fetch_2:
2773 case Builtin::BI__sync_sub_and_fetch_4:
2774 case Builtin::BI__sync_sub_and_fetch_8:
2775 case Builtin::BI__sync_sub_and_fetch_16:
2776 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
2777 llvm::Instruction::Sub);
2778 case Builtin::BI__sync_and_and_fetch_1:
2779 case Builtin::BI__sync_and_and_fetch_2:
2780 case Builtin::BI__sync_and_and_fetch_4:
2781 case Builtin::BI__sync_and_and_fetch_8:
2782 case Builtin::BI__sync_and_and_fetch_16:
2783 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
2784 llvm::Instruction::And);
2785 case Builtin::BI__sync_or_and_fetch_1:
2786 case Builtin::BI__sync_or_and_fetch_2:
2787 case Builtin::BI__sync_or_and_fetch_4:
2788 case Builtin::BI__sync_or_and_fetch_8:
2789 case Builtin::BI__sync_or_and_fetch_16:
2790 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
2791 llvm::Instruction::Or);
2792 case Builtin::BI__sync_xor_and_fetch_1:
2793 case Builtin::BI__sync_xor_and_fetch_2:
2794 case Builtin::BI__sync_xor_and_fetch_4:
2795 case Builtin::BI__sync_xor_and_fetch_8:
2796 case Builtin::BI__sync_xor_and_fetch_16:
2797 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
2798 llvm::Instruction::Xor);
2799 case Builtin::BI__sync_nand_and_fetch_1:
2800 case Builtin::BI__sync_nand_and_fetch_2:
2801 case Builtin::BI__sync_nand_and_fetch_4:
2802 case Builtin::BI__sync_nand_and_fetch_8:
2803 case Builtin::BI__sync_nand_and_fetch_16:
2804 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
2805 llvm::Instruction::And, true);
2807 case Builtin::BI__sync_val_compare_and_swap_1:
2808 case Builtin::BI__sync_val_compare_and_swap_2:
2809 case Builtin::BI__sync_val_compare_and_swap_4:
2810 case Builtin::BI__sync_val_compare_and_swap_8:
2811 case Builtin::BI__sync_val_compare_and_swap_16:
2812 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
2814 case Builtin::BI__sync_bool_compare_and_swap_1:
2815 case Builtin::BI__sync_bool_compare_and_swap_2:
2816 case Builtin::BI__sync_bool_compare_and_swap_4:
2817 case Builtin::BI__sync_bool_compare_and_swap_8:
2818 case Builtin::BI__sync_bool_compare_and_swap_16:
2819 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
2821 case Builtin::BI__sync_swap_1:
2822 case Builtin::BI__sync_swap_2:
2823 case Builtin::BI__sync_swap_4:
2824 case Builtin::BI__sync_swap_8:
2825 case Builtin::BI__sync_swap_16:
2826 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
2828 case Builtin::BI__sync_lock_test_and_set_1:
2829 case Builtin::BI__sync_lock_test_and_set_2:
2830 case Builtin::BI__sync_lock_test_and_set_4:
2831 case Builtin::BI__sync_lock_test_and_set_8:
2832 case Builtin::BI__sync_lock_test_and_set_16:
2833 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
2835 case Builtin::BI__sync_lock_release_1:
2836 case Builtin::BI__sync_lock_release_2:
2837 case Builtin::BI__sync_lock_release_4:
2838 case Builtin::BI__sync_lock_release_8:
2839 case Builtin::BI__sync_lock_release_16: {
2840 Value *Ptr = EmitScalarExpr(E->getArg(0));
2841 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
2842 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
2843 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
2844 StoreSize.getQuantity() * 8);
2845 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
        Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
                                   StoreSize.getQuantity());
    Store->setAtomic(llvm::AtomicOrdering::Release);
    return RValue::get(nullptr);
  }

2853 case Builtin::BI__sync_synchronize: {
2854 // We assume this is supposed to correspond to a C++0x-style
2855 // sequentially-consistent fence (i.e. this is only usable for
2856 // synchronization, not device I/O or anything like that). This intrinsic
2857 // is really badly designed in the sense that in theory, there isn't
2858 // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
2861 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
    return RValue::get(nullptr);
  }

2865 case Builtin::BI__builtin_nontemporal_load:
2866 return RValue::get(EmitNontemporalLoad(*this, E));
2867 case Builtin::BI__builtin_nontemporal_store:
2868 return RValue::get(EmitNontemporalStore(*this, E));
2869 case Builtin::BI__c11_atomic_is_lock_free:
2870 case Builtin::BI__atomic_is_lock_free: {
2871 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
2872 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
2873 // _Atomic(T) is always properly-aligned.
2874 const char *LibCallName = "__atomic_is_lock_free";
2876 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
2877 getContext().getSizeType());
2878 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
2879 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
2883 getContext().VoidPtrTy);
2884 const CGFunctionInfo &FuncInfo =
2885 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
2886 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
2887 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
2888 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
                    ReturnValueSlot(), Args);
  }

2892 case Builtin::BI__atomic_test_and_set: {
2893 // Look at the argument type to determine whether this is a volatile
2894 // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
2899 Value *Ptr = EmitScalarExpr(E->getArg(0));
2900 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
2901 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
2902 Value *NewVal = Builder.getInt8(1);
2903 Value *Order = EmitScalarExpr(E->getArg(1));
2904 if (isa<llvm::ConstantInt>(Order)) {
2905 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = nullptr;
      switch (ord) {
      case 0: // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Monotonic);
        break;
      case 1: // memory_order_consume
      case 2: // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Acquire);
        break;
      case 3: // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Release);
        break;
      case 4: // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::AcquireRelease);
        break;
      case 5: // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(
            llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
            llvm::AtomicOrdering::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

2937 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
2939 llvm::BasicBlock *BBs[5] = {
2940 createBasicBlock("monotonic", CurFn),
2941 createBasicBlock("acquire", CurFn),
2942 createBasicBlock("release", CurFn),
2943 createBasicBlock("acqrel", CurFn),
2944 createBasicBlock("seqcst", CurFn)
2946 llvm::AtomicOrdering Orders[5] = {
2947 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
2948 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
2949 llvm::AtomicOrdering::SequentiallyConsistent};
2951 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
2952 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
2954 Builder.SetInsertPoint(ContBB);
2955 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
2957 for (unsigned i = 0; i < 5; ++i) {
2958 Builder.SetInsertPoint(BBs[i]);
2959 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
2960 Ptr, NewVal, Orders[i]);
2961 RMW->setVolatile(Volatile);
2962 Result->addIncoming(RMW, BBs[i]);
2963 Builder.CreateBr(ContBB);
2966 SI->addCase(Builder.getInt32(0), BBs[0]);
2967 SI->addCase(Builder.getInt32(1), BBs[1]);
2968 SI->addCase(Builder.getInt32(2), BBs[1]);
2969 SI->addCase(Builder.getInt32(3), BBs[2]);
2970 SI->addCase(Builder.getInt32(4), BBs[3]);
2971 SI->addCase(Builder.getInt32(5), BBs[4]);
2973 Builder.SetInsertPoint(ContBB);
2974 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
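  // Illustrative note (not from the original source): with a constant order,
  //   bool b = __atomic_test_and_set(&flag, __ATOMIC_ACQUIRE);
  // lowers roughly to
  //   %old = atomicrmw xchg i8* %flag, i8 1 acquire
  //   %tobool = icmp ne i8 %old, 0
  // while a runtime order produces the switch-over-orderings CFG built above.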
  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Address Ptr = EmitPointerWithAlignment(E->getArg(0));
    unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::AtomicOrdering::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::AtomicOrdering::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
        llvm::AtomicOrdering::SequentiallyConsistent};

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }
  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SyncScope::ID SSID;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      SSID = llvm::SyncScope::SingleThread;
    else
      SSID = llvm::SyncScope::System;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }
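  // Illustrative note (not from the original source):
  //   __atomic_thread_fence(__ATOMIC_SEQ_CST)  ->  fence seq_cst
  //   __atomic_signal_fence(__ATOMIC_ACQUIRE)  ->  fence syncscope("singlethread") acquire
  // i.e. signal fences differ only in their single-thread sync scope.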
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    return RValue::get(
        Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
                           ConvertType(E->getType())));
  }
  case Builtin::BI__annotation: {
    // Re-encode each wide string to UTF8 and make an MDString.
    SmallVector<Metadata *, 1> Strings;
    for (const Expr *Arg : E->arguments()) {
      const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
      assert(Str->getCharByteWidth() == 2);
      StringRef WideBytes = Str->getBytes();
      std::string StrUtf8;
      if (!convertUTF16ToUTF8String(
              makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
        CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
        continue;
      }
      Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
    }

    // Build an MDTuple of MDStrings and emit the intrinsic call.
    llvm::Function *F =
        CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
    MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
    Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
    return RValue::getIgnored();
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                         AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially casted, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {
    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    Builder.CreateStore(CarryOut, CarryOutPtr);
    return RValue::get(Sum2);
  }
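  // Illustrative note (not from the original source): these builtins chain,
  // so a 64-bit add can be synthesized from 32-bit limbs:
  //   unsigned lo, hi, c;
  //   lo = __builtin_addc(a_lo, b_lo, 0, &c);
  //   hi = __builtin_addc(a_hi, b_hi, c, &c);
  // Each step emits the two uadd.with.overflow calls shown in the comment
  // above, OR-ing the carries together to form the carry-out.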
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow: {
    const clang::Expr *LeftArg = E->getArg(0);
    const clang::Expr *RightArg = E->getArg(1);
    const clang::Expr *ResultArg = E->getArg(2);

    clang::QualType ResultQTy =
        ResultArg->getType()->castAs<PointerType>()->getPointeeType();

    WidthAndSignedness LeftInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
    WidthAndSignedness RightInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
    WidthAndSignedness ResultInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);

    // Handle mixed-sign multiplication as a special case, because adding
    // runtime or backend support for our generic irgen would be too expensive.
    if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
      return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
                                          RightInfo, ResultArg, ResultQTy,
                                          ResultInfo);

    WidthAndSignedness EncompassingInfo =
        EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});

    llvm::Type *EncompassingLLVMTy =
        llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);

    llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);

    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default:
      llvm_unreachable("Unknown overflow builtin id.");
    case Builtin::BI__builtin_add_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::sadd_with_overflow
                        : llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_sub_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::ssub_with_overflow
                        : llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_mul_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::smul_with_overflow
                        : llvm::Intrinsic::umul_with_overflow;
      break;
    }

    llvm::Value *Left = EmitScalarExpr(LeftArg);
    llvm::Value *Right = EmitScalarExpr(RightArg);
    Address ResultPtr = EmitPointerWithAlignment(ResultArg);

    // Extend each operand to the encompassing type.
    Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
    Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);

    // Perform the operation on the extended values.
    llvm::Value *Overflow, *Result;
    Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);

    if (EncompassingInfo.Width > ResultInfo.Width) {
      // The encompassing type is wider than the result type, so we need to
      // truncate it.
      llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);

      // To see if the truncation caused an overflow, we will extend
      // the result and then compare it to the original result.
      llvm::Value *ResultTruncExt = Builder.CreateIntCast(
          ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
      llvm::Value *TruncationOverflow =
          Builder.CreateICmpNE(Result, ResultTruncExt);

      Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
      Result = ResultTrunc;
    }

    // Finally, store the result using the pointer.
    bool isVolatile =
        ResultArg->getType()->getPointeeType().isVolatileQualified();
    Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);

    return RValue::get(Overflow);
  }
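  // Illustrative note (not from the original source): for mixed widths such as
  //   long a, b; short r;
  //   bool ovf = __builtin_add_overflow(a, b, &r);
  // the operands are widened to the encompassing type (i64 on LP64 targets),
  // the add is checked with @llvm.sadd.with.overflow.i64, and a second
  // "does it fit in i16" check from the truncate/re-extend/compare above is
  // OR-ed into the overflow flag.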
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {
    // We translate all of these builtins directly to the relevant llvm IR node.

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));

    // Decide which of the overflow intrinsics we are lowering to:
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown overflow builtin id.");
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
      break;
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
      break;
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
      break;
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
      break;
    }

    llvm::Value *Carry;
    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
    Builder.CreateStore(Sum, SumOutPtr);

    return RValue::get(Carry);
  }
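  // Illustrative note (not from the original source):
  //   int r; bool ovf = __builtin_sadd_overflow(x, y, &r);
  // lowers roughly to
  //   %pair = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
  //   %sum  = extractvalue {i32, i1} %pair, 0
  //   %ovf  = extractvalue {i32, i1} %pair, 1
  //   store i32 %sum, i32* %r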
  case Builtin::BI__builtin_addressof:
    return RValue::get(EmitLValue(E->getArg(0)).getPointer());
  case Builtin::BI__builtin_operator_new:
    return EmitBuiltinNewDeleteCall(
        E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
  case Builtin::BI__builtin_operator_delete:
    return EmitBuiltinNewDeleteCall(
        E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);

  case Builtin::BI__noop:
    // __noop always evaluates to an integer literal zero.
    return RValue::get(ConstantInt::get(IntTy, 0));
  case Builtin::BI__builtin_call_with_static_chain: {
    const CallExpr *Call = cast<CallExpr>(E->getArg(0));
    const Expr *Chain = E->getArg(1);
    return EmitCall(Call->getCallee()->getType(),
                    EmitCallee(Call->getCallee()), Call, ReturnValue,
                    EmitScalarExpr(Chain));
  }
  case Builtin::BI_InterlockedExchange8:
  case Builtin::BI_InterlockedExchange16:
  case Builtin::BI_InterlockedExchange:
  case Builtin::BI_InterlockedExchangePointer:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
  case Builtin::BI_InterlockedCompareExchangePointer:
  case Builtin::BI_InterlockedCompareExchangePointer_nf: {
    llvm::Type *RTy;
    llvm::IntegerType *IntType =
      IntegerType::get(getLLVMContext(),
                       getContext().getTypeSize(E->getType()));
    llvm::Type *IntPtrType = IntType->getPointerTo();

    llvm::Value *Destination =
      Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);

    llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
    RTy = Exchange->getType();
    Exchange = Builder.CreatePtrToInt(Exchange, IntType);

    llvm::Value *Comparand =
      Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);

    auto Ordering =
      BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
      AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;

    auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              Ordering, Ordering);
    Result->setVolatile(true);

    return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
                                                                         0),
                                              RTy));
  }
  case Builtin::BI_InterlockedCompareExchange8:
  case Builtin::BI_InterlockedCompareExchange16:
  case Builtin::BI_InterlockedCompareExchange:
  case Builtin::BI_InterlockedCompareExchange64:
    return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
  case Builtin::BI_InterlockedIncrement16:
  case Builtin::BI_InterlockedIncrement:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
  case Builtin::BI_InterlockedDecrement16:
  case Builtin::BI_InterlockedDecrement:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
  case Builtin::BI_InterlockedAnd8:
  case Builtin::BI_InterlockedAnd16:
  case Builtin::BI_InterlockedAnd:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
  case Builtin::BI_InterlockedExchangeAdd8:
  case Builtin::BI_InterlockedExchangeAdd16:
  case Builtin::BI_InterlockedExchangeAdd:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
  case Builtin::BI_InterlockedExchangeSub8:
  case Builtin::BI_InterlockedExchangeSub16:
  case Builtin::BI_InterlockedExchangeSub:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
  case Builtin::BI_InterlockedOr8:
  case Builtin::BI_InterlockedOr16:
  case Builtin::BI_InterlockedOr:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
  case Builtin::BI_InterlockedXor8:
  case Builtin::BI_InterlockedXor16:
  case Builtin::BI_InterlockedXor:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
  case Builtin::BI_bittest64:
  case Builtin::BI_bittest:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandcomplement:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandreset:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_bittestandset:
  case Builtin::BI_interlockedbittestandreset:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
  case Builtin::BI_interlockedbittestandset:
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));

  // These builtins exist to emit regular volatile loads and stores not
  // affected by the -fms-volatile setting.
  case Builtin::BI__iso_volatile_load8:
  case Builtin::BI__iso_volatile_load16:
  case Builtin::BI__iso_volatile_load32:
  case Builtin::BI__iso_volatile_load64:
    return RValue::get(EmitISOVolatileLoad(*this, E));
  case Builtin::BI__iso_volatile_store8:
  case Builtin::BI__iso_volatile_store16:
  case Builtin::BI__iso_volatile_store32:
  case Builtin::BI__iso_volatile_store64:
    return RValue::get(EmitISOVolatileStore(*this, E));

  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    return RValue::get(EmitSEHExceptionCode());
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    return RValue::get(EmitSEHExceptionInfo());
  case Builtin::BI__abnormal_termination:
  case Builtin::BI_abnormal_termination:
    return RValue::get(EmitSEHAbnormalTermination());
  case Builtin::BI_setjmpex:
    if (getTarget().getTriple().isOSMSVCRT())
      return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
    break;
  case Builtin::BI_setjmp:
    if (getTarget().getTriple().isOSMSVCRT()) {
      if (getTarget().getTriple().getArch() == llvm::Triple::x86)
        return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
      else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
        return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
      return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
    }
    break;

  case Builtin::BI__GetExceptionInfo: {
    if (llvm::GlobalVariable *GV =
            CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
      return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
    break;
  }

  case Builtin::BI__fastfail:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));

  case Builtin::BI__builtin_coro_size: {
    auto &Context = getContext();
    auto SizeTy = Context.getSizeType();
    auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_coro_id:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
  case Builtin::BI__builtin_coro_promise:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
  case Builtin::BI__builtin_coro_resume:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
  case Builtin::BI__builtin_coro_frame:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
  case Builtin::BI__builtin_coro_noop:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
  case Builtin::BI__builtin_coro_free:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
  case Builtin::BI__builtin_coro_destroy:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
  case Builtin::BI__builtin_coro_done:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
  case Builtin::BI__builtin_coro_alloc:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
  case Builtin::BI__builtin_coro_begin:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
  case Builtin::BI__builtin_coro_end:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
  case Builtin::BI__builtin_coro_suspend:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
  case Builtin::BI__builtin_coro_param:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
  // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe: {
    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Type of the generic packet parameter.
    unsigned GenericAS =
        getContext().getTargetAddressSpace(LangAS::opencl_generic);
    llvm::Type *I8PTy = llvm::PointerType::get(
        llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);

    // Testing which overloaded version we should generate the call for.
    if (2U == E->getNumArgs()) {
      const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
                                                             : "__write_pipe_2";
      // Creating a generic function type to be able to call with any builtin or
      // user defined type.
      llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
      return RValue::get(
          Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                             {Arg0, BCast, PacketSize, PacketAlign}));
    } else {
      assert(4 == E->getNumArgs() &&
             "Illegal number of parameters to pipe function");
      const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
                                                             : "__write_pipe_4";

      llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
                              Int32Ty, Int32Ty};
      Value *Arg2 = EmitScalarExpr(E->getArg(2)),
            *Arg3 = EmitScalarExpr(E->getArg(3));
      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
      // We know the third argument is an integer type, but we may need to cast
      // it to 32 bits.
      if (Arg2->getType() != Int32Ty)
        Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
      return RValue::get(Builder.CreateCall(
          CGM.CreateRuntimeFunction(FTy, Name),
          {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
    }
  }
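  // Illustrative note (not from the original source): an OpenCL 2.0 call
  //   int ok = read_pipe(p, &packet);
  // lowers to the runtime entry point
  //   __read_pipe_2(p, (generic char *)&packet, packet_size, packet_align)
  // and the 4-argument reservation form lowers to __read_pipe_4, with the
  // packet size and alignment taken from the pipe's element type.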
  // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
  // functions
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe: {
    // Composing the mangled name for the function.
    const char *Name;
    if (BuiltinID == Builtin::BIreserve_read_pipe)
      Name = "__reserve_read_pipe";
    else if (BuiltinID == Builtin::BIreserve_write_pipe)
      Name = "__reserve_write_pipe";
    else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
      Name = "__work_group_reserve_read_pipe";
    else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
      Name = "__work_group_reserve_write_pipe";
    else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
      Name = "__sub_group_reserve_read_pipe";
    else
      Name = "__sub_group_reserve_write_pipe";

    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Building the generic function prototype.
    llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
    // We know the second argument is an integer type, but we may need to cast
    // it to 32 bits.
    if (Arg1->getType() != Int32Ty)
      Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
    return RValue::get(
        Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                           {Arg0, Arg1, PacketSize, PacketAlign}));
  }
  // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
  // functions
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe: {
    const char *Name;
    if (BuiltinID == Builtin::BIcommit_read_pipe)
      Name = "__commit_read_pipe";
    else if (BuiltinID == Builtin::BIcommit_write_pipe)
      Name = "__commit_write_pipe";
    else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
      Name = "__work_group_commit_read_pipe";
    else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
      Name = "__work_group_commit_write_pipe";
    else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
      Name = "__sub_group_commit_read_pipe";
    else
      Name = "__sub_group_commit_write_pipe";

    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Building the generic function prototype.
    llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
                                llvm::ArrayRef<llvm::Type *>(ArgTys), false);

    return RValue::get(
        Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                           {Arg0, Arg1, PacketSize, PacketAlign}));
  }
  // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets: {
    const char *BaseName;
    const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
    if (BuiltinID == Builtin::BIget_pipe_num_packets)
      BaseName = "__get_pipe_num_packets";
    else
      BaseName = "__get_pipe_max_packets";
    std::string Name = std::string(BaseName) +
                       std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");

    // Building the generic function prototype.
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
    llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);

    return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                                          {Arg0, PacketSize, PacketAlign}));
  }
  // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private: {
    auto Arg0 = EmitScalarExpr(E->getArg(0));
    auto NewArgT = llvm::PointerType::get(Int8Ty,
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
    auto NewRetT = llvm::PointerType::get(Int8Ty,
      CGM.getContext().getTargetAddressSpace(
        E->getType()->getPointeeType().getAddressSpace()));
    auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
    llvm::Value *NewArg;
    if (Arg0->getType()->getPointerAddressSpace() !=
        NewArgT->getPointerAddressSpace())
      NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
    else
      NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
    auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
    auto NewCall =
        Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
    return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
      ConvertType(E->getType())));
  }
  // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
  // It contains four different overload formats specified in Table 6.13.17.1.
  case Builtin::BIenqueue_kernel: {
    StringRef Name; // Generated function call name
    unsigned NumArgs = E->getNumArgs();

    llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
    llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));

    llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
    llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
    LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
    llvm::Value *Range = NDRangeL.getAddress().getPointer();
    llvm::Type *RangeTy = NDRangeL.getAddress().getType();

    if (NumArgs == 4) {
      // The most basic form of the call with parameters:
      // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
      Name = "__enqueue_kernel_basic";
      llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
                              GenericVoidPtrTy};
      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);

      auto Info =
          CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
      llvm::Value *Kernel =
          Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
      llvm::Value *Block =
          Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);

      AttrBuilder B;
      B.addByValAttr(NDRangeL.getAddress().getElementType());
      llvm::AttributeList ByValAttrSet =
          llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);

      auto RTCall =
          Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
                             {Queue, Flags, Range, Kernel, Block});
      RTCall->setAttributes(ByValAttrSet);
      return RValue::get(RTCall);
    }
    assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");

    // Create a temporary array to hold the sizes of local pointer arguments
    // for the block. \p First is the position of the first size argument.
    auto CreateArrayForSizeVar = [=](unsigned First)
        -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
      llvm::APInt ArraySize(32, NumArgs - First);
      QualType SizeArrayTy = getContext().getConstantArrayType(
          getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
          /*IndexTypeQuals=*/0);
      auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
      llvm::Value *TmpPtr = Tmp.getPointer();
      llvm::Value *TmpSize = EmitLifetimeStart(
          CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
      llvm::Value *ElemPtr;
      // Each of the following arguments specifies the size of the corresponding
      // argument passed to the enqueued block.
      auto *Zero = llvm::ConstantInt::get(IntTy, 0);
      for (unsigned I = First; I < NumArgs; ++I) {
        auto *Index = llvm::ConstantInt::get(IntTy, I - First);
        auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
        if (I == First)
          ElemPtr = GEP;
        auto *V =
            Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
        Builder.CreateAlignedStore(
            V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
      }
      return std::tie(ElemPtr, TmpSize, TmpPtr);
    };
    // Could have events and/or varargs.
    if (E->getArg(3)->getType()->isBlockPointerType()) {
      // No events passed, but has variadic arguments.
      Name = "__enqueue_kernel_varargs";
      auto Info =
          CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
      llvm::Value *Kernel =
          Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
      auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
      llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
      std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);

      // Create a vector of the arguments, as well as a constant value to
      // express to the runtime the number of variadic arguments.
      std::vector<llvm::Value *> Args = {
          Queue, Flags, Range,
          Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4),
          ElemPtr};
      std::vector<llvm::Type *> ArgTys = {
          QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
          GenericVoidPtrTy, IntTy, ElemPtr->getType()};

      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      auto Call =
          RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                                         llvm::ArrayRef<llvm::Value *>(Args)));
      if (TmpSize)
        EmitLifetimeEnd(TmpSize, TmpPtr);
      return Call;
    }
    // Any calls now have event arguments passed.
    if (NumArgs >= 7) {
      llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
      llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
          CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));

      llvm::Value *NumEvents =
          Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);

      // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
      // to be a null pointer constant (including `0` literal), we can take it
      // into account and emit null pointer directly.
      llvm::Value *EventWaitList = nullptr;
      if (E->getArg(4)->isNullPointerConstant(
              getContext(), Expr::NPC_ValueDependentIsNotNull)) {
        EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
      } else {
        EventWaitList = E->getArg(4)->getType()->isArrayType()
                        ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
                        : EmitScalarExpr(E->getArg(4));
        // Convert to generic address space.
        EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
      }
      llvm::Value *EventRet = nullptr;
      if (E->getArg(5)->isNullPointerConstant(
              getContext(), Expr::NPC_ValueDependentIsNotNull)) {
        EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
      } else {
        EventRet =
            Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
      }

      auto Info =
          CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
      llvm::Value *Kernel =
          Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
      llvm::Value *Block =
          Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);

      std::vector<llvm::Type *> ArgTys = {
          QueueTy, Int32Ty, RangeTy, Int32Ty,
          EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};

      std::vector<llvm::Value *> Args = {Queue, Flags, Range,
                                         NumEvents, EventWaitList, EventRet,
                                         Kernel, Block};

      if (NumArgs == 7) {
        // Has events but no variadics.
        Name = "__enqueue_kernel_basic_events";
        llvm::FunctionType *FTy = llvm::FunctionType::get(
            Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
        return RValue::get(
            Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                               llvm::ArrayRef<llvm::Value *>(Args)));
      }
      // Has event info and variadics.
      // Pass the number of variadics to the runtime function too.
      Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
      ArgTys.push_back(Int32Ty);
      Name = "__enqueue_kernel_events_varargs";

      llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
      std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
      Args.push_back(ElemPtr);
      ArgTys.push_back(ElemPtr->getType());

      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      auto Call =
          RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                                         llvm::ArrayRef<llvm::Value *>(Args)));
      if (TmpSize)
        EmitLifetimeEnd(TmpSize, TmpPtr);
      return Call;
    }
  }
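  // Illustrative note (not from the original source): the four overloads map
  // onto distinct runtime entry points:
  //   enqueue_kernel(q, f, nd, block)                  -> __enqueue_kernel_basic
  //   enqueue_kernel(q, f, nd, block, sizes...)        -> __enqueue_kernel_varargs
  //   enqueue_kernel(q, f, nd, n, wl, ev, block)       -> __enqueue_kernel_basic_events
  //   enqueue_kernel(q, f, nd, n, wl, ev, block, s...) -> __enqueue_kernel_events_varargs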
  // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
  // parameter.
  case Builtin::BIget_kernel_work_group_size: {
    llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
    Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
    Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    return RValue::get(Builder.CreateCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
                                    false),
            "__get_kernel_work_group_size_impl"),
        {Kernel, Arg}));
  }
  case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
    llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
    Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
    Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    return RValue::get(Builder.CreateCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
                                    false),
            "__get_kernel_preferred_work_group_size_multiple_impl"),
        {Kernel, Arg}));
  }
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
    llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));
    LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
    llvm::Value *NDRange = NDRangeL.getAddress().getPointer();
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
    Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
    Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    const char *Name =
        BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
            ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
            : "__get_kernel_sub_group_count_for_ndrange_impl";
    return RValue::get(Builder.CreateCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(
                IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
                false),
            Name),
        {NDRange, Kernel, Block}));
  }
  case Builtin::BI__builtin_store_half:
  case Builtin::BI__builtin_store_halff: {
    Value *Val = EmitScalarExpr(E->getArg(0));
    Address Address = EmitPointerWithAlignment(E->getArg(1));
    Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
    return RValue::get(Builder.CreateStore(HalfVal, Address));
  }
  case Builtin::BI__builtin_load_half: {
    Address Address = EmitPointerWithAlignment(E->getArg(0));
    Value *HalfVal = Builder.CreateLoad(Address);
    return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
  }
  case Builtin::BI__builtin_load_halff: {
    Address Address = EmitPointerWithAlignment(E->getArg(0));
    Value *HalfVal = Builder.CreateLoad(Address);
    return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
  }
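  // Illustrative note (not from the original source):
  //   __builtin_store_halff(f, p)  ->  fptrunc float %f to half ; store half
  //   __builtin_load_halff(p)      ->  load half ; fpext half to float
  // i.e. half is treated purely as a storage format here; arithmetic happens
  // in float or double.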
  case Builtin::BIprintf:
    if (getTarget().getTriple().isNVPTX())
      return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
    break;
  case Builtin::BI__builtin_canonicalize:
  case Builtin::BI__builtin_canonicalizef:
  case Builtin::BI__builtin_canonicalizef16:
  case Builtin::BI__builtin_canonicalizel:
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));

  case Builtin::BI__builtin_thread_pointer: {
    if (!getContext().getTargetInfo().isTLSSupported())
      CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
    // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
    break;
  }
  case Builtin::BI__builtin_os_log_format:
    return emitBuiltinOSLogFormat(*E);

  case Builtin::BI__xray_customevent: {
    if (!ShouldXRayInstrumentFunction())
      return RValue::getIgnored();

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::Custom))
      return RValue::getIgnored();

    if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
      if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
        return RValue::getIgnored();

    Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
    auto FTy = F->getFunctionType();
    auto Arg0 = E->getArg(0);
    auto Arg0Val = EmitScalarExpr(Arg0);
    auto Arg0Ty = Arg0->getType();
    auto PTy0 = FTy->getParamType(0);
    if (PTy0 != Arg0Val->getType()) {
      if (Arg0Ty->isArrayType())
        Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
      else
        Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
    }
    auto Arg1 = EmitScalarExpr(E->getArg(1));
    auto PTy1 = FTy->getParamType(1);
    if (PTy1 != Arg1->getType())
      Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
    return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
  }
  case Builtin::BI__xray_typedevent: {
    // TODO: There should be a way to always emit events even if the current
    // function is not instrumented. Losing events in a stream can cripple
    // a trace.
    if (!ShouldXRayInstrumentFunction())
      return RValue::getIgnored();

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::Typed))
      return RValue::getIgnored();

    if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
      if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
        return RValue::getIgnored();

    Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
    auto FTy = F->getFunctionType();
    auto Arg0 = EmitScalarExpr(E->getArg(0));
    auto PTy0 = FTy->getParamType(0);
    if (PTy0 != Arg0->getType())
      Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
    auto Arg1 = E->getArg(1);
    auto Arg1Val = EmitScalarExpr(Arg1);
    auto Arg1Ty = Arg1->getType();
    auto PTy1 = FTy->getParamType(1);
    if (PTy1 != Arg1Val->getType()) {
      if (Arg1Ty->isArrayType())
        Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
      else
        Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
    }
    auto Arg2 = EmitScalarExpr(E->getArg(2));
    auto PTy2 = FTy->getParamType(2);
    if (PTy2 != Arg2->getType())
      Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
    return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
  }
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_ms_va_end:
    return RValue::get(
        EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
                       BuiltinID == Builtin::BI__builtin_ms_va_start));

  case Builtin::BI__builtin_ms_va_copy: {
    // Lower this manually. We can't reliably determine whether or not any
    // given va_copy() is for a Win64 va_list from the calling convention
    // alone, because it's legal to do this from a System V ABI function.
    // With opaque pointer types, we won't have enough information in LLVM
    // IR to determine this from the argument types, either. Best to do it
    // now, while we have enough information.
    Address DestAddr = EmitMSVAListRef(E->getArg(0));
    Address SrcAddr = EmitMSVAListRef(E->getArg(1));

    llvm::Type *BPP = Int8PtrPtrTy;

    DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
                       DestAddr.getAlignment());
    SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
                      SrcAddr.getAlignment());

    Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
    return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
  }
  }
  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));

  // Check that a call to a target specific builtin has the correct target
  // features enabled.
  // This is down here to avoid non-target specific builtins, however, if
  // generic builtins start to require generic target features then we
  // can move this up to the beginning of the function.
  checkTargetFeatures(E, FD);

  if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
    LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  StringRef Prefix =
      llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
  if (!Prefix.empty()) {
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
    // NOTE we don't need to perform a compatibility flag check here since the
    // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
    // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
    if (IntrinsicID == Intrinsic::not_intrinsic)
      IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
  }
  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        // XXX - vector of pointers?
        if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
          if (PtrTy->getAddressSpace() !=
              ArgValue->getType()->getPointerAddressSpace()) {
            ArgValue = Builder.CreateAddrSpaceCast(
                ArgValue,
                ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
          }
        }

        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = VoidTy;
    if (!BuiltinRetType->isVoidType())
      RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      // XXX - vector of pointers?
      if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
        if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
          V = Builder.CreateAddrSpaceCast(
              V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
        }
      }

      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }
  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  return GetUndefRValue(E->getType());
}
static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
                                        unsigned BuiltinID, const CallExpr *E,
                                        llvm::Triple::ArchType Arch) {
  switch (Arch) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CGF->EmitARMBuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be:
    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CGF->EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
  case llvm::Triple::hexagon:
    return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
    return EmitTargetArchBuiltinExpr(
        this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
        getContext().getAuxTargetInfo()->getTriple().getArch());
  }

  return EmitTargetArchBuiltinExpr(this, BuiltinID, E,
                                   getTarget().getTriple().getArch());
}
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                     NeonTypeFlags TypeFlags,
                                     bool HasLegalHalfType=true,
                                     bool V1Ty=false) {
  int IsQuad = TypeFlags.isQuad();
  switch (TypeFlags.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Float16:
    if (HasLegalHalfType)
      return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
    else
      return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
  case NeonTypeFlags::Poly128:
    // FIXME: i128 and f128 don't get fully supported in Clang and llvm.
    // There is a lot of i128 and f128 API missing, so we use v16i8 to
    // represent poly128 and get pattern matched.
    return llvm::VectorType::get(CGF->Int8Ty, 16);
  case NeonTypeFlags::Float32:
    return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Float64:
    return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
  }
  llvm_unreachable("Unknown vector element type!");
}
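// Illustrative note (not from the original source): the (N << IsQuad)
// doubling means e.g. Int32 maps to <2 x i32> for a 64-bit D register and to
// <4 x i32> for a 128-bit Q register.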
static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
                                          NeonTypeFlags IntTypeFlags) {
  int IsQuad = IntTypeFlags.isQuad();
  switch (IntTypeFlags.getEltType()) {
  case NeonTypeFlags::Int16:
    return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
  case NeonTypeFlags::Int64:
    return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
  default:
    llvm_unreachable("Type can't be converted to floating-point!");
  }
}
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = V->getType()->getVectorNumElements();
  Value* SV = llvm::ConstantVector::getSplat(nElts, C);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {
  int SV = cast<ConstantInt>(V)->getSExtValue();
  return ConstantInt::get(Ty, neg ? -SV : SV);
}

// Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
                                          llvm::Type *Ty, bool usgn,
                                          const char *name) {
  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);

  int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
  int EltSize = VTy->getScalarSizeInBits();

  Vec = Builder.CreateBitCast(Vec, Ty);

  // lshr/ashr are undefined when the shift amount is equal to the vector
  // element size.
  if (ShiftAmt == EltSize) {
    if (usgn) {
      // Right-shifting an unsigned value by its size yields 0.
      return llvm::ConstantAggregateZero::get(VTy);
    } else {
      // Right-shifting a signed value by its size is equivalent
      // to a shift of size-1.
      --ShiftAmt;
      Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
    }
  }

  Shift = EmitNeonShiftVector(Shift, Ty, false);
  if (usgn)
    return Builder.CreateLShr(Vec, Shift, name);
  else
    return Builder.CreateAShr(Vec, Shift, name);
}
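// Illustrative note (not from the original source): for a signed shift by the
// full element width, e.g. vshr_n_s32(v, 32), the clamp above turns the
// otherwise-undefined ashr-by-32 into an ashr by 31, matching NEON semantics,
// while the unsigned case folds directly to a zero vector.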
enum {
  AddRetType = (1 << 0),
  Add1ArgType = (1 << 1),
  Add2ArgTypes = (1 << 2),

  VectorizeRetType = (1 << 3),
  VectorizeArgTypes = (1 << 4),

  InventFloatType = (1 << 5),
  UnsignedAlts = (1 << 6),

  Use64BitVectors = (1 << 7),
  Use128BitVectors = (1 << 8),

  Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
  VectorRet = AddRetType | VectorizeRetType,
  VectorRetGetArgs01 =
      AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
  FpCmpzModifiers =
      AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};

namespace {
struct NeonIntrinsicInfo {
  const char *NameHint;
  unsigned BuiltinID;
  unsigned LLVMIntrinsic;
  unsigned AltLLVMIntrinsic;
  unsigned TypeModifier;

  bool operator<(unsigned RHSBuiltinID) const {
    return BuiltinID < RHSBuiltinID;
  }
  bool operator<(const NeonIntrinsicInfo &TE) const {
    return BuiltinID < TE.BuiltinID;
  }
};
} // end anonymous namespace
#define NEONMAP0(NameBase) \
  { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }

#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
  { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
      Intrinsic::LLVMIntrinsic, 0, TypeModifier }

#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
  { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
      Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
      TypeModifier }
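
// For reference, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to the row
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// while NEONMAP2 also names an alternate intrinsic (typically the
// signed/unsigned pairing selected via the UnsignedAlts modifier). Each table
// below must stay sorted by builtin ID.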
static const NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
  NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vabs_v, arm_neon_vabs, 0),
  NEONMAP1(vabsq_v, arm_neon_vabs, 0),
  NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
  NEONMAP1(vaeseq_v, arm_neon_aese, 0),
  NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
  NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
  NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vcage_v, arm_neon_vacge, 0),
  NEONMAP1(vcageq_v, arm_neon_vacge, 0),
  NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcale_v, arm_neon_vacge, 0),
  NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
  NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
  NEONMAP0(vcvt_f16_v),
  NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvt_s16_v),
  NEONMAP0(vcvt_s32_v),
  NEONMAP0(vcvt_s64_v),
  NEONMAP0(vcvt_u16_v),
  NEONMAP0(vcvt_u32_v),
  NEONMAP0(vcvt_u64_v),
  NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP0(vcvtq_f16_v),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_s16_v),
  NEONMAP0(vcvtq_s32_v),
  NEONMAP0(vcvtq_s64_v),
  NEONMAP0(vcvtq_u16_v),
  NEONMAP0(vcvtq_u32_v),
  NEONMAP0(vcvtq_u64_v),
  NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
  NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
  NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP0(vld1_dup_v),
  NEONMAP1(vld1_v, arm_neon_vld1, 0),
  NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
  NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
  NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
  NEONMAP0(vld1q_dup_v),
  NEONMAP1(vld1q_v, arm_neon_vld1, 0),
  NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
  NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
  NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
  NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
  NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2_v, arm_neon_vld2, 0),
  NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
  NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2q_v, arm_neon_vld2, 0),
  NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
  NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3_v, arm_neon_vld3, 0),
  NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
  NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3q_v, arm_neon_vld3, 0),
  NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
  NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4_v, arm_neon_vld4, 0),
  NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
  NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4q_v, arm_neon_vld4, 0),
  NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
  NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
  NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
  NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
  NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
  NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
  NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_v, arm_neon_vst1, 0),
  NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
  NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
  NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
  NEONMAP1(vst1q_v, arm_neon_vst1, 0),
  NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
  NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
  NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
  NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2_v, arm_neon_vst2, 0),
  NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2q_v, arm_neon_vst2, 0),
  NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3_v, arm_neon_vst3, 0),
  NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3q_v, arm_neon_vst3, 0),
  NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4_v, arm_neon_vst4, 0),
  NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4q_v, arm_neon_vst4, 0),
};

static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
  NEONMAP1(vabs_v, aarch64_neon_abs, 0),
  NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
  NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
  NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
  NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
  NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
  NEONMAP1(vcage_v, aarch64_neon_facge, 0),
  NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcale_v, aarch64_neon_facge, 0),
  NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
  NEONMAP0(vcvt_f16_v),
  NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_f16_v),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
  NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
  NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
  NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
  NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
  NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
  NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
  NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
  NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
  NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
  NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
  NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
  NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
  NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
  NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
  NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
  NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
  NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
  NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
  NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
  NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
  NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
  NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
  NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
  NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
  NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
  NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
  NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
  NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
};
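
// Scalar (SISD) builtins. Many of these reuse a vector intrinsic via the
// Vectorize1ArgType / Use64BitVectors / Use128BitVectors modifiers: the
// scalar operand is inserted into lane 0 of a vector, the vector intrinsic
// is called, and the scalar result is extracted again (see
// EmitCommonNeonSISDBuiltinExpr below).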
static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
  NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
  NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
  NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
  NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
  NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
  NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
  NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
  NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
  NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
  NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
  NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
  NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
  NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
  NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
  NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
  NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
  // FP16 scalar intrinsics go here.
  NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};

#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2

static bool NEONSIMDIntrinsicsProvenSorted = false;

static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
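
// Looks up BuiltinID in one of the tables above by binary search. The
// ProvenSorted flag lets the sortedness assertion run only once per table in
// asserts builds. A typical call site looks like:
//   const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
//       ARMSIMDIntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);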
static const NeonIntrinsicInfo *
findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
                       unsigned BuiltinID, bool &MapProvenSorted) {
#ifndef NDEBUG
  if (!MapProvenSorted) {
    assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
    MapProvenSorted = true;
  }
#endif
  const NeonIntrinsicInfo *Builtin = llvm::lower_bound(IntrinsicMap, BuiltinID);
  if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
    return Builtin;
  return nullptr;
}

Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                                   unsigned Modifier,
                                                   llvm::Type *ArgType,
                                                   const CallExpr *E) {
  int VectorSize = 0;
  if (Modifier & Use64BitVectors)
    VectorSize = 64;
  else if (Modifier & Use128BitVectors)
    VectorSize = 128;

  // Return type.
  SmallVector<llvm::Type *, 3> Tys;
  if (Modifier & AddRetType) {
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    if (Modifier & VectorizeRetType)
      Ty = llvm::VectorType::get(
          Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
    Tys.push_back(Ty);
  }

  // Arguments.
  if (Modifier & VectorizeArgTypes) {
    int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
    ArgType = llvm::VectorType::get(ArgType, Elts);
  }

  if (Modifier & (Add1ArgType | Add2ArgTypes))
    Tys.push_back(ArgType);
  if (Modifier & Add2ArgTypes)
    Tys.push_back(ArgType);
  if (Modifier & InventFloatType)
    Tys.push_back(FloatTy);

  return CGM.getIntrinsic(IntrinsicID, Tys);
}
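
// Worked example: for vaddlv_s32, mapped above to aarch64_neon_saddlv with
// AddRetType | Add1ArgType, the builtin returns int64_t and takes int32x2_t,
// so Tys becomes { i64, <2 x i32> } and the function returned should be the
// declaration of llvm.aarch64.neon.saddlv.i64.v2i32.

// Emits one of the scalar (SISD) builtins described by
// AArch64SISDIntrinsicMap, widening scalar operands to one-element vectors
// where the underlying vector intrinsic requires it.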
static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
                                            const NeonIntrinsicInfo &SISDInfo,
                                            SmallVectorImpl<Value *> &Ops,
                                            const CallExpr *E) {
  unsigned BuiltinID = SISDInfo.BuiltinID;
  unsigned int Int = SISDInfo.LLVMIntrinsic;
  unsigned Modifier = SISDInfo.TypeModifier;
  const char *s = SISDInfo.NameHint;

  switch (BuiltinID) {
  case NEON::BI__builtin_neon_vcled_s64:
  case NEON::BI__builtin_neon_vcled_u64:
  case NEON::BI__builtin_neon_vcles_f32:
  case NEON::BI__builtin_neon_vcled_f64:
  case NEON::BI__builtin_neon_vcltd_s64:
  case NEON::BI__builtin_neon_vcltd_u64:
  case NEON::BI__builtin_neon_vclts_f32:
  case NEON::BI__builtin_neon_vcltd_f64:
  case NEON::BI__builtin_neon_vcales_f32:
  case NEON::BI__builtin_neon_vcaled_f64:
  case NEON::BI__builtin_neon_vcalts_f32:
  case NEON::BI__builtin_neon_vcaltd_f64:
    // Only one direction of comparisons actually exists: cmle is a cmge with
    // swapped operands. The table gives us the right intrinsic, but we still
    // need to do the swap.
    std::swap(Ops[0], Ops[1]);
    break;
  }

  assert(Int && "Generic code assumes a valid intrinsic");

  // Determine the type(s) of this overloaded AArch64 intrinsic.
  const Expr *Arg = E->getArg(0);
  llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
  Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);

  int j = 0;
  ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j) {
    llvm::Type *ArgTy = ai->getType();
    if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
        ArgTy->getPrimitiveSizeInBits())
      continue;

    assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
    // The constant argument to an _n_ intrinsic always has Int32Ty, so
    // truncate it before inserting.
    Ops[j] =
        CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
    Ops[j] =
        CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
  }

  Value *Result = CGF.EmitNeonCall(F, Ops, s);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  if (ResultType->getPrimitiveSizeInBits() <
      Result->getType()->getPrimitiveSizeInBits())
    return CGF.Builder.CreateExtractElement(Result, C0);

  return CGF.Builder.CreateBitCast(Result, ResultType, s);
}
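
// Shared ARM/AArch64 NEON emission. The caller has already looked BuiltinID
// up in its per-target table and passes the candidate intrinsic(s) plus the
// type-modifier flags; the concrete overload is recovered here from the
// builtin's trailing NeonTypeFlags constant argument.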
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
    unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
    const char *NameHint, unsigned Modifier, const CallExpr *E,
    SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
    llvm::Triple::ArchType Arch) {
  // Get the last argument, which specifies the vector type.
  llvm::APSInt NeonTypeConst;
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
    return nullptr;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(NeonTypeConst.getZExtValue());
  bool Usgn = Type.isUnsigned();
  bool Quad = Type.isQuad();
  const bool HasLegalHalfType = getTarget().hasLegalHalfType();

  llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  auto getAlignmentValue32 = [&](Address addr) -> Value* {
    return Builder.getInt32(addr.getAlignment().getQuantity());
  };

  unsigned Int = LLVMIntrinsic;
  if ((Modifier & UnsignedAlts) && !Usgn)
    Int = AltLLVMIntrinsic;

  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_vpadd_v:
  case NEON::BI__builtin_neon_vpaddq_v:
    // We don't allow fp/int overloading of intrinsics.
    if (VTy->getElementType()->isFloatingPointTy() &&
        Int == Intrinsic::aarch64_neon_addp)
      Int = Intrinsic::aarch64_neon_faddp;
    break;
  case NEON::BI__builtin_neon_vabs_v:
  case NEON::BI__builtin_neon_vabsq_v:
    if (VTy->getElementType()->isFloatingPointTy())
      return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
  case NEON::BI__builtin_neon_vaddhn_v: {
    llvm::VectorType *SrcTy =
        llvm::VectorType::getExtendedElementVectorType(VTy);

    // %sum = add <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt =
        ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  }
  case NEON::BI__builtin_neon_vcale_v:
  case NEON::BI__builtin_neon_vcaleq_v:
  case NEON::BI__builtin_neon_vcalt_v:
  case NEON::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vcage_v:
  case NEON::BI__builtin_neon_vcageq_v:
  case NEON::BI__builtin_neon_vcagt_v:
  case NEON::BI__builtin_neon_vcagtq_v: {
    llvm::Type *Ty;
    switch (VTy->getScalarSizeInBits()) {
    default: llvm_unreachable("unexpected type");
    case 32:
      Ty = FloatTy;
      break;
    case 64:
      Ty = DoubleTy;
      break;
    case 16:
      Ty = HalfTy;
      break;
    }
    llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
    llvm::Type *Tys[] = { VTy, VecFlt };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vceqz_v:
  case NEON::BI__builtin_neon_vceqzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
                                         ICmpInst::ICMP_EQ, "vceqz");
  case NEON::BI__builtin_neon_vcgez_v:
  case NEON::BI__builtin_neon_vcgezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
                                         ICmpInst::ICMP_SGE, "vcgez");
  case NEON::BI__builtin_neon_vclez_v:
  case NEON::BI__builtin_neon_vclezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
                                         ICmpInst::ICMP_SLE, "vclez");
  case NEON::BI__builtin_neon_vcgtz_v:
  case NEON::BI__builtin_neon_vcgtzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
                                         ICmpInst::ICMP_SGT, "vcgtz");
  case NEON::BI__builtin_neon_vcltz_v:
  case NEON::BI__builtin_neon_vcltzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
                                         ICmpInst::ICMP_SLT, "vcltz");
  case NEON::BI__builtin_neon_vclz_v:
  case NEON::BI__builtin_neon_vclzq_v:
    // We generate a target-independent intrinsic, which needs a second
    // argument for whether or not clz of zero is undefined; on ARM it isn't.
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    break;
  case NEON::BI__builtin_neon_vcvt_f32_v:
  case NEON::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
                     HasLegalHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f16_v:
  case NEON::BI__builtin_neon_vcvtq_f16_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
                     HasLegalHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_n_f16_v:
  case NEON::BI__builtin_neon_vcvt_n_f32_v:
  case NEON::BI__builtin_neon_vcvt_n_f64_v:
  case NEON::BI__builtin_neon_vcvtq_n_f16_v:
  case NEON::BI__builtin_neon_vcvtq_n_f32_v:
  case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
    llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
    Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_n_s16_v:
  case NEON::BI__builtin_neon_vcvt_n_s32_v:
  case NEON::BI__builtin_neon_vcvt_n_u16_v:
  case NEON::BI__builtin_neon_vcvt_n_u32_v:
  case NEON::BI__builtin_neon_vcvt_n_s64_v:
  case NEON::BI__builtin_neon_vcvt_n_u64_v:
  case NEON::BI__builtin_neon_vcvtq_n_s16_v:
  case NEON::BI__builtin_neon_vcvtq_n_s32_v:
  case NEON::BI__builtin_neon_vcvtq_n_u16_v:
  case NEON::BI__builtin_neon_vcvtq_n_u32_v:
  case NEON::BI__builtin_neon_vcvtq_n_s64_v:
  case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_v:
  case NEON::BI__builtin_neon_vcvt_u16_v:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_v:
  case NEON::BI__builtin_neon_vcvtq_u16_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
    return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvta_s16_v:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvta_u16_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_v:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_v:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v:
  case NEON::BI__builtin_neon_vcvtn_s16_v:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u16_v:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_v:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_v:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v:
  case NEON::BI__builtin_neon_vcvtp_s16_v:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u16_v:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_v:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_v:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v:
  case NEON::BI__builtin_neon_vcvtm_s16_v:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u16_v:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_v:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_v:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vext_v:
  case NEON::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<uint32_t, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(i+CV);

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
  }
  case NEON::BI__builtin_neon_vfma_v:
  case NEON::BI__builtin_neon_vfmaq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Ops.push_back(getAlignmentValue32(PtrOp0));
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
  }
  case NEON::BI__builtin_neon_vld1_x2_v:
  case NEON::BI__builtin_neon_vld1q_x2_v:
  case NEON::BI__builtin_neon_vld1_x3_v:
  case NEON::BI__builtin_neon_vld1q_x3_v:
  case NEON::BI__builtin_neon_vld1_x4_v:
  case NEON::BI__builtin_neon_vld1q_x4_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v:
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v:
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v:
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v:
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v:
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Value *Align = getAlignmentValue32(PtrOp1);
    Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
    LoadInst *Ld = Builder.CreateLoad(PtrOp0);
    llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v:
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v:
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    for (unsigned I = 2; I < Ops.size() - 1; ++I)
      Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
    Ops.push_back(getAlignmentValue32(PtrOp1));
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vmovl_v: {
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (Usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case NEON::BI__builtin_neon_vmovn_v: {
    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until global ISel comes along that can
    // see through such movement, this leads to bad CodeGen. So we need an
    // intrinsic for now.
    Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
        llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
        llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
  }
5576 case NEON::BI__builtin_neon_vpaddl_v:
5577 case NEON::BI__builtin_neon_vpaddlq_v: {
5578 // The source operand type has twice as many elements of half the size.
5579 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5580 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5581 llvm::Type *NarrowTy =
5582 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
5583 llvm::Type *Tys[2] = { Ty, NarrowTy };
5584 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
5585 }
5586 case NEON::BI__builtin_neon_vqdmlal_v:
5587 case NEON::BI__builtin_neon_vqdmlsl_v: {
5588 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
5589 Ops[1] =
5590 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
5591 Ops.resize(2);
5592 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
5593 }
5594 case NEON::BI__builtin_neon_vqshl_n_v:
5595 case NEON::BI__builtin_neon_vqshlq_n_v:
5596 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
5597 1, false);
5598 case NEON::BI__builtin_neon_vqshlu_n_v:
5599 case NEON::BI__builtin_neon_vqshluq_n_v:
5600 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
5601 1, false);
5602 case NEON::BI__builtin_neon_vrecpe_v:
5603 case NEON::BI__builtin_neon_vrecpeq_v:
5604 case NEON::BI__builtin_neon_vrsqrte_v:
5605 case NEON::BI__builtin_neon_vrsqrteq_v:
5606 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
5607 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
5608 case NEON::BI__builtin_neon_vrndi_v:
5609 case NEON::BI__builtin_neon_vrndiq_v:
5610 Int = Intrinsic::nearbyint;
5611 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
5612 case NEON::BI__builtin_neon_vrshr_n_v:
5613 case NEON::BI__builtin_neon_vrshrq_n_v:
5614 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
5615 1, true);
5616 case NEON::BI__builtin_neon_vshl_n_v:
5617 case NEON::BI__builtin_neon_vshlq_n_v:
5618 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
5619 return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
5620 "vshl_n");
5621 case NEON::BI__builtin_neon_vshll_n_v: {
5622 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
5623 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5624 if (Usgn)
5625 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
5626 else
5627 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
5628 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
5629 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
5630 }
5631 case NEON::BI__builtin_neon_vshrn_n_v: {
5632 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
5633 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5634 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
5635 if (Usgn)
5636 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
5637 else
5638 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
5639 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
5640 }
5641 case NEON::BI__builtin_neon_vshr_n_v:
5642 case NEON::BI__builtin_neon_vshrq_n_v:
5643 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
5644 case NEON::BI__builtin_neon_vst1_v:
5645 case NEON::BI__builtin_neon_vst1q_v:
5646 case NEON::BI__builtin_neon_vst2_v:
5647 case NEON::BI__builtin_neon_vst2q_v:
5648 case NEON::BI__builtin_neon_vst3_v:
5649 case NEON::BI__builtin_neon_vst3q_v:
5650 case NEON::BI__builtin_neon_vst4_v:
5651 case NEON::BI__builtin_neon_vst4q_v:
5652 case NEON::BI__builtin_neon_vst2_lane_v:
5653 case NEON::BI__builtin_neon_vst2q_lane_v:
5654 case NEON::BI__builtin_neon_vst3_lane_v:
5655 case NEON::BI__builtin_neon_vst3q_lane_v:
5656 case NEON::BI__builtin_neon_vst4_lane_v:
5657 case NEON::BI__builtin_neon_vst4q_lane_v: {
5658 llvm::Type *Tys[] = {Int8PtrTy, Ty};
5659 Ops.push_back(getAlignmentValue32(PtrOp0));
5660 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
5661 }
5662 case NEON::BI__builtin_neon_vst1_x2_v:
5663 case NEON::BI__builtin_neon_vst1q_x2_v:
5664 case NEON::BI__builtin_neon_vst1_x3_v:
5665 case NEON::BI__builtin_neon_vst1q_x3_v:
5666 case NEON::BI__builtin_neon_vst1_x4_v:
5667 case NEON::BI__builtin_neon_vst1q_x4_v: {
5668 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
5669 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
5670 // in AArch64 it comes last. We may want to stick to one or the other.
5671 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) {
5672 llvm::Type *Tys[2] = { VTy, PTy };
5673 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
5674 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
5675 }
5676 llvm::Type *Tys[2] = { PTy, VTy };
5677 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
5678 }
5679 case NEON::BI__builtin_neon_vsubhn_v: {
5680 llvm::VectorType *SrcTy =
5681 llvm::VectorType::getExtendedElementVectorType(VTy);
5683 // %diff = sub <4 x i32> %lhs, %rhs
5684 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5685 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
5686 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
5688 // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
5689 Constant *ShiftAmt =
5690 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
5691 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
5693 // %res = trunc <4 x i32> %high to <4 x i16>
5694 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
5695 }
5696 case NEON::BI__builtin_neon_vtrn_v:
5697 case NEON::BI__builtin_neon_vtrnq_v: {
5698 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5699 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5700 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5701 Value *SV = nullptr;
5703 for (unsigned vi = 0; vi != 2; ++vi) {
5704 SmallVector<uint32_t, 16> Indices;
5705 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5706 Indices.push_back(i+vi);
5707 Indices.push_back(i+e+vi);
5708 }
5709 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5710 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
5711 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
5712 }
5713 return SV;
5714 }
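// Sketch for a <4 x i16> vtrn: the two stored shuffles use index sets
// {0, 4, 2, 6} and {1, 5, 3, 7}, i.e. the even lanes of both inputs and
// then the odd lanes, written to consecutive result slots behind Ops[0].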
5715 case NEON::BI__builtin_neon_vtst_v:
5716 case NEON::BI__builtin_neon_vtstq_v: {
5717 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5718 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5719 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
5720 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
5721 ConstantAggregateZero::get(Ty));
5722 return Builder.CreateSExt(Ops[0], Ty, "vtst");
5723 }
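// Illustrative IR for vtst_u8 (sketch):
//   %and = and <8 x i8> %a, %b
//   %cmp = icmp ne <8 x i8> %and, zeroinitializer
//   %res = sext <8 x i1> %cmp to <8 x i8>   ; all-ones / all-zeros lanes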
5724 case NEON::BI__builtin_neon_vuzp_v:
5725 case NEON::BI__builtin_neon_vuzpq_v: {
5726 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5727 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5728 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5729 Value *SV = nullptr;
5731 for (unsigned vi = 0; vi != 2; ++vi) {
5732 SmallVector<uint32_t, 16> Indices;
5733 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5734 Indices.push_back(2*i+vi);
5736 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5737 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
5738 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
5739 }
5740 return SV;
5741 }
5742 case NEON::BI__builtin_neon_vzip_v:
5743 case NEON::BI__builtin_neon_vzipq_v: {
5744 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5745 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5746 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5747 Value *SV = nullptr;
5749 for (unsigned vi = 0; vi != 2; ++vi) {
5750 SmallVector<uint32_t, 16> Indices;
5751 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5752 Indices.push_back((i + vi*e) >> 1);
5753 Indices.push_back(((i + vi*e) >> 1)+e);
5754 }
5755 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5756 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
5757 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
5758 }
5759 return SV;
5760 }
5761 case NEON::BI__builtin_neon_vdot_v:
5762 case NEON::BI__builtin_neon_vdotq_v: {
5763 llvm::Type *InputTy =
5764 llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
5765 llvm::Type *Tys[2] = { Ty, InputTy };
5766 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
5767 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
5768 }
5769 case NEON::BI__builtin_neon_vfmlal_low_v:
5770 case NEON::BI__builtin_neon_vfmlalq_low_v: {
5771 llvm::Type *InputTy =
5772 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
5773 llvm::Type *Tys[2] = { Ty, InputTy };
5774 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
5775 }
5776 case NEON::BI__builtin_neon_vfmlsl_low_v:
5777 case NEON::BI__builtin_neon_vfmlslq_low_v: {
5778 llvm::Type *InputTy =
5779 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
5780 llvm::Type *Tys[2] = { Ty, InputTy };
5781 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
5782 }
5783 case NEON::BI__builtin_neon_vfmlal_high_v:
5784 case NEON::BI__builtin_neon_vfmlalq_high_v: {
5785 llvm::Type *InputTy =
5786 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
5787 llvm::Type *Tys[2] = { Ty, InputTy };
5788 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
5789 }
5790 case NEON::BI__builtin_neon_vfmlsl_high_v:
5791 case NEON::BI__builtin_neon_vfmlslq_high_v: {
5792 llvm::Type *InputTy =
5793 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
5794 llvm::Type *Tys[2] = { Ty, InputTy };
5795 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
5796 }
5797 }
5799 assert(Int && "Expected valid intrinsic number");
5801 // Determine the type(s) of this overloaded AArch64 intrinsic.
5802 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
5804 Value *Result = EmitNeonCall(F, Ops, NameHint);
5805 llvm::Type *ResultType = ConvertType(E->getType());
5806 // AArch64 intrinsic one-element vector type cast to
5807 // scalar type expected by the builtin
5808 return Builder.CreateBitCast(Result, ResultType, NameHint);
5809 }
5811 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
5812 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
5813 const CmpInst::Predicate Ip, const Twine &Name) {
5814 llvm::Type *OTy = Op->getType();
5816 // FIXME: this is utterly horrific. We should not be looking at previous
5817 // codegen context to find out what needs doing. Unfortunately TableGen
5818 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
5819 // (and similar).
5820 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
5821 OTy = BI->getOperand(0)->getType();
5823 Op = Builder.CreateBitCast(Op, OTy);
5824 if (OTy->getScalarType()->isFloatingPointTy()) {
5825 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
5826 } else {
5827 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
5828 }
5829 return Builder.CreateSExt(Op, Ty, Name);
5830 }
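// Illustrative lowering for vceqz_f32 (sketch, <2 x float> operand):
//   %cmp = fcmp oeq <2 x float> %a, zeroinitializer
//   %res = sext <2 x i1> %cmp to <2 x i32>
// Integer variants such as vceqz_s32 take the icmp path instead.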
5832 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
5833 Value *ExtOp, Value *IndexOp,
5834 llvm::Type *ResTy, unsigned IntID,
5835 const char *Name) {
5836 SmallVector<Value *, 2> TblOps;
5837 if (ExtOp)
5838 TblOps.push_back(ExtOp);
5840 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
5841 SmallVector<uint32_t, 16> Indices;
5842 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
5843 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
5844 Indices.push_back(2*i);
5845 Indices.push_back(2*i+1);
5846 }
5848 int PairPos = 0, End = Ops.size() - 1;
5849 while (PairPos < End) {
5850 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
5851 Ops[PairPos+1], Indices,
5852 Name));
5853 PairPos += 2;
5854 }
5856 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
5857 // of the last 128-bit lookup table with zero.
5858 if (PairPos == End) {
5859 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
5860 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
5861 ZeroTbl, Indices, Name));
5862 }
5864 Function *TblF;
5865 TblOps.push_back(IndexOp);
5866 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
5868 return CGF.EmitNeonCall(TblF, TblOps, Name);
5869 }
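// Sketch of the packing: a vtbl3 call supplies three <8 x i8> tables.
// Tables 0 and 1 are concatenated into one <16 x i8> by the shuffle loop,
// and the leftover table 2 is widened with a zeroed upper half, producing
// the two 128-bit table registers that the tbl2 intrinsic expects.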
5871 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
5872 unsigned Value = 0;
5873 switch (BuiltinID) {
5874 default:
5875 return nullptr;
5876 case ARM::BI__builtin_arm_nop:
5877 Value = 0;
5878 break;
5879 case ARM::BI__builtin_arm_yield:
5880 case ARM::BI__yield:
5881 Value = 1;
5882 break;
5883 case ARM::BI__builtin_arm_wfe:
5884 case ARM::BI__wfe:
5885 Value = 2;
5886 break;
5887 case ARM::BI__builtin_arm_wfi:
5888 case ARM::BI__wfi:
5889 Value = 3;
5890 break;
5891 case ARM::BI__builtin_arm_sev:
5892 case ARM::BI__sev:
5893 Value = 4;
5894 break;
5895 case ARM::BI__builtin_arm_sevl:
5896 case ARM::BI__sevl:
5897 Value = 5;
5898 break;
5899 }
5901 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
5902 llvm::ConstantInt::get(Int32Ty, Value));
5903 }
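// The hint immediates follow the ARM encoding: 0 = NOP, 1 = YIELD, 2 = WFE,
// 3 = WFI, 4 = SEV and 5 = SEVL.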
5905 // Generates the IR for the read/write special register builtin.
5906 // ValueType is the type of the value that is to be written or read;
5907 // RegisterType is the type of the register being written to or read from.
5908 static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
5909 const CallExpr *E,
5910 llvm::Type *RegisterType,
5911 llvm::Type *ValueType,
5912 bool IsRead,
5913 StringRef SysReg = "") {
5914 // Write and read register intrinsics only support 32- and 64-bit operations.
5915 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
5916 && "Unsupported size for register.");
5918 CodeGen::CGBuilderTy &Builder = CGF.Builder;
5919 CodeGen::CodeGenModule &CGM = CGF.CGM;
5920 LLVMContext &Context = CGM.getLLVMContext();
5922 if (SysReg.empty()) {
5923 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
5924 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
5925 }
5927 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
5928 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
5929 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
5931 llvm::Type *Types[] = { RegisterType };
5933 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
5934 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
5935 && "Can't fit 64-bit value in 32-bit register");
5938 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
5939 llvm::Value *Call = Builder.CreateCall(F, Metadata);
5941 if (MixedTypes)
5942 // Read into 64 bit register and then truncate result to 32 bit.
5943 return Builder.CreateTrunc(Call, ValueType);
5945 if (ValueType->isPointerTy())
5946 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
5947 return Builder.CreateIntToPtr(Call, ValueType);
5949 return Call;
5950 }
5952 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
5953 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
5954 if (MixedTypes) {
5955 // Extend 32 bit write value to 64 bit to pass to write.
5956 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
5957 return Builder.CreateCall(F, { Metadata, ArgValue });
5958 }
5960 if (ValueType->isPointerTy()) {
5961 // Have VoidPtrTy ArgValue but want to return an i32/i64.
5962 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
5963 return Builder.CreateCall(F, { Metadata, ArgValue });
5964 }
5966 return Builder.CreateCall(F, { Metadata, ArgValue });
5967 }
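// Illustrative use (hypothetical register name): a read such as
// __builtin_arm_rsr64("ttbr0_el1") lowers to
//   %v = call i64 @llvm.read_register.i64(metadata !{!"ttbr0_el1"})
// while the wsr/wsrp forms go through @llvm.write_register instead.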
5969 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
5970 /// argument that specifies the vector type.
5971 static bool HasExtraNeonArgument(unsigned BuiltinID) {
5972 switch (BuiltinID) {
5973 default: break;
5974 case NEON::BI__builtin_neon_vget_lane_i8:
5975 case NEON::BI__builtin_neon_vget_lane_i16:
5976 case NEON::BI__builtin_neon_vget_lane_i32:
5977 case NEON::BI__builtin_neon_vget_lane_i64:
5978 case NEON::BI__builtin_neon_vget_lane_f32:
5979 case NEON::BI__builtin_neon_vgetq_lane_i8:
5980 case NEON::BI__builtin_neon_vgetq_lane_i16:
5981 case NEON::BI__builtin_neon_vgetq_lane_i32:
5982 case NEON::BI__builtin_neon_vgetq_lane_i64:
5983 case NEON::BI__builtin_neon_vgetq_lane_f32:
5984 case NEON::BI__builtin_neon_vset_lane_i8:
5985 case NEON::BI__builtin_neon_vset_lane_i16:
5986 case NEON::BI__builtin_neon_vset_lane_i32:
5987 case NEON::BI__builtin_neon_vset_lane_i64:
5988 case NEON::BI__builtin_neon_vset_lane_f32:
5989 case NEON::BI__builtin_neon_vsetq_lane_i8:
5990 case NEON::BI__builtin_neon_vsetq_lane_i16:
5991 case NEON::BI__builtin_neon_vsetq_lane_i32:
5992 case NEON::BI__builtin_neon_vsetq_lane_i64:
5993 case NEON::BI__builtin_neon_vsetq_lane_f32:
5994 case NEON::BI__builtin_neon_vsha1h_u32:
5995 case NEON::BI__builtin_neon_vsha1cq_u32:
5996 case NEON::BI__builtin_neon_vsha1pq_u32:
5997 case NEON::BI__builtin_neon_vsha1mq_u32:
5998 case clang::ARM::BI_MoveToCoprocessor:
5999 case clang::ARM::BI_MoveToCoprocessor2:
6000 return false;
6001 }
6002 return true;
6003 }
6005 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
6006 const CallExpr *E,
6007 llvm::Triple::ArchType Arch) {
6008 if (auto Hint = GetValueForARMHint(BuiltinID))
6009 return Hint;
6011 if (BuiltinID == ARM::BI__emit) {
6012 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
6013 llvm::FunctionType *FTy =
6014 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
6016 Expr::EvalResult Result;
6017 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
6018 llvm_unreachable("Sema will ensure that the parameter is constant");
6020 llvm::APSInt Value = Result.Val.getInt();
6021 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
6023 llvm::InlineAsm *Emit =
6024 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
6025 /*hasSideEffects=*/true)
6026 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
6027 /*hasSideEffects=*/true);
6029 return Builder.CreateCall(Emit);
6030 }
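// e.g. __emit(0xbf00) in Thumb mode emits the inline asm ".inst.n 0xbf00"
// (the Thumb NOP encoding); ARM mode uses the 32-bit ".inst" directive.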
6032 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
6033 Value *Option = EmitScalarExpr(E->getArg(0));
6034 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
6035 }
6037 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
6038 Value *Address = EmitScalarExpr(E->getArg(0));
6039 Value *RW = EmitScalarExpr(E->getArg(1));
6040 Value *IsData = EmitScalarExpr(E->getArg(2));
6042 // Locality is not supported on the ARM target.
6043 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
6045 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
6046 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
6047 }
6049 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
6050 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6051 return Builder.CreateCall(
6052 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
6053 }
6055 if (BuiltinID == ARM::BI__clear_cache) {
6056 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
6057 const FunctionDecl *FD = E->getDirectCallee();
6058 Value *Ops[2];
6059 for (unsigned i = 0; i < 2; i++)
6060 Ops[i] = EmitScalarExpr(E->getArg(i));
6061 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
6062 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
6063 StringRef Name = FD->getName();
6064 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
6065 }
6067 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
6068 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
6069 Function *F;
6071 switch (BuiltinID) {
6072 default: llvm_unreachable("unexpected builtin");
6073 case ARM::BI__builtin_arm_mcrr:
6074 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
6075 break;
6076 case ARM::BI__builtin_arm_mcrr2:
6077 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
6078 break;
6079 }
6081 // MCRR{2} instruction has 5 operands but
6082 // the intrinsic has 4 because Rt and Rt2
6083 // are represented as a single unsigned 64
6084 // bit integer in the intrinsic definition
6085 // but internally it's represented as 2 32
6086 // bit integers.
6088 Value *Coproc = EmitScalarExpr(E->getArg(0));
6089 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6090 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
6091 Value *CRm = EmitScalarExpr(E->getArg(3));
6093 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6094 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
6095 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
6096 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
6098 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
6099 }
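// Sketch of the split: for a 64-bit value V, Rt receives trunc(V) (the low
// word) and Rt2 receives trunc(V >> 32) (the high word), matching the two
// GPR operands of the MCRR{2} instruction.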
6101 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
6102 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
6103 Function *F;
6105 switch (BuiltinID) {
6106 default: llvm_unreachable("unexpected builtin");
6107 case ARM::BI__builtin_arm_mrrc:
6108 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
6109 break;
6110 case ARM::BI__builtin_arm_mrrc2:
6111 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
6112 break;
6113 }
6115 Value *Coproc = EmitScalarExpr(E->getArg(0));
6116 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6117 Value *CRm = EmitScalarExpr(E->getArg(2));
6118 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
6120 // Returns an unsigned 64 bit integer, represented
6121 // as two 32 bit integers.
6123 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
6124 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
6125 Rt = Builder.CreateZExt(Rt, Int64Ty);
6126 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
6128 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
6129 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
6130 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
6132 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
6133 }
6135 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
6136 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
6137 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
6138 getContext().getTypeSize(E->getType()) == 64) ||
6139 BuiltinID == ARM::BI__ldrexd) {
6140 Function *F;
6142 switch (BuiltinID) {
6143 default: llvm_unreachable("unexpected builtin");
6144 case ARM::BI__builtin_arm_ldaex:
6145 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
6146 break;
6147 case ARM::BI__builtin_arm_ldrexd:
6148 case ARM::BI__builtin_arm_ldrex:
6149 case ARM::BI__ldrexd:
6150 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
6151 break;
6152 }
6154 Value *LdPtr = EmitScalarExpr(E->getArg(0));
6155 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
6156 "ldrexd");
6158 Value *Val0 = Builder.CreateExtractValue(Val, 1);
6159 Value *Val1 = Builder.CreateExtractValue(Val, 0);
6160 Val0 = Builder.CreateZExt(Val0, Int64Ty);
6161 Val1 = Builder.CreateZExt(Val1, Int64Ty);
6163 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
6164 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
6165 Val = Builder.CreateOr(Val, Val1);
6166 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
6167 }
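// Sketch of the recombination: ldrexd yields { i32, i32 }; the 64-bit
// result is rebuilt as (zext(hi) << 32) | zext(lo), where extractvalue
// index 1 holds the high word and index 0 the low word.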
6169 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
6170 BuiltinID == ARM::BI__builtin_arm_ldaex) {
6171 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
6173 QualType Ty = E->getType();
6174 llvm::Type *RealResTy = ConvertType(Ty);
6175 llvm::Type *PtrTy = llvm::IntegerType::get(
6176 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
6177 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
6179 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
6180 ? Intrinsic::arm_ldaex
6181 : Intrinsic::arm_ldrex,
6182 PtrTy);
6183 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
6185 if (RealResTy->isPointerTy())
6186 return Builder.CreateIntToPtr(Val, RealResTy);
6188 llvm::Type *IntResTy = llvm::IntegerType::get(
6189 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
6190 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
6191 return Builder.CreateBitCast(Val, RealResTy);
6192 }
6195 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
6196 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
6197 BuiltinID == ARM::BI__builtin_arm_strex) &&
6198 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
6199 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6200 ? Intrinsic::arm_stlexd
6201 : Intrinsic::arm_strexd);
6202 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
6204 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
6205 Value *Val = EmitScalarExpr(E->getArg(0));
6206 Builder.CreateStore(Val, Tmp);
6208 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
6209 Val = Builder.CreateLoad(LdPtr);
6211 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
6212 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
6213 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
6214 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
6215 }
6217 if (BuiltinID == ARM::BI__builtin_arm_strex ||
6218 BuiltinID == ARM::BI__builtin_arm_stlex) {
6219 Value *StoreVal = EmitScalarExpr(E->getArg(0));
6220 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
6222 QualType Ty = E->getArg(0)->getType();
6223 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
6224 getContext().getTypeSize(Ty));
6225 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
6227 if (StoreVal->getType()->isPointerTy())
6228 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
6229 else {
6230 llvm::Type *IntTy = llvm::IntegerType::get(
6231 getLLVMContext(),
6232 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
6233 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
6234 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
6235 }
6237 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6238 ? Intrinsic::arm_stlex
6239 : Intrinsic::arm_strex,
6240 StoreAddr->getType());
6241 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
6242 }
6244 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
6245 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
6246 return Builder.CreateCall(F);
6247 }
6249 // CRC32
6250 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
6251 switch (BuiltinID) {
6252 case ARM::BI__builtin_arm_crc32b:
6253 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
6254 case ARM::BI__builtin_arm_crc32cb:
6255 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
6256 case ARM::BI__builtin_arm_crc32h:
6257 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
6258 case ARM::BI__builtin_arm_crc32ch:
6259 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
6260 case ARM::BI__builtin_arm_crc32w:
6261 case ARM::BI__builtin_arm_crc32d:
6262 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
6263 case ARM::BI__builtin_arm_crc32cw:
6264 case ARM::BI__builtin_arm_crc32cd:
6265 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
6266 }
6268 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
6269 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6270 Value *Arg1 = EmitScalarExpr(E->getArg(1));
6272 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
6273 // intrinsics, hence we need different codegen for these cases.
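// Illustratively, crc32d(init, x) == crc32w(crc32w(init, lo32(x)), hi32(x)),
// which is exactly the chaining performed below.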
6274 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
6275 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
6276 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6277 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
6278 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
6279 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
6281 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6282 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
6283 return Builder.CreateCall(F, {Res, Arg1b});
6284 } else {
6285 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
6287 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6288 return Builder.CreateCall(F, {Arg0, Arg1});
6289 }
6290 }
6292 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6293 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6294 BuiltinID == ARM::BI__builtin_arm_rsrp ||
6295 BuiltinID == ARM::BI__builtin_arm_wsr ||
6296 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6297 BuiltinID == ARM::BI__builtin_arm_wsrp) {
6299 bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
6300 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6301 BuiltinID == ARM::BI__builtin_arm_rsrp;
6303 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
6304 BuiltinID == ARM::BI__builtin_arm_wsrp;
6306 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6307 BuiltinID == ARM::BI__builtin_arm_wsr64;
6309 llvm::Type *ValueType;
6310 llvm::Type *RegisterType;
6311 if (IsPointerBuiltin) {
6312 ValueType = VoidPtrTy;
6313 RegisterType = Int32Ty;
6314 } else if (Is64Bit) {
6315 ValueType = RegisterType = Int64Ty;
6316 } else {
6317 ValueType = RegisterType = Int32Ty;
6318 }
6320 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
6321 }
6323 // Find out if any arguments are required to be integer constant
6324 // expressions.
6325 unsigned ICEArguments = 0;
6326 ASTContext::GetBuiltinTypeError Error;
6327 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6328 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6330 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6331 return Builder.getInt32(addr.getAlignment().getQuantity());
6332 };
6334 Address PtrOp0 = Address::invalid();
6335 Address PtrOp1 = Address::invalid();
6336 SmallVector<Value*, 4> Ops;
6337 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
6338 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
6339 for (unsigned i = 0, e = NumArgs; i != e; i++) {
6340 if (i == 0) {
6341 switch (BuiltinID) {
6342 case NEON::BI__builtin_neon_vld1_v:
6343 case NEON::BI__builtin_neon_vld1q_v:
6344 case NEON::BI__builtin_neon_vld1q_lane_v:
6345 case NEON::BI__builtin_neon_vld1_lane_v:
6346 case NEON::BI__builtin_neon_vld1_dup_v:
6347 case NEON::BI__builtin_neon_vld1q_dup_v:
6348 case NEON::BI__builtin_neon_vst1_v:
6349 case NEON::BI__builtin_neon_vst1q_v:
6350 case NEON::BI__builtin_neon_vst1q_lane_v:
6351 case NEON::BI__builtin_neon_vst1_lane_v:
6352 case NEON::BI__builtin_neon_vst2_v:
6353 case NEON::BI__builtin_neon_vst2q_v:
6354 case NEON::BI__builtin_neon_vst2_lane_v:
6355 case NEON::BI__builtin_neon_vst2q_lane_v:
6356 case NEON::BI__builtin_neon_vst3_v:
6357 case NEON::BI__builtin_neon_vst3q_v:
6358 case NEON::BI__builtin_neon_vst3_lane_v:
6359 case NEON::BI__builtin_neon_vst3q_lane_v:
6360 case NEON::BI__builtin_neon_vst4_v:
6361 case NEON::BI__builtin_neon_vst4q_v:
6362 case NEON::BI__builtin_neon_vst4_lane_v:
6363 case NEON::BI__builtin_neon_vst4q_lane_v:
6364 // Get the alignment for the argument in addition to the value;
6365 // we'll use it later.
6366 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
6367 Ops.push_back(PtrOp0.getPointer());
6368 continue;
6369 }
6370 }
6371 if (i == 1) {
6372 switch (BuiltinID) {
6373 case NEON::BI__builtin_neon_vld2_v:
6374 case NEON::BI__builtin_neon_vld2q_v:
6375 case NEON::BI__builtin_neon_vld3_v:
6376 case NEON::BI__builtin_neon_vld3q_v:
6377 case NEON::BI__builtin_neon_vld4_v:
6378 case NEON::BI__builtin_neon_vld4q_v:
6379 case NEON::BI__builtin_neon_vld2_lane_v:
6380 case NEON::BI__builtin_neon_vld2q_lane_v:
6381 case NEON::BI__builtin_neon_vld3_lane_v:
6382 case NEON::BI__builtin_neon_vld3q_lane_v:
6383 case NEON::BI__builtin_neon_vld4_lane_v:
6384 case NEON::BI__builtin_neon_vld4q_lane_v:
6385 case NEON::BI__builtin_neon_vld2_dup_v:
6386 case NEON::BI__builtin_neon_vld2q_dup_v:
6387 case NEON::BI__builtin_neon_vld3_dup_v:
6388 case NEON::BI__builtin_neon_vld3q_dup_v:
6389 case NEON::BI__builtin_neon_vld4_dup_v:
6390 case NEON::BI__builtin_neon_vld4q_dup_v:
6391 // Get the alignment for the argument in addition to the value;
6392 // we'll use it later.
6393 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
6394 Ops.push_back(PtrOp1.getPointer());
6395 continue;
6396 }
6397 }
6399 if ((ICEArguments & (1 << i)) == 0) {
6400 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6401 } else {
6402 // If this is required to be a constant, constant fold it so that we know
6403 // that the generated intrinsic gets a ConstantInt.
6404 llvm::APSInt Result;
6405 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
6406 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
6407 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
6408 }
6409 }
6411 switch (BuiltinID) {
6412 default: break;
6414 case NEON::BI__builtin_neon_vget_lane_i8:
6415 case NEON::BI__builtin_neon_vget_lane_i16:
6416 case NEON::BI__builtin_neon_vget_lane_i32:
6417 case NEON::BI__builtin_neon_vget_lane_i64:
6418 case NEON::BI__builtin_neon_vget_lane_f32:
6419 case NEON::BI__builtin_neon_vgetq_lane_i8:
6420 case NEON::BI__builtin_neon_vgetq_lane_i16:
6421 case NEON::BI__builtin_neon_vgetq_lane_i32:
6422 case NEON::BI__builtin_neon_vgetq_lane_i64:
6423 case NEON::BI__builtin_neon_vgetq_lane_f32:
6424 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
6426 case NEON::BI__builtin_neon_vrndns_f32: {
6427 Value *Arg = EmitScalarExpr(E->getArg(0));
6428 llvm::Type *Tys[] = {Arg->getType()};
6429 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
6430 return Builder.CreateCall(F, {Arg}, "vrndn"); }
6432 case NEON::BI__builtin_neon_vset_lane_i8:
6433 case NEON::BI__builtin_neon_vset_lane_i16:
6434 case NEON::BI__builtin_neon_vset_lane_i32:
6435 case NEON::BI__builtin_neon_vset_lane_i64:
6436 case NEON::BI__builtin_neon_vset_lane_f32:
6437 case NEON::BI__builtin_neon_vsetq_lane_i8:
6438 case NEON::BI__builtin_neon_vsetq_lane_i16:
6439 case NEON::BI__builtin_neon_vsetq_lane_i32:
6440 case NEON::BI__builtin_neon_vsetq_lane_i64:
6441 case NEON::BI__builtin_neon_vsetq_lane_f32:
6442 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
6444 case NEON::BI__builtin_neon_vsha1h_u32:
6445 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
6446 "vsha1h");
6447 case NEON::BI__builtin_neon_vsha1cq_u32:
6448 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
6449 "vsha1c");
6450 case NEON::BI__builtin_neon_vsha1pq_u32:
6451 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
6452 "vsha1p");
6453 case NEON::BI__builtin_neon_vsha1mq_u32:
6454 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
6455 "vsha1m");
6457 // The ARM _MoveToCoprocessor builtins put the input register value as
6458 // the first argument, but the LLVM intrinsic expects it as the third one.
6459 case ARM::BI_MoveToCoprocessor:
6460 case ARM::BI_MoveToCoprocessor2: {
6461 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
6462 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
6463 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
6464 Ops[3], Ops[4], Ops[5]});
6465 }
6466 case ARM::BI_BitScanForward:
6467 case ARM::BI_BitScanForward64:
6468 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
6469 case ARM::BI_BitScanReverse:
6470 case ARM::BI_BitScanReverse64:
6471 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
6473 case ARM::BI_InterlockedAnd64:
6474 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
6475 case ARM::BI_InterlockedExchange64:
6476 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
6477 case ARM::BI_InterlockedExchangeAdd64:
6478 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
6479 case ARM::BI_InterlockedExchangeSub64:
6480 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
6481 case ARM::BI_InterlockedOr64:
6482 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
6483 case ARM::BI_InterlockedXor64:
6484 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
6485 case ARM::BI_InterlockedDecrement64:
6486 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
6487 case ARM::BI_InterlockedIncrement64:
6488 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
6489 case ARM::BI_InterlockedExchangeAdd8_acq:
6490 case ARM::BI_InterlockedExchangeAdd16_acq:
6491 case ARM::BI_InterlockedExchangeAdd_acq:
6492 case ARM::BI_InterlockedExchangeAdd64_acq:
6493 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
6494 case ARM::BI_InterlockedExchangeAdd8_rel:
6495 case ARM::BI_InterlockedExchangeAdd16_rel:
6496 case ARM::BI_InterlockedExchangeAdd_rel:
6497 case ARM::BI_InterlockedExchangeAdd64_rel:
6498 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
6499 case ARM::BI_InterlockedExchangeAdd8_nf:
6500 case ARM::BI_InterlockedExchangeAdd16_nf:
6501 case ARM::BI_InterlockedExchangeAdd_nf:
6502 case ARM::BI_InterlockedExchangeAdd64_nf:
6503 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
6504 case ARM::BI_InterlockedExchange8_acq:
6505 case ARM::BI_InterlockedExchange16_acq:
6506 case ARM::BI_InterlockedExchange_acq:
6507 case ARM::BI_InterlockedExchange64_acq:
6508 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
6509 case ARM::BI_InterlockedExchange8_rel:
6510 case ARM::BI_InterlockedExchange16_rel:
6511 case ARM::BI_InterlockedExchange_rel:
6512 case ARM::BI_InterlockedExchange64_rel:
6513 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
6514 case ARM::BI_InterlockedExchange8_nf:
6515 case ARM::BI_InterlockedExchange16_nf:
6516 case ARM::BI_InterlockedExchange_nf:
6517 case ARM::BI_InterlockedExchange64_nf:
6518 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
6519 case ARM::BI_InterlockedCompareExchange8_acq:
6520 case ARM::BI_InterlockedCompareExchange16_acq:
6521 case ARM::BI_InterlockedCompareExchange_acq:
6522 case ARM::BI_InterlockedCompareExchange64_acq:
6523 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
6524 case ARM::BI_InterlockedCompareExchange8_rel:
6525 case ARM::BI_InterlockedCompareExchange16_rel:
6526 case ARM::BI_InterlockedCompareExchange_rel:
6527 case ARM::BI_InterlockedCompareExchange64_rel:
6528 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
6529 case ARM::BI_InterlockedCompareExchange8_nf:
6530 case ARM::BI_InterlockedCompareExchange16_nf:
6531 case ARM::BI_InterlockedCompareExchange_nf:
6532 case ARM::BI_InterlockedCompareExchange64_nf:
6533 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
6534 case ARM::BI_InterlockedOr8_acq:
6535 case ARM::BI_InterlockedOr16_acq:
6536 case ARM::BI_InterlockedOr_acq:
6537 case ARM::BI_InterlockedOr64_acq:
6538 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
6539 case ARM::BI_InterlockedOr8_rel:
6540 case ARM::BI_InterlockedOr16_rel:
6541 case ARM::BI_InterlockedOr_rel:
6542 case ARM::BI_InterlockedOr64_rel:
6543 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
6544 case ARM::BI_InterlockedOr8_nf:
6545 case ARM::BI_InterlockedOr16_nf:
6546 case ARM::BI_InterlockedOr_nf:
6547 case ARM::BI_InterlockedOr64_nf:
6548 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
6549 case ARM::BI_InterlockedXor8_acq:
6550 case ARM::BI_InterlockedXor16_acq:
6551 case ARM::BI_InterlockedXor_acq:
6552 case ARM::BI_InterlockedXor64_acq:
6553 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
6554 case ARM::BI_InterlockedXor8_rel:
6555 case ARM::BI_InterlockedXor16_rel:
6556 case ARM::BI_InterlockedXor_rel:
6557 case ARM::BI_InterlockedXor64_rel:
6558 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
6559 case ARM::BI_InterlockedXor8_nf:
6560 case ARM::BI_InterlockedXor16_nf:
6561 case ARM::BI_InterlockedXor_nf:
6562 case ARM::BI_InterlockedXor64_nf:
6563 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
6564 case ARM::BI_InterlockedAnd8_acq:
6565 case ARM::BI_InterlockedAnd16_acq:
6566 case ARM::BI_InterlockedAnd_acq:
6567 case ARM::BI_InterlockedAnd64_acq:
6568 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
6569 case ARM::BI_InterlockedAnd8_rel:
6570 case ARM::BI_InterlockedAnd16_rel:
6571 case ARM::BI_InterlockedAnd_rel:
6572 case ARM::BI_InterlockedAnd64_rel:
6573 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
6574 case ARM::BI_InterlockedAnd8_nf:
6575 case ARM::BI_InterlockedAnd16_nf:
6576 case ARM::BI_InterlockedAnd_nf:
6577 case ARM::BI_InterlockedAnd64_nf:
6578 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
6579 case ARM::BI_InterlockedIncrement16_acq:
6580 case ARM::BI_InterlockedIncrement_acq:
6581 case ARM::BI_InterlockedIncrement64_acq:
6582 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
6583 case ARM::BI_InterlockedIncrement16_rel:
6584 case ARM::BI_InterlockedIncrement_rel:
6585 case ARM::BI_InterlockedIncrement64_rel:
6586 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
6587 case ARM::BI_InterlockedIncrement16_nf:
6588 case ARM::BI_InterlockedIncrement_nf:
6589 case ARM::BI_InterlockedIncrement64_nf:
6590 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
6591 case ARM::BI_InterlockedDecrement16_acq:
6592 case ARM::BI_InterlockedDecrement_acq:
6593 case ARM::BI_InterlockedDecrement64_acq:
6594 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
6595 case ARM::BI_InterlockedDecrement16_rel:
6596 case ARM::BI_InterlockedDecrement_rel:
6597 case ARM::BI_InterlockedDecrement64_rel:
6598 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
6599 case ARM::BI_InterlockedDecrement16_nf:
6600 case ARM::BI_InterlockedDecrement_nf:
6601 case ARM::BI_InterlockedDecrement64_nf:
6602 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
6603 }
6605 // Get the last argument, which specifies the vector type.
6606 assert(HasExtraArg);
6607 llvm::APSInt Result;
6608 const Expr *Arg = E->getArg(E->getNumArgs()-1);
6609 if (!Arg->isIntegerConstantExpr(Result, getContext()))
6610 return nullptr;
6612 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
6613 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
6614 // Determine the overloaded type of this builtin.
6615 llvm::Type *Ty;
6616 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
6617 Ty = FloatTy;
6618 else
6619 Ty = DoubleTy;
6621 // Determine whether this is an unsigned conversion or not.
6622 bool usgn = Result.getZExtValue() == 1;
6623 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
6625 // Call the appropriate intrinsic.
6626 Function *F = CGM.getIntrinsic(Int, Ty);
6627 return Builder.CreateCall(F, Ops, "vcvtr");
6628 }
6630 // Determine the type of this overloaded NEON intrinsic.
6631 NeonTypeFlags Type(Result.getZExtValue());
6632 bool usgn = Type.isUnsigned();
6633 bool rightShift = false;
6635 llvm::VectorType *VTy = GetNeonType(this, Type,
6636 getTarget().hasLegalHalfType());
6637 llvm::Type *Ty = VTy;
6638 if (!Ty)
6639 return nullptr;
6641 // Many NEON builtins have identical semantics and uses in ARM and
6642 // AArch64. Emit these in a single function.
6643 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
6644 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
6645 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
6646 if (Builtin)
6647 return EmitCommonNeonBuiltinExpr(
6648 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
6649 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
6651 unsigned Int;
6652 switch (BuiltinID) {
6653 default: return nullptr;
6654 case NEON::BI__builtin_neon_vld1q_lane_v:
6655 // Handle 64-bit integer elements as a special case. Use shuffles of
6656 // one-element vectors to avoid poor code for i64 in the backend.
6657 if (VTy->getElementType()->isIntegerTy(64)) {
6658 // Extract the other lane.
6659 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6660 uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
6661 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
6662 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
6663 // Load the value as a one-element vector.
6664 Ty = llvm::VectorType::get(VTy->getElementType(), 1);
6665 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6666 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
6667 Value *Align = getAlignmentValue32(PtrOp0);
6668 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
6669 // Combine them.
6670 uint32_t Indices[] = {1 - Lane, Lane};
6671 SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
6672 return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
6673 }
6674 LLVM_FALLTHROUGH;
6675 case NEON::BI__builtin_neon_vld1_lane_v: {
6676 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6677 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
6678 Value *Ld = Builder.CreateLoad(PtrOp0);
6679 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
6680 }
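// e.g. vld1_lane_s32(p, v, 1) loads one i32 through the element pointer and
// emits "insertelement <2 x i32> %v, i32 %ld, i32 1" (illustrative sketch).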
6681 case NEON::BI__builtin_neon_vqrshrn_n_v:
6682 Int =
6683 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
6684 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
6685 1, true);
6686 case NEON::BI__builtin_neon_vqrshrun_n_v:
6687 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
6688 Ops, "vqrshrun_n", 1, true);
6689 case NEON::BI__builtin_neon_vqshrn_n_v:
6690 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
6691 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
6692 1, true);
6693 case NEON::BI__builtin_neon_vqshrun_n_v:
6694 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
6695 Ops, "vqshrun_n", 1, true);
6696 case NEON::BI__builtin_neon_vrecpe_v:
6697 case NEON::BI__builtin_neon_vrecpeq_v:
6698 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
6699 Ops, "vrecpe");
6700 case NEON::BI__builtin_neon_vrshrn_n_v:
6701 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
6702 Ops, "vrshrn_n", 1, true);
6703 case NEON::BI__builtin_neon_vrsra_n_v:
6704 case NEON::BI__builtin_neon_vrsraq_n_v:
6705 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6706 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6707 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
6708 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
6709 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
6710 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
6711 case NEON::BI__builtin_neon_vsri_n_v:
6712 case NEON::BI__builtin_neon_vsriq_n_v:
6713 rightShift = true;
6714 LLVM_FALLTHROUGH;
6715 case NEON::BI__builtin_neon_vsli_n_v:
6716 case NEON::BI__builtin_neon_vsliq_n_v:
6717 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
6718 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
6719 Ops, "vsli_n");
6720 case NEON::BI__builtin_neon_vsra_n_v:
6721 case NEON::BI__builtin_neon_vsraq_n_v:
6722 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6723 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
6724 return Builder.CreateAdd(Ops[0], Ops[1]);
6725 case NEON::BI__builtin_neon_vst1q_lane_v:
6726 // Handle 64-bit integer elements as a special case. Use a shuffle to get
6727 // a one-element vector and avoid poor code for i64 in the backend.
6728 if (VTy->getElementType()->isIntegerTy(64)) {
6729 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6730 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
6731 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
6732 Ops[2] = getAlignmentValue32(PtrOp0);
6733 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
6734 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
6735 Tys), Ops);
6736 }
6737 LLVM_FALLTHROUGH;
6738 case NEON::BI__builtin_neon_vst1_lane_v: {
6739 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6740 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
6741 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6742 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
6743 return St;
6744 }
6745 case NEON::BI__builtin_neon_vtbl1_v:
6746 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
6747 Ops, "vtbl1");
6748 case NEON::BI__builtin_neon_vtbl2_v:
6749 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
6750 Ops, "vtbl2");
6751 case NEON::BI__builtin_neon_vtbl3_v:
6752 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
6753 Ops, "vtbl3");
6754 case NEON::BI__builtin_neon_vtbl4_v:
6755 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
6756 Ops, "vtbl4");
6757 case NEON::BI__builtin_neon_vtbx1_v:
6758 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
6759 Ops, "vtbx1");
6760 case NEON::BI__builtin_neon_vtbx2_v:
6761 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
6762 Ops, "vtbx2");
6763 case NEON::BI__builtin_neon_vtbx3_v:
6764 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
6765 Ops, "vtbx3");
6766 case NEON::BI__builtin_neon_vtbx4_v:
6767 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
6768 Ops, "vtbx4");
6769 }
6770 }
6772 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
6773 const CallExpr *E,
6774 SmallVectorImpl<Value *> &Ops,
6775 llvm::Triple::ArchType Arch) {
6776 unsigned int Int = 0;
6777 const char *s = nullptr;
6779 switch (BuiltinID) {
6780 default:
6781 return nullptr;
6782 case NEON::BI__builtin_neon_vtbl1_v:
6783 case NEON::BI__builtin_neon_vqtbl1_v:
6784 case NEON::BI__builtin_neon_vqtbl1q_v:
6785 case NEON::BI__builtin_neon_vtbl2_v:
6786 case NEON::BI__builtin_neon_vqtbl2_v:
6787 case NEON::BI__builtin_neon_vqtbl2q_v:
6788 case NEON::BI__builtin_neon_vtbl3_v:
6789 case NEON::BI__builtin_neon_vqtbl3_v:
6790 case NEON::BI__builtin_neon_vqtbl3q_v:
6791 case NEON::BI__builtin_neon_vtbl4_v:
6792 case NEON::BI__builtin_neon_vqtbl4_v:
6793 case NEON::BI__builtin_neon_vqtbl4q_v:
6794 break;
6795 case NEON::BI__builtin_neon_vtbx1_v:
6796 case NEON::BI__builtin_neon_vqtbx1_v:
6797 case NEON::BI__builtin_neon_vqtbx1q_v:
6798 case NEON::BI__builtin_neon_vtbx2_v:
6799 case NEON::BI__builtin_neon_vqtbx2_v:
6800 case NEON::BI__builtin_neon_vqtbx2q_v:
6801 case NEON::BI__builtin_neon_vtbx3_v:
6802 case NEON::BI__builtin_neon_vqtbx3_v:
6803 case NEON::BI__builtin_neon_vqtbx3q_v:
6804 case NEON::BI__builtin_neon_vtbx4_v:
6805 case NEON::BI__builtin_neon_vqtbx4_v:
6806 case NEON::BI__builtin_neon_vqtbx4q_v:
6807 break;
6808 }
6810 assert(E->getNumArgs() >= 3);
6812 // Get the last argument, which specifies the vector type.
6813 llvm::APSInt Result;
6814 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
6815 if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
6816 return nullptr;
6818 // Determine the type of this overloaded NEON intrinsic.
6819 NeonTypeFlags Type(Result.getZExtValue());
6820 llvm::VectorType *Ty = GetNeonType(&CGF, Type);
6821 if (!Ty)
6822 return nullptr;
6824 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6826 // AArch64 scalar builtins are not overloaded; they do not have an extra
6827 // argument that specifies the vector type, so we need to handle each case.
6828 switch (BuiltinID) {
6829 case NEON::BI__builtin_neon_vtbl1_v: {
6830 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
6831 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
6832 "vtbl1");
6833 }
6834 case NEON::BI__builtin_neon_vtbl2_v: {
6835 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
6836 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
6837 "vtbl1");
6838 }
6839 case NEON::BI__builtin_neon_vtbl3_v: {
6840 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
6841 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
6842 "vtbl2");
6843 }
6844 case NEON::BI__builtin_neon_vtbl4_v: {
6845 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
6846 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
6847 "vtbl2");
6848 }
6849 case NEON::BI__builtin_neon_vtbx1_v: {
6850 Value *TblRes =
6851 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
6852 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
6854 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
6855 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
6856 CmpRes = Builder.CreateSExt(CmpRes, Ty);
6858 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
6859 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
6860 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
6861 }
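// vtbx semantics (sketch): a lane whose index byte is >= 8 is out of range
// for the single 64-bit table, so the mask keeps the original destination
// element for that lane and takes the tbl1 result otherwise.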
6862 case NEON::BI__builtin_neon_vtbx2_v: {
6863 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
6864 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
6865 "vtbx1");
6866 }
6867 case NEON::BI__builtin_neon_vtbx3_v: {
6868 Value *TblRes =
6869 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
6870 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
6872 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
6873 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
6874 TwentyFourV);
6875 CmpRes = Builder.CreateSExt(CmpRes, Ty);
6877 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
6878 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
6879 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
6880 }
6881 case NEON::BI__builtin_neon_vtbx4_v: {
6882 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
6883 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
6884 "vtbx2");
6885 }
6886 case NEON::BI__builtin_neon_vqtbl1_v:
6887 case NEON::BI__builtin_neon_vqtbl1q_v:
6888 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
6889 case NEON::BI__builtin_neon_vqtbl2_v:
6890 case NEON::BI__builtin_neon_vqtbl2q_v:
6891 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
6892 case NEON::BI__builtin_neon_vqtbl3_v:
6893 case NEON::BI__builtin_neon_vqtbl3q_v:
6894 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
6895 case NEON::BI__builtin_neon_vqtbl4_v:
6896 case NEON::BI__builtin_neon_vqtbl4q_v:
6897 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
6898 case NEON::BI__builtin_neon_vqtbx1_v:
6899 case NEON::BI__builtin_neon_vqtbx1q_v:
6900 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
6901 case NEON::BI__builtin_neon_vqtbx2_v:
6902 case NEON::BI__builtin_neon_vqtbx2q_v:
6903 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
6904 case NEON::BI__builtin_neon_vqtbx3_v:
6905 case NEON::BI__builtin_neon_vqtbx3q_v:
6906 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
6907 case NEON::BI__builtin_neon_vqtbx4_v:
6908 case NEON::BI__builtin_neon_vqtbx4q_v:
6909 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
6910 }
6913 if (!Int)
6914 return nullptr;
6916 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
6917 return CGF.EmitNeonCall(F, Ops, s);
6918 }
6920 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
6921 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
6922 Op = Builder.CreateBitCast(Op, Int16Ty);
6923 Value *V = UndefValue::get(VTy);
6924 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
6925 Op = Builder.CreateInsertElement(V, Op, CI);
6926 return Op;
6927 }
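// Sketch: this wraps an i16 scalar as lane 0 of an otherwise-undef
// <4 x i16> so scalar saturating builtins can reuse the vector NEON
// intrinsics; callers typically extract lane 0 of the result again.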
6929 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
6930 const CallExpr *E,
6931 llvm::Triple::ArchType Arch) {
6932 unsigned HintID = static_cast<unsigned>(-1);
6933 switch (BuiltinID) {
6934 default: break;
6935 case AArch64::BI__builtin_arm_nop:
6936 HintID = 0;
6937 break;
6938 case AArch64::BI__builtin_arm_yield:
6939 case AArch64::BI__yield:
6940 HintID = 1;
6941 break;
6942 case AArch64::BI__builtin_arm_wfe:
6943 case AArch64::BI__wfe:
6944 HintID = 2;
6945 break;
6946 case AArch64::BI__builtin_arm_wfi:
6947 case AArch64::BI__wfi:
6948 HintID = 3;
6949 break;
6950 case AArch64::BI__builtin_arm_sev:
6951 case AArch64::BI__sev:
6952 HintID = 4;
6953 break;
6954 case AArch64::BI__builtin_arm_sevl:
6955 case AArch64::BI__sevl:
6956 HintID = 5;
6957 break;
6958 }
6960 if (HintID != static_cast<unsigned>(-1)) {
6961 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
6962 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
6963 }
6965 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
6966 Value *Address = EmitScalarExpr(E->getArg(0));
6967 Value *RW = EmitScalarExpr(E->getArg(1));
6968 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
6969 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
6970 Value *IsData = EmitScalarExpr(E->getArg(4));
6972 Value *Locality = nullptr;
6973 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
6974 // Temporal fetch, needs to convert cache level to locality.
6975 Locality = llvm::ConstantInt::get(Int32Ty,
6976 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
6977 } else {
6978 // Streaming fetch.
6979 Locality = llvm::ConstantInt::get(Int32Ty, 0);
6980 }
6982 // FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify
6983 // PLDL3STRM or PLDL2STRM.
6984 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
6985 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
6986 }
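// Mapping sketch (assuming ACLE cache level 0 denotes L1): with a temporal
// policy, levels 0/1/2 become LLVM prefetch localities 3/2/1; streaming
// accesses use locality 0.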
6988 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
6989 assert((getContext().getTypeSize(E->getType()) == 32) &&
6990 "rbit of unusual size!");
6991 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6992 return Builder.CreateCall(
6993 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
6994 }
6995 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
6996 assert((getContext().getTypeSize(E->getType()) == 64) &&
6997 "rbit of unusual size!");
6998 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6999 return Builder.CreateCall(
7000 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7001 }
7003 if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
7004 assert((getContext().getTypeSize(E->getType()) == 32) &&
7005 "__jcvt of unusual size!");
7006 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7007 return Builder.CreateCall(
7008 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
7009 }
7011 if (BuiltinID == AArch64::BI__clear_cache) {
7012 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7013 const FunctionDecl *FD = E->getDirectCallee();
7014 Value *Ops[2];
7015 for (unsigned i = 0; i < 2; i++)
7016 Ops[i] = EmitScalarExpr(E->getArg(i));
7017 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
7018 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
7019 StringRef Name = FD->getName();
7020 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
7021 }
7023 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
7024 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
7025 getContext().getTypeSize(E->getType()) == 128) {
7026 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
7027 ? Intrinsic::aarch64_ldaxp
7028 : Intrinsic::aarch64_ldxp);
7030 Value *LdPtr = EmitScalarExpr(E->getArg(0));
7031 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
7032 "ldxp");
7034 Value *Val0 = Builder.CreateExtractValue(Val, 1);
7035 Value *Val1 = Builder.CreateExtractValue(Val, 0);
7036 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
7037 Val0 = Builder.CreateZExt(Val0, Int128Ty);
7038 Val1 = Builder.CreateZExt(Val1, Int128Ty);
7040 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
7041 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
7042 Val = Builder.CreateOr(Val, Val1);
7043 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
7044 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
7045 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
7046 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
7048 QualType Ty = E->getType();
7049 llvm::Type *RealResTy = ConvertType(Ty);
7050 llvm::Type *PtrTy = llvm::IntegerType::get(
7051 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
7052 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
7054 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
7055 ? Intrinsic::aarch64_ldaxr
7056 : Intrinsic::aarch64_ldxr,
7057 PtrTy);
7058 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
7060 if (RealResTy->isPointerTy())
7061 return Builder.CreateIntToPtr(Val, RealResTy);
7063 llvm::Type *IntResTy = llvm::IntegerType::get(
7064 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
7065 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
7066 return Builder.CreateBitCast(Val, RealResTy);
7067 }
7069 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
7070 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
7071 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
7072 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
7073 ? Intrinsic::aarch64_stlxp
7074 : Intrinsic::aarch64_stxp);
7075 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
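// stlxp/stxp take the 128-bit value to store as two 64-bit halves, so
// the operand is spilled to a temporary and reloaded as an {i64, i64}
// struct that can be split with extractvalue.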
7077 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
7078 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
7080 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
7081 llvm::Value *Val = Builder.CreateLoad(Tmp);
7083 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
7084 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
7085 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
7086 Int8PtrTy);
7087 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
7088 }
7090 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
7091 BuiltinID == AArch64::BI__builtin_arm_stlex) {
7092 Value *StoreVal = EmitScalarExpr(E->getArg(0));
7093 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
7095 QualType Ty = E->getArg(0)->getType();
7096 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
7097 getContext().getTypeSize(Ty));
7098 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
7100 if (StoreVal->getType()->isPointerTy())
7101 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
7102 else {
7103 llvm::Type *IntTy = llvm::IntegerType::get(
7104 getLLVMContext(),
7105 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
7106 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
7107 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
7108 }
7110 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
7111 ? Intrinsic::aarch64_stlxr
7112 : Intrinsic::aarch64_stxr,
7113 StoreAddr->getType());
7114 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
7115 }
7117 if (BuiltinID == AArch64::BI__getReg) {
7118 Expr::EvalResult Result;
7119 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
7120 llvm_unreachable("Sema will ensure that the parameter is constant");
7122 llvm::APSInt Value = Result.Val.getInt();
7123 LLVMContext &Context = CGM.getLLVMContext();
7124 std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
7126 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
7127 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7128 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7130 llvm::Function *F =
7131 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
7132 return Builder.CreateCall(F, Metadata);
7133 }
7135 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
7136 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
7137 return Builder.CreateCall(F);
7138 }
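// A sequentially consistent fence with single-thread scope only orders
// this thread's accesses (a compiler barrier); it typically lowers to no
// fence instruction, matching MSVC's _ReadWriteBarrier semantics.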
7140 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
7141 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
7142 llvm::SyncScope::SingleThread);
7144 // CRC32
7145 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
7146 switch (BuiltinID) {
7147 case AArch64::BI__builtin_arm_crc32b:
7148 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
7149 case AArch64::BI__builtin_arm_crc32cb:
7150 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
7151 case AArch64::BI__builtin_arm_crc32h:
7152 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
7153 case AArch64::BI__builtin_arm_crc32ch:
7154 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
7155 case AArch64::BI__builtin_arm_crc32w:
7156 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
7157 case AArch64::BI__builtin_arm_crc32cw:
7158 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
7159 case AArch64::BI__builtin_arm_crc32d:
7160 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
7161 case AArch64::BI__builtin_arm_crc32cd:
7162 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
7163 }
7165 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
7166 Value *Arg0 = EmitScalarExpr(E->getArg(0));
7167 Value *Arg1 = EmitScalarExpr(E->getArg(1));
7168 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7170 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
7171 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
7173 return Builder.CreateCall(F, {Arg0, Arg1});
7174 }
7176 // Memory Tagging Extensions (MTE) Intrinsics
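// These builtins back the __arm_mte_* operations in arm_acle.h (e.g.
// __arm_mte_create_random_tag is __builtin_arm_irg); each mostly just
// forwards its operands to the matching aarch64 MTE intrinsic.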
7177 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
7178 switch (BuiltinID) {
7179 case AArch64::BI__builtin_arm_irg:
7180 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
7181 case AArch64::BI__builtin_arm_addg:
7182 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
7183 case AArch64::BI__builtin_arm_gmi:
7184 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
7185 case AArch64::BI__builtin_arm_ldg:
7186 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
7187 case AArch64::BI__builtin_arm_stg:
7188 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
7189 case AArch64::BI__builtin_arm_subp:
7190 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
7191 }
7193 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
7194 llvm::Type *T = ConvertType(E->getType());
7196 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
7197 Value *Pointer = EmitScalarExpr(E->getArg(0));
7198 Value *Mask = EmitScalarExpr(E->getArg(1));
7200 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7201 Mask = Builder.CreateZExt(Mask, Int64Ty);
7202 Value *RV = Builder.CreateCall(
7203 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
7204 return Builder.CreatePointerCast(RV, T);
7205 }
7206 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
7207 Value *Pointer = EmitScalarExpr(E->getArg(0));
7208 Value *TagOffset = EmitScalarExpr(E->getArg(1));
7210 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7211 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
7212 Value *RV = Builder.CreateCall(
7213 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
7214 return Builder.CreatePointerCast(RV, T);
7215 }
7216 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
7217 Value *Pointer = EmitScalarExpr(E->getArg(0));
7218 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
7220 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
7221 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7222 return Builder.CreateCall(
7223 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
7224 }
7225 // Although it is possible to supply a different return
7226 // address (first arg) to this intrinsic, for now we set the
7227 // return address to be the same as the input address.
7228 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
7229 Value *TagAddress = EmitScalarExpr(E->getArg(0));
7230 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
7231 Value *RV = Builder.CreateCall(
7232 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
7233 return Builder.CreatePointerCast(RV, T);
7234 }
7235 // Although it is possible to supply a different tag (to set)
7236 // as the first arg to this intrinsic, for now we supply
7237 // the tag of the input address arg (the common use case).
7238 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
7239 Value *TagAddress = EmitScalarExpr(E->getArg(0));
7240 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
7241 return Builder.CreateCall(
7242 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
7243 }
7244 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
7245 Value *PointerA = EmitScalarExpr(E->getArg(0));
7246 Value *PointerB = EmitScalarExpr(E->getArg(1));
7247 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
7248 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
7249 return Builder.CreateCall(
7250 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
7251 }
7252 }
7254 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
7255 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
7256 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
7257 BuiltinID == AArch64::BI__builtin_arm_wsr ||
7258 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
7259 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
7261 bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
7262 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
7263 BuiltinID == AArch64::BI__builtin_arm_rsrp;
7265 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
7266 BuiltinID == AArch64::BI__builtin_arm_wsrp;
7268 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
7269 BuiltinID != AArch64::BI__builtin_arm_wsr;
7271 llvm::Type *ValueType;
7272 llvm::Type *RegisterType = Int64Ty;
7273 if (IsPointerBuiltin) {
7274 ValueType = VoidPtrTy;
7275 } else if (Is64Bit) {
7276 ValueType = Int64Ty;
7277 } else {
7278 ValueType = Int32Ty;
7279 }
7281 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
7282 }
7284 if (BuiltinID == AArch64::BI_ReadStatusReg ||
7285 BuiltinID == AArch64::BI_WriteStatusReg) {
7286 LLVMContext &Context = CGM.getLLVMContext();
7288 unsigned SysReg =
7289 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
7291 std::string SysRegStr;
7292 llvm::raw_string_ostream(SysRegStr) <<
7293 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
7294 ((SysReg >> 11) & 7) << ":" <<
7295 ((SysReg >> 7) & 15) << ":" <<
7296 ((SysReg >> 3) & 15) << ":" <<
7297 (SysReg & 7);
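// The MSVC-style immediate packs op0 (encoded relative to 2), op1, CRn,
// CRm and op2 into 15 bits; the string form is "op0:op1:CRn:CRm:op2" as
// expected by read_register/write_register. E.g. 0x4000 names MIDR_EL1
// (S3_0_C0_C0_0) and yields "3:0:0:0:0".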
7299 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
7300 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7301 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7303 llvm::Type *RegisterType = Int64Ty;
7304 llvm::Type *Types[] = { RegisterType };
7306 if (BuiltinID == AArch64::BI_ReadStatusReg) {
7307 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
7309 return Builder.CreateCall(F, Metadata);
7310 }
7312 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
7313 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
7315 return Builder.CreateCall(F, { Metadata, ArgValue });
7316 }
7318 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
7319 llvm::Function *F =
7320 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
7321 return Builder.CreateCall(F);
7322 }
7324 if (BuiltinID == AArch64::BI__builtin_sponentry) {
7325 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
7326 return Builder.CreateCall(F);
7327 }
7329 // Find out if any arguments are required to be integer constant
7330 // expressions.
7331 unsigned ICEArguments = 0;
7332 ASTContext::GetBuiltinTypeError Error;
7333 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
7334 assert(Error == ASTContext::GE_None && "Should not codegen an error");
7336 llvm::SmallVector<Value*, 4> Ops;
7337 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
7338 if ((ICEArguments & (1 << i)) == 0) {
7339 Ops.push_back(EmitScalarExpr(E->getArg(i)));
7340 } else {
7341 // If this is required to be a constant, constant fold it so that we know
7342 // that the generated intrinsic gets a ConstantInt.
7343 llvm::APSInt Result;
7344 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
7345 assert(IsConst && "Constant arg isn't actually constant?");
7346 (void)IsConst;
7347 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
7348 }
7349 }
7351 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
7352 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
7353 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
7355 if (Builtin) {
7356 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
7357 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
7358 assert(Result && "SISD intrinsic should have been handled");
7359 return Result;
7360 }
7362 llvm::APSInt Result;
7363 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7364 NeonTypeFlags Type(0);
7365 if (Arg->isIntegerConstantExpr(Result, getContext()))
7366 // Determine the type of this overloaded NEON intrinsic.
7367 Type = NeonTypeFlags(Result.getZExtValue());
7369 bool usgn = Type.isUnsigned();
7370 bool quad = Type.isQuad();
7372 // Handle non-overloaded intrinsics first.
7373 switch (BuiltinID) {
7374 default: break;
7375 case NEON::BI__builtin_neon_vabsh_f16:
7376 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7377 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
7378 case NEON::BI__builtin_neon_vldrq_p128: {
7379 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
7380 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
7381 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
7382 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
7383 CharUnits::fromQuantity(16));
7384 }
7385 case NEON::BI__builtin_neon_vstrq_p128: {
7386 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
7387 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
7388 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
7389 }
7390 case NEON::BI__builtin_neon_vcvts_u32_f32:
7391 case NEON::BI__builtin_neon_vcvtd_u64_f64:
7392 usgn = true;
7393 LLVM_FALLTHROUGH;
7394 case NEON::BI__builtin_neon_vcvts_s32_f32:
7395 case NEON::BI__builtin_neon_vcvtd_s64_f64: {
7396 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7397 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
7398 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
7399 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
7400 Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
7401 if (usgn)
7402 return Builder.CreateFPToUI(Ops[0], InTy);
7403 return Builder.CreateFPToSI(Ops[0], InTy);
7404 }
7405 case NEON::BI__builtin_neon_vcvts_f32_u32:
7406 case NEON::BI__builtin_neon_vcvtd_f64_u64:
7407 usgn = true;
7408 LLVM_FALLTHROUGH;
7409 case NEON::BI__builtin_neon_vcvts_f32_s32:
7410 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
7411 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7412 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
7413 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
7414 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
7415 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
7416 if (usgn)
7417 return Builder.CreateUIToFP(Ops[0], FTy);
7418 return Builder.CreateSIToFP(Ops[0], FTy);
7419 }
7420 case NEON::BI__builtin_neon_vcvth_f16_u16:
7421 case NEON::BI__builtin_neon_vcvth_f16_u32:
7422 case NEON::BI__builtin_neon_vcvth_f16_u64:
7423 usgn = true;
7424 LLVM_FALLTHROUGH;
7425 case NEON::BI__builtin_neon_vcvth_f16_s16:
7426 case NEON::BI__builtin_neon_vcvth_f16_s32:
7427 case NEON::BI__builtin_neon_vcvth_f16_s64: {
7428 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7429 llvm::Type *FTy = HalfTy;
7430 llvm::Type *InTy;
7431 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
7432 InTy = Int64Ty;
7433 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
7434 InTy = Int32Ty;
7435 else
7436 InTy = Int16Ty;
7437 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
7438 if (usgn)
7439 return Builder.CreateUIToFP(Ops[0], FTy);
7440 return Builder.CreateSIToFP(Ops[0], FTy);
7441 }
7442 case NEON::BI__builtin_neon_vcvth_u16_f16:
7443 usgn = true;
7444 LLVM_FALLTHROUGH;
7445 case NEON::BI__builtin_neon_vcvth_s16_f16: {
7446 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7447 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7448 if (usgn)
7449 return Builder.CreateFPToUI(Ops[0], Int16Ty);
7450 return Builder.CreateFPToSI(Ops[0], Int16Ty);
7451 }
7452 case NEON::BI__builtin_neon_vcvth_u32_f16:
7453 usgn = true;
7454 LLVM_FALLTHROUGH;
7455 case NEON::BI__builtin_neon_vcvth_s32_f16: {
7456 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7457 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7458 if (usgn)
7459 return Builder.CreateFPToUI(Ops[0], Int32Ty);
7460 return Builder.CreateFPToSI(Ops[0], Int32Ty);
7461 }
7462 case NEON::BI__builtin_neon_vcvth_u64_f16:
7463 usgn = true;
7464 LLVM_FALLTHROUGH;
7465 case NEON::BI__builtin_neon_vcvth_s64_f16: {
7466 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7467 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7468 if (usgn)
7469 return Builder.CreateFPToUI(Ops[0], Int64Ty);
7470 return Builder.CreateFPToSI(Ops[0], Int64Ty);
7471 }
7472 case NEON::BI__builtin_neon_vcvtah_u16_f16:
7473 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
7474 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
7475 case NEON::BI__builtin_neon_vcvtph_u16_f16:
7476 case NEON::BI__builtin_neon_vcvtah_s16_f16:
7477 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
7478 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
7479 case NEON::BI__builtin_neon_vcvtph_s16_f16: {
7480 unsigned Int;
7481 llvm::Type* InTy = Int32Ty;
7482 llvm::Type* FTy = HalfTy;
7483 llvm::Type *Tys[2] = {InTy, FTy};
7484 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7485 switch (BuiltinID) {
7486 default: llvm_unreachable("missing builtin ID in switch!");
7487 case NEON::BI__builtin_neon_vcvtah_u16_f16:
7488 Int = Intrinsic::aarch64_neon_fcvtau; break;
7489 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
7490 Int = Intrinsic::aarch64_neon_fcvtmu; break;
7491 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
7492 Int = Intrinsic::aarch64_neon_fcvtnu; break;
7493 case NEON::BI__builtin_neon_vcvtph_u16_f16:
7494 Int = Intrinsic::aarch64_neon_fcvtpu; break;
7495 case NEON::BI__builtin_neon_vcvtah_s16_f16:
7496 Int = Intrinsic::aarch64_neon_fcvtas; break;
7497 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
7498 Int = Intrinsic::aarch64_neon_fcvtms; break;
7499 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
7500 Int = Intrinsic::aarch64_neon_fcvtns; break;
7501 case NEON::BI__builtin_neon_vcvtph_s16_f16:
7502 Int = Intrinsic::aarch64_neon_fcvtps; break;
7503 }
7504 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
7505 return Builder.CreateTrunc(Ops[0], Int16Ty);
7506 }
7507 case NEON::BI__builtin_neon_vcaleh_f16:
7508 case NEON::BI__builtin_neon_vcalth_f16:
7509 case NEON::BI__builtin_neon_vcageh_f16:
7510 case NEON::BI__builtin_neon_vcagth_f16: {
7511 unsigned Int;
7512 llvm::Type* InTy = Int32Ty;
7513 llvm::Type* FTy = HalfTy;
7514 llvm::Type *Tys[2] = {InTy, FTy};
7515 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7516 switch (BuiltinID) {
7517 default: llvm_unreachable("missing builtin ID in switch!");
7518 case NEON::BI__builtin_neon_vcageh_f16:
7519 Int = Intrinsic::aarch64_neon_facge; break;
7520 case NEON::BI__builtin_neon_vcagth_f16:
7521 Int = Intrinsic::aarch64_neon_facgt; break;
7522 case NEON::BI__builtin_neon_vcaleh_f16:
7523 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
7524 case NEON::BI__builtin_neon_vcalth_f16:
7525 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
7526 }
7527 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
7528 return Builder.CreateTrunc(Ops[0], Int16Ty);
7529 }
7530 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
7531 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
7532 unsigned Int;
7533 llvm::Type* InTy = Int32Ty;
7534 llvm::Type* FTy = HalfTy;
7535 llvm::Type *Tys[2] = {InTy, FTy};
7536 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7537 switch (BuiltinID) {
7538 default: llvm_unreachable("missing builtin ID in switch!");
7539 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
7540 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
7541 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
7542 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
7543 }
7544 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
7545 return Builder.CreateTrunc(Ops[0], Int16Ty);
7546 }
7547 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
7548 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
7549 unsigned Int;
7550 llvm::Type* FTy = HalfTy;
7551 llvm::Type* InTy = Int32Ty;
7552 llvm::Type *Tys[2] = {FTy, InTy};
7553 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7554 switch (BuiltinID) {
7555 default: llvm_unreachable("missing builtin ID in switch!");
7556 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
7557 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
7558 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
7559 break;
7560 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
7561 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
7562 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
7563 break;
7564 }
7565 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
7566 }
7567 case NEON::BI__builtin_neon_vpaddd_s64: {
7568 llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
7569 Value *Vec = EmitScalarExpr(E->getArg(0));
7570 // The vector is v2i64, so make sure it's bitcast to that.
7571 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
7572 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7573 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7574 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7575 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
7576 // Pairwise addition of a v2i64 into a scalar i64.
7577 return Builder.CreateAdd(Op0, Op1, "vpaddd");
7578 }
7579 case NEON::BI__builtin_neon_vpaddd_f64: {
7580 llvm::Type *Ty =
7581 llvm::VectorType::get(DoubleTy, 2);
7582 Value *Vec = EmitScalarExpr(E->getArg(0));
7583 // The vector is v2f64, so make sure it's bitcast to that.
7584 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
7585 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7586 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7587 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7588 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
7589 // Pairwise addition of a v2f64 into a scalar f64.
7590 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
7591 }
7592 case NEON::BI__builtin_neon_vpadds_f32: {
7593 llvm::Type *Ty =
7594 llvm::VectorType::get(FloatTy, 2);
7595 Value *Vec = EmitScalarExpr(E->getArg(0));
7596 // The vector is v2f32, so make sure it's bitcast to that.
7597 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
7598 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7599 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7600 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7601 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
7602 // Pairwise addition of a v2f32 into a scalar f32.
7603 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
7604 }
7605 case NEON::BI__builtin_neon_vceqzd_s64:
7606 case NEON::BI__builtin_neon_vceqzd_f64:
7607 case NEON::BI__builtin_neon_vceqzs_f32:
7608 case NEON::BI__builtin_neon_vceqzh_f16:
7609 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7610 return EmitAArch64CompareBuiltinExpr(
7611 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7612 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
7613 case NEON::BI__builtin_neon_vcgezd_s64:
7614 case NEON::BI__builtin_neon_vcgezd_f64:
7615 case NEON::BI__builtin_neon_vcgezs_f32:
7616 case NEON::BI__builtin_neon_vcgezh_f16:
7617 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7618 return EmitAArch64CompareBuiltinExpr(
7619 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7620 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
7621 case NEON::BI__builtin_neon_vclezd_s64:
7622 case NEON::BI__builtin_neon_vclezd_f64:
7623 case NEON::BI__builtin_neon_vclezs_f32:
7624 case NEON::BI__builtin_neon_vclezh_f16:
7625 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7626 return EmitAArch64CompareBuiltinExpr(
7627 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7628 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
7629 case NEON::BI__builtin_neon_vcgtzd_s64:
7630 case NEON::BI__builtin_neon_vcgtzd_f64:
7631 case NEON::BI__builtin_neon_vcgtzs_f32:
7632 case NEON::BI__builtin_neon_vcgtzh_f16:
7633 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7634 return EmitAArch64CompareBuiltinExpr(
7635 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7636 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
7637 case NEON::BI__builtin_neon_vcltzd_s64:
7638 case NEON::BI__builtin_neon_vcltzd_f64:
7639 case NEON::BI__builtin_neon_vcltzs_f32:
7640 case NEON::BI__builtin_neon_vcltzh_f16:
7641 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7642 return EmitAArch64CompareBuiltinExpr(
7643 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7644 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
7646 case NEON::BI__builtin_neon_vceqzd_u64: {
7647 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7648 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
7649 Ops[0] =
7650 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
7651 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
7652 }
7653 case NEON::BI__builtin_neon_vceqd_f64:
7654 case NEON::BI__builtin_neon_vcled_f64:
7655 case NEON::BI__builtin_neon_vcltd_f64:
7656 case NEON::BI__builtin_neon_vcged_f64:
7657 case NEON::BI__builtin_neon_vcgtd_f64: {
7658 llvm::CmpInst::Predicate P;
7659 switch (BuiltinID) {
7660 default: llvm_unreachable("missing builtin ID in switch!");
7661 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
7662 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
7663 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
7664 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
7665 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
7666 }
7667 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7668 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
7669 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
7670 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
7671 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
7673 case NEON::BI__builtin_neon_vceqs_f32:
7674 case NEON::BI__builtin_neon_vcles_f32:
7675 case NEON::BI__builtin_neon_vclts_f32:
7676 case NEON::BI__builtin_neon_vcges_f32:
7677 case NEON::BI__builtin_neon_vcgts_f32: {
7678 llvm::CmpInst::Predicate P;
7679 switch (BuiltinID) {
7680 default: llvm_unreachable("missing builtin ID in switch!");
7681 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
7682 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
7683 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
7684 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
7685 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
7686 }
7687 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7688 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
7689 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
7690 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
7691 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
7693 case NEON::BI__builtin_neon_vceqh_f16:
7694 case NEON::BI__builtin_neon_vcleh_f16:
7695 case NEON::BI__builtin_neon_vclth_f16:
7696 case NEON::BI__builtin_neon_vcgeh_f16:
7697 case NEON::BI__builtin_neon_vcgth_f16: {
7698 llvm::CmpInst::Predicate P;
7699 switch (BuiltinID) {
7700 default: llvm_unreachable("missing builtin ID in switch!");
7701 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
7702 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
7703 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
7704 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
7705 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
7706 }
7707 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7708 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7709 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
7710 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
7711 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
7712 }
7713 case NEON::BI__builtin_neon_vceqd_s64:
7714 case NEON::BI__builtin_neon_vceqd_u64:
7715 case NEON::BI__builtin_neon_vcgtd_s64:
7716 case NEON::BI__builtin_neon_vcgtd_u64:
7717 case NEON::BI__builtin_neon_vcltd_s64:
7718 case NEON::BI__builtin_neon_vcltd_u64:
7719 case NEON::BI__builtin_neon_vcged_u64:
7720 case NEON::BI__builtin_neon_vcged_s64:
7721 case NEON::BI__builtin_neon_vcled_u64:
7722 case NEON::BI__builtin_neon_vcled_s64: {
7723 llvm::CmpInst::Predicate P;
7724 switch (BuiltinID) {
7725 default: llvm_unreachable("missing builtin ID in switch!");
7726 case NEON::BI__builtin_neon_vceqd_s64:
7727 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
7728 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
7729 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
7730 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
7731 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
7732 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
7733 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
7734 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
7735 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
7736 }
7737 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7738 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
7739 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
7740 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
7741 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
7742 }
7743 case NEON::BI__builtin_neon_vtstd_s64:
7744 case NEON::BI__builtin_neon_vtstd_u64: {
7745 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7746 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
7747 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
7748 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
7749 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
7750 llvm::Constant::getNullValue(Int64Ty));
7751 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
7752 }
7753 case NEON::BI__builtin_neon_vset_lane_i8:
7754 case NEON::BI__builtin_neon_vset_lane_i16:
7755 case NEON::BI__builtin_neon_vset_lane_i32:
7756 case NEON::BI__builtin_neon_vset_lane_i64:
7757 case NEON::BI__builtin_neon_vset_lane_f32:
7758 case NEON::BI__builtin_neon_vsetq_lane_i8:
7759 case NEON::BI__builtin_neon_vsetq_lane_i16:
7760 case NEON::BI__builtin_neon_vsetq_lane_i32:
7761 case NEON::BI__builtin_neon_vsetq_lane_i64:
7762 case NEON::BI__builtin_neon_vsetq_lane_f32:
7763 Ops.push_back(EmitScalarExpr(E->getArg(2)));
7764 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
7765 case NEON::BI__builtin_neon_vset_lane_f64:
7766 // The vector type needs a cast for the v1f64 variant.
7767 Ops[1] = Builder.CreateBitCast(Ops[1],
7768 llvm::VectorType::get(DoubleTy, 1));
7769 Ops.push_back(EmitScalarExpr(E->getArg(2)));
7770 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
7771 case NEON::BI__builtin_neon_vsetq_lane_f64:
7772 // The vector type needs a cast for the v2f64 variant.
7773 Ops[1] = Builder.CreateBitCast(Ops[1],
7774 llvm::VectorType::get(DoubleTy, 2));
7775 Ops.push_back(EmitScalarExpr(E->getArg(2)));
7776 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
7778 case NEON::BI__builtin_neon_vget_lane_i8:
7779 case NEON::BI__builtin_neon_vdupb_lane_i8:
7780 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
7781 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7782 "vget_lane");
7783 case NEON::BI__builtin_neon_vgetq_lane_i8:
7784 case NEON::BI__builtin_neon_vdupb_laneq_i8:
7785 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
7786 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7787 "vgetq_lane");
7788 case NEON::BI__builtin_neon_vget_lane_i16:
7789 case NEON::BI__builtin_neon_vduph_lane_i16:
7790 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
7791 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7792 "vget_lane");
7793 case NEON::BI__builtin_neon_vgetq_lane_i16:
7794 case NEON::BI__builtin_neon_vduph_laneq_i16:
7795 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
7796 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7797 "vgetq_lane");
7798 case NEON::BI__builtin_neon_vget_lane_i32:
7799 case NEON::BI__builtin_neon_vdups_lane_i32:
7800 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
7801 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7802 "vget_lane");
7803 case NEON::BI__builtin_neon_vdups_lane_f32:
7804 Ops[0] = Builder.CreateBitCast(Ops[0],
7805 llvm::VectorType::get(FloatTy, 2));
7806 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7807 "vdups_lane");
7808 case NEON::BI__builtin_neon_vgetq_lane_i32:
7809 case NEON::BI__builtin_neon_vdups_laneq_i32:
7810 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
7811 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7812 "vgetq_lane");
7813 case NEON::BI__builtin_neon_vget_lane_i64:
7814 case NEON::BI__builtin_neon_vdupd_lane_i64:
7815 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
7816 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7817 "vget_lane");
7818 case NEON::BI__builtin_neon_vdupd_lane_f64:
7819 Ops[0] = Builder.CreateBitCast(Ops[0],
7820 llvm::VectorType::get(DoubleTy, 1));
7821 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7822 "vdupd_lane");
7823 case NEON::BI__builtin_neon_vgetq_lane_i64:
7824 case NEON::BI__builtin_neon_vdupd_laneq_i64:
7825 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
7826 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7827 "vgetq_lane");
7828 case NEON::BI__builtin_neon_vget_lane_f32:
7829 Ops[0] = Builder.CreateBitCast(Ops[0],
7830 llvm::VectorType::get(FloatTy, 2));
7831 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7832 "vget_lane");
7833 case NEON::BI__builtin_neon_vget_lane_f64:
7834 Ops[0] = Builder.CreateBitCast(Ops[0],
7835 llvm::VectorType::get(DoubleTy, 1));
7836 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7837 "vget_lane");
7838 case NEON::BI__builtin_neon_vgetq_lane_f32:
7839 case NEON::BI__builtin_neon_vdups_laneq_f32:
7840 Ops[0] = Builder.CreateBitCast(Ops[0],
7841 llvm::VectorType::get(FloatTy, 4));
7842 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7843 "vgetq_lane");
7844 case NEON::BI__builtin_neon_vgetq_lane_f64:
7845 case NEON::BI__builtin_neon_vdupd_laneq_f64:
7846 Ops[0] = Builder.CreateBitCast(Ops[0],
7847 llvm::VectorType::get(DoubleTy, 2));
7848 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
7849 "vgetq_lane");
7850 case NEON::BI__builtin_neon_vaddh_f16:
7851 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7852 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
7853 case NEON::BI__builtin_neon_vsubh_f16:
7854 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7855 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
7856 case NEON::BI__builtin_neon_vmulh_f16:
7857 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7858 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
7859 case NEON::BI__builtin_neon_vdivh_f16:
7860 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7861 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
7862 case NEON::BI__builtin_neon_vfmah_f16: {
7863 Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
7864 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
7865 return Builder.CreateCall(F,
7866 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
7867 }
7868 case NEON::BI__builtin_neon_vfmsh_f16: {
7869 Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
7870 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
7871 Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
7872 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
7873 return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
7874 }
7875 case NEON::BI__builtin_neon_vaddd_s64:
7876 case NEON::BI__builtin_neon_vaddd_u64:
7877 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
7878 case NEON::BI__builtin_neon_vsubd_s64:
7879 case NEON::BI__builtin_neon_vsubd_u64:
7880 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
7881 case NEON::BI__builtin_neon_vqdmlalh_s16:
7882 case NEON::BI__builtin_neon_vqdmlslh_s16: {
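// There is no scalar form of sqdmull, so the i16 operands are wrapped
// into <4 x i16> vectors (vectorWrapScalar16), the vector sqdmull is
// emitted, and lane 0 of the <4 x i32> product is extracted before the
// saturating accumulate.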
7883 SmallVector<Value *, 2> ProductOps;
7884 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
7885 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
7886 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
7887 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
7888 ProductOps, "vqdmlXl");
7889 Constant *CI = ConstantInt::get(SizeTy, 0);
7890 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
7892 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
7893 ? Intrinsic::aarch64_neon_sqadd
7894 : Intrinsic::aarch64_neon_sqsub;
7895 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
7896 }
7897 case NEON::BI__builtin_neon_vqshlud_n_s64: {
7898 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7899 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
7900 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
7901 Ops, "vqshlu_n");
7902 }
7903 case NEON::BI__builtin_neon_vqshld_n_u64:
7904 case NEON::BI__builtin_neon_vqshld_n_s64: {
7905 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
7906 ? Intrinsic::aarch64_neon_uqshl
7907 : Intrinsic::aarch64_neon_sqshl;
7908 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7909 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
7910 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
7911 }
7912 case NEON::BI__builtin_neon_vrshrd_n_u64:
7913 case NEON::BI__builtin_neon_vrshrd_n_s64: {
7914 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
7915 ? Intrinsic::aarch64_neon_urshl
7916 : Intrinsic::aarch64_neon_srshl;
7917 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7918 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
7919 Ops[1] = ConstantInt::get(Int64Ty, -SV);
7920 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
7921 }
7922 case NEON::BI__builtin_neon_vrsrad_n_u64:
7923 case NEON::BI__builtin_neon_vrsrad_n_s64: {
7924 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
7925 ? Intrinsic::aarch64_neon_urshl
7926 : Intrinsic::aarch64_neon_srshl;
7927 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
7928 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
7929 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
7930 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
7931 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
7932 }
7933 case NEON::BI__builtin_neon_vshld_n_s64:
7934 case NEON::BI__builtin_neon_vshld_n_u64: {
7935 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
7936 return Builder.CreateShl(
7937 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
7938 }
7939 case NEON::BI__builtin_neon_vshrd_n_s64: {
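// A shift amount equal to the bit width is poison in LLVM IR, so clamp
// it to 63; for an arithmetic right shift this is equivalent, since a
// 63-bit shift already replicates the sign bit everywhere.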
7940 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
7941 return Builder.CreateAShr(
7942 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
7943 Amt->getZExtValue())),
7944 "shrd_n");
7945 }
7946 case NEON::BI__builtin_neon_vshrd_n_u64: {
7947 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
7948 uint64_t ShiftAmt = Amt->getZExtValue();
7949 // Right-shifting an unsigned value by its size yields 0.
7950 if (ShiftAmt == 64)
7951 return ConstantInt::get(Int64Ty, 0);
7952 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
7953 "shrd_n");
7954 }
7955 case NEON::BI__builtin_neon_vsrad_n_s64: {
7956 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
7957 Ops[1] = Builder.CreateAShr(
7958 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
7959 Amt->getZExtValue())),
7960 "ssra_n");
7961 return Builder.CreateAdd(Ops[0], Ops[1]);
7962 }
7963 case NEON::BI__builtin_neon_vsrad_n_u64: {
7964 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
7965 uint64_t ShiftAmt = Amt->getZExtValue();
7966 // Right-shifting an unsigned value by its size yields 0.
7967 // As Op + 0 = Op, return Ops[0] directly.
7968 if (ShiftAmt == 64)
7969 return Ops[0];
7970 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
7971 "usra_n");
7972 return Builder.CreateAdd(Ops[0], Ops[1]);
7973 }
7974 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
7975 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
7976 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
7977 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
7978 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
7979 "lane");
7980 SmallVector<Value *, 2> ProductOps;
7981 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
7982 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
7983 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
7984 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
7985 ProductOps, "vqdmlXl");
7986 Constant *CI = ConstantInt::get(SizeTy, 0);
7987 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
7988 Ops.pop_back();
7990 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
7991 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
7992 ? Intrinsic::aarch64_neon_sqadd
7993 : Intrinsic::aarch64_neon_sqsub;
7994 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
7995 }
7996 case NEON::BI__builtin_neon_vqdmlals_s32:
7997 case NEON::BI__builtin_neon_vqdmlsls_s32: {
7998 SmallVector<Value *, 2> ProductOps;
7999 ProductOps.push_back(Ops[1]);
8000 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
8001 Ops[1] =
8002 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
8003 ProductOps, "vqdmlXl");
8005 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
8006 ? Intrinsic::aarch64_neon_sqadd
8007 : Intrinsic::aarch64_neon_sqsub;
8008 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
8009 }
8010 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
8011 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
8012 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
8013 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
8014 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
8015 "lane");
8016 SmallVector<Value *, 2> ProductOps;
8017 ProductOps.push_back(Ops[1]);
8018 ProductOps.push_back(Ops[2]);
8019 Ops[1] =
8020 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
8021 ProductOps, "vqdmlXl");
8022 Ops.pop_back();
8024 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
8025 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
8026 ? Intrinsic::aarch64_neon_sqadd
8027 : Intrinsic::aarch64_neon_sqsub;
8028 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
8029 }
8030 case NEON::BI__builtin_neon_vduph_lane_f16: {
8031 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8032 "vget_lane");
8033 }
8034 case NEON::BI__builtin_neon_vduph_laneq_f16: {
8035 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8036 "vgetq_lane");
8037 }
8038 case AArch64::BI_BitScanForward:
8039 case AArch64::BI_BitScanForward64:
8040 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
8041 case AArch64::BI_BitScanReverse:
8042 case AArch64::BI_BitScanReverse64:
8043 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
8044 case AArch64::BI_InterlockedAnd64:
8045 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
8046 case AArch64::BI_InterlockedExchange64:
8047 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
8048 case AArch64::BI_InterlockedExchangeAdd64:
8049 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
8050 case AArch64::BI_InterlockedExchangeSub64:
8051 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
8052 case AArch64::BI_InterlockedOr64:
8053 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
8054 case AArch64::BI_InterlockedXor64:
8055 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
8056 case AArch64::BI_InterlockedDecrement64:
8057 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
8058 case AArch64::BI_InterlockedIncrement64:
8059 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
8060 case AArch64::BI_InterlockedExchangeAdd8_acq:
8061 case AArch64::BI_InterlockedExchangeAdd16_acq:
8062 case AArch64::BI_InterlockedExchangeAdd_acq:
8063 case AArch64::BI_InterlockedExchangeAdd64_acq:
8064 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
8065 case AArch64::BI_InterlockedExchangeAdd8_rel:
8066 case AArch64::BI_InterlockedExchangeAdd16_rel:
8067 case AArch64::BI_InterlockedExchangeAdd_rel:
8068 case AArch64::BI_InterlockedExchangeAdd64_rel:
8069 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
8070 case AArch64::BI_InterlockedExchangeAdd8_nf:
8071 case AArch64::BI_InterlockedExchangeAdd16_nf:
8072 case AArch64::BI_InterlockedExchangeAdd_nf:
8073 case AArch64::BI_InterlockedExchangeAdd64_nf:
8074 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
8075 case AArch64::BI_InterlockedExchange8_acq:
8076 case AArch64::BI_InterlockedExchange16_acq:
8077 case AArch64::BI_InterlockedExchange_acq:
8078 case AArch64::BI_InterlockedExchange64_acq:
8079 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
8080 case AArch64::BI_InterlockedExchange8_rel:
8081 case AArch64::BI_InterlockedExchange16_rel:
8082 case AArch64::BI_InterlockedExchange_rel:
8083 case AArch64::BI_InterlockedExchange64_rel:
8084 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
8085 case AArch64::BI_InterlockedExchange8_nf:
8086 case AArch64::BI_InterlockedExchange16_nf:
8087 case AArch64::BI_InterlockedExchange_nf:
8088 case AArch64::BI_InterlockedExchange64_nf:
8089 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
8090 case AArch64::BI_InterlockedCompareExchange8_acq:
8091 case AArch64::BI_InterlockedCompareExchange16_acq:
8092 case AArch64::BI_InterlockedCompareExchange_acq:
8093 case AArch64::BI_InterlockedCompareExchange64_acq:
8094 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
8095 case AArch64::BI_InterlockedCompareExchange8_rel:
8096 case AArch64::BI_InterlockedCompareExchange16_rel:
8097 case AArch64::BI_InterlockedCompareExchange_rel:
8098 case AArch64::BI_InterlockedCompareExchange64_rel:
8099 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
8100 case AArch64::BI_InterlockedCompareExchange8_nf:
8101 case AArch64::BI_InterlockedCompareExchange16_nf:
8102 case AArch64::BI_InterlockedCompareExchange_nf:
8103 case AArch64::BI_InterlockedCompareExchange64_nf:
8104 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
8105 case AArch64::BI_InterlockedOr8_acq:
8106 case AArch64::BI_InterlockedOr16_acq:
8107 case AArch64::BI_InterlockedOr_acq:
8108 case AArch64::BI_InterlockedOr64_acq:
8109 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
8110 case AArch64::BI_InterlockedOr8_rel:
8111 case AArch64::BI_InterlockedOr16_rel:
8112 case AArch64::BI_InterlockedOr_rel:
8113 case AArch64::BI_InterlockedOr64_rel:
8114 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
8115 case AArch64::BI_InterlockedOr8_nf:
8116 case AArch64::BI_InterlockedOr16_nf:
8117 case AArch64::BI_InterlockedOr_nf:
8118 case AArch64::BI_InterlockedOr64_nf:
8119 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
8120 case AArch64::BI_InterlockedXor8_acq:
8121 case AArch64::BI_InterlockedXor16_acq:
8122 case AArch64::BI_InterlockedXor_acq:
8123 case AArch64::BI_InterlockedXor64_acq:
8124 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
8125 case AArch64::BI_InterlockedXor8_rel:
8126 case AArch64::BI_InterlockedXor16_rel:
8127 case AArch64::BI_InterlockedXor_rel:
8128 case AArch64::BI_InterlockedXor64_rel:
8129 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
8130 case AArch64::BI_InterlockedXor8_nf:
8131 case AArch64::BI_InterlockedXor16_nf:
8132 case AArch64::BI_InterlockedXor_nf:
8133 case AArch64::BI_InterlockedXor64_nf:
8134 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
8135 case AArch64::BI_InterlockedAnd8_acq:
8136 case AArch64::BI_InterlockedAnd16_acq:
8137 case AArch64::BI_InterlockedAnd_acq:
8138 case AArch64::BI_InterlockedAnd64_acq:
8139 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
8140 case AArch64::BI_InterlockedAnd8_rel:
8141 case AArch64::BI_InterlockedAnd16_rel:
8142 case AArch64::BI_InterlockedAnd_rel:
8143 case AArch64::BI_InterlockedAnd64_rel:
8144 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
8145 case AArch64::BI_InterlockedAnd8_nf:
8146 case AArch64::BI_InterlockedAnd16_nf:
8147 case AArch64::BI_InterlockedAnd_nf:
8148 case AArch64::BI_InterlockedAnd64_nf:
8149 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
8150 case AArch64::BI_InterlockedIncrement16_acq:
8151 case AArch64::BI_InterlockedIncrement_acq:
8152 case AArch64::BI_InterlockedIncrement64_acq:
8153 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
8154 case AArch64::BI_InterlockedIncrement16_rel:
8155 case AArch64::BI_InterlockedIncrement_rel:
8156 case AArch64::BI_InterlockedIncrement64_rel:
8157 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
8158 case AArch64::BI_InterlockedIncrement16_nf:
8159 case AArch64::BI_InterlockedIncrement_nf:
8160 case AArch64::BI_InterlockedIncrement64_nf:
8161 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
8162 case AArch64::BI_InterlockedDecrement16_acq:
8163 case AArch64::BI_InterlockedDecrement_acq:
8164 case AArch64::BI_InterlockedDecrement64_acq:
8165 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
8166 case AArch64::BI_InterlockedDecrement16_rel:
8167 case AArch64::BI_InterlockedDecrement_rel:
8168 case AArch64::BI_InterlockedDecrement64_rel:
8169 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
8170 case AArch64::BI_InterlockedDecrement16_nf:
8171 case AArch64::BI_InterlockedDecrement_nf:
8172 case AArch64::BI_InterlockedDecrement64_nf:
8173 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
8175 case AArch64::BI_InterlockedAdd: {
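// atomicrmw add returns the value held in memory *before* the addition,
// whereas _InterlockedAdd returns the resulting value, hence the extra
// add below.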
8176 Value *Arg0 = EmitScalarExpr(E->getArg(0));
8177 Value *Arg1 = EmitScalarExpr(E->getArg(1));
8178 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
8179 AtomicRMWInst::Add, Arg0, Arg1,
8180 llvm::AtomicOrdering::SequentiallyConsistent);
8181 return Builder.CreateAdd(RMWI, Arg1);
8182 }
8183 }
8185 llvm::VectorType *VTy = GetNeonType(this, Type);
8186 llvm::Type *Ty = VTy;
8187 if (!Ty)
8188 return nullptr;
8190 // Not all intrinsics handled by the common case work for AArch64 yet, so only
8191 // defer to common code if it's been added to our special map.
8192 Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
8193 AArch64SIMDIntrinsicsProvenSorted);
8195 if (Builtin)
8196 return EmitCommonNeonBuiltinExpr(
8197 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
8198 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
8199 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
8201 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
8202 return V;
8204 unsigned Int;
8205 switch (BuiltinID) {
8206 default: return nullptr;
8207 case NEON::BI__builtin_neon_vbsl_v:
8208 case NEON::BI__builtin_neon_vbslq_v: {
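// vbsl is a bitwise select, result = (mask & a) | (~mask & b); it is
// computed on the integer form of the vectors so that the same lowering
// also covers floating-point element types.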
8209 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
8210 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
8211 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
8212 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
8214 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
8215 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
8216 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
8217 return Builder.CreateBitCast(Ops[0], Ty);
8218 }
8219 case NEON::BI__builtin_neon_vfma_lane_v:
8220 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
8221 // The ARM builtins (and instructions) have the addend as the first
8222 // operand, but the 'fma' intrinsics have it last. Swap it around here.
8223 Value *Addend = Ops[0];
8224 Value *Multiplicand = Ops[1];
8225 Value *LaneSource = Ops[2];
8226 Ops[0] = Multiplicand;
8227 Ops[1] = LaneSource;
8228 Ops[2] = Addend;
8230 // Now adjust things to handle the lane access.
8231 llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
8232 llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
8233 VTy;
8234 llvm::Constant *cst = cast<Constant>(Ops[3]);
8235 Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
8236 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
8237 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
8238 Ops.pop_back();
8240 Int = Intrinsic::fma;
8241 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
8242 }
8243 case NEON::BI__builtin_neon_vfma_laneq_v: {
8244 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
8245 // v1f64 fma should be mapped to Neon scalar f64 fma
8246 if (VTy && VTy->getElementType() == DoubleTy) {
8247 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
8248 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
8249 llvm::Type *VTy = GetNeonType(this,
8250 NeonTypeFlags(NeonTypeFlags::Float64, false, true));
8251 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
8252 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
8253 Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
8254 Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
8255 return Builder.CreateBitCast(Result, Ty);
8256 }
8257 Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
8258 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8259 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8261 llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
8262 VTy->getNumElements() * 2);
8263 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
8264 Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
8265 cast<ConstantInt>(Ops[3]));
8266 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
8268 return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
8269 }
8270 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
8271 Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
8272 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8273 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8275 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
8276 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
8277 return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
8278 }
8279 case NEON::BI__builtin_neon_vfmah_lane_f16:
8280 case NEON::BI__builtin_neon_vfmas_lane_f32:
8281 case NEON::BI__builtin_neon_vfmah_laneq_f16:
8282 case NEON::BI__builtin_neon_vfmas_laneq_f32:
8283 case NEON::BI__builtin_neon_vfmad_lane_f64:
8284 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
8285 Ops.push_back(EmitScalarExpr(E->getArg(3)));
8286 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
8287 Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
8288 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
8289 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
8290 }
8291 case NEON::BI__builtin_neon_vmull_v:
8292 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8293 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
8294 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
8295 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
8296 case NEON::BI__builtin_neon_vmax_v:
8297 case NEON::BI__builtin_neon_vmaxq_v:
8298 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8299 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
8300 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
8301 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
8302 case NEON::BI__builtin_neon_vmaxh_f16: {
8303 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8304 Int = Intrinsic::aarch64_neon_fmax;
8305 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
8306 }
8307 case NEON::BI__builtin_neon_vmin_v:
8308 case NEON::BI__builtin_neon_vminq_v:
8309 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8310 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
8311 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
8312 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
8313 case NEON::BI__builtin_neon_vminh_f16: {
8314 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8315 Int = Intrinsic::aarch64_neon_fmin;
8316 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
8317 }
8318 case NEON::BI__builtin_neon_vabd_v:
8319 case NEON::BI__builtin_neon_vabdq_v:
8320 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8321 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
8322 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
8323 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
8324 case NEON::BI__builtin_neon_vpadal_v:
8325 case NEON::BI__builtin_neon_vpadalq_v: {
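// saddlp/uaddlp only performs the pairwise widening add of Ops[1]; the
// accumulation into Ops[0] implied by vpadal is emitted separately as a
// plain vector add below.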
8326 unsigned ArgElts = VTy->getNumElements();
8327 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
8328 unsigned BitWidth = EltTy->getBitWidth();
8329 llvm::Type *ArgTy = llvm::VectorType::get(
8330 llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
8331 llvm::Type* Tys[2] = { VTy, ArgTy };
8332 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
8333 SmallVector<llvm::Value*, 1> TmpOps;
8334 TmpOps.push_back(Ops[1]);
8335 Function *F = CGM.getIntrinsic(Int, Tys);
8336 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
8337 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
8338 return Builder.CreateAdd(tmp, addend);
8339 }
  case NEON::BI__builtin_neon_vpmin_v:
  case NEON::BI__builtin_neon_vpminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case NEON::BI__builtin_neon_vpmax_v:
  case NEON::BI__builtin_neon_vpmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case NEON::BI__builtin_neon_vminnm_v:
  case NEON::BI__builtin_neon_vminnmq_v:
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
  case NEON::BI__builtin_neon_vminnmh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
  case NEON::BI__builtin_neon_vmaxnm_v:
  case NEON::BI__builtin_neon_vmaxnmq_v:
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vmaxnmh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vrecpss_f32: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vrecpsd_f64:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
                        Ops, "vrecps");
  case NEON::BI__builtin_neon_vrecpsh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
                        Ops, "vrecps");
  case NEON::BI__builtin_neon_vqshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqrshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    Int = Intrinsic::aarch64_neon_rshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
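  // The vrnd* builtins map onto generic LLVM rounding intrinsics
  // (round/floor/ceil/rint/nearbyint/trunc); only round-to-nearest-even
  // (vrndn) needs the AArch64-specific frintn intrinsic.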
  case NEON::BI__builtin_neon_vrndah_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrnda_v:
  case NEON::BI__builtin_neon_vrndaq_v: {
    Int = Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrndih_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
  }
  case NEON::BI__builtin_neon_vrndmh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndm_v:
  case NEON::BI__builtin_neon_vrndmq_v: {
    Int = Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndnh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndn_v:
  case NEON::BI__builtin_neon_vrndnq_v: {
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndns_f32: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndph_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndp_v:
  case NEON::BI__builtin_neon_vrndpq_v: {
    Int = Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndxh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrndx_v:
  case NEON::BI__builtin_neon_vrndxq_v: {
    Int = Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrndh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
  }
  case NEON::BI__builtin_neon_vrnd_v:
  case NEON::BI__builtin_neon_vrndq_v: {
    Int = Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
  }
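  // The conversions below are emitted as plain IR cast instructions
  // (uitofp/sitofp, fpext/fptrunc, fptoui/fptosi) rather than intrinsics.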
  case NEON::BI__builtin_neon_vcvt_f64_v:
  case NEON::BI__builtin_neon_vcvtq_f64_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f64_f32: {
    assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
           "unexpected vcvt_f64_f32 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_f32_f64: {
    assert(Type.getEltType() == NeonTypeFlags::Float32 &&
           "unexpected vcvt_f32_f64 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_v:
  case NEON::BI__builtin_neon_vcvt_u16_v:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_v:
  case NEON::BI__builtin_neon_vcvtq_u16_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
    if (usgn)
      return Builder.CreateFPToUI(Ops[0], Ty);
    return Builder.CreateFPToSI(Ops[0], Ty);
  }
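  // vcvta/vcvtm/vcvtn/vcvtp convert with an explicit rounding mode (ties
  // away from zero, toward -inf, ties to even, toward +inf) via the AArch64
  // fcvt* intrinsics, overloaded on the result and source vector types.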
  case NEON::BI__builtin_neon_vcvta_s16_v:
  case NEON::BI__builtin_neon_vcvta_u16_v:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_v:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_v:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
  }
  case NEON::BI__builtin_neon_vcvtm_s16_v:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_v:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtm_u16_v:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_v:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
  }
  case NEON::BI__builtin_neon_vcvtn_s16_v:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_v:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtn_u16_v:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_v:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
  }
  case NEON::BI__builtin_neon_vcvtp_s16_v:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_v:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtp_u16_v:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_v:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
  }
  case NEON::BI__builtin_neon_vmulx_v:
  case NEON::BI__builtin_neon_vmulxq_v: {
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmulxh_lane_f16:
  case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
    // vmulx_lane should be mapped to Neon scalar mulx after
    // extracting the scalar element
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Ops.pop_back();
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmul_lane_v:
  case NEON::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = false;
    if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
      Quad = true;
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    llvm::Type *VTy = GetNeonType(this,
      NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Result, Ty);
  }
  case NEON::BI__builtin_neon_vnegd_s64:
    return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
  case NEON::BI__builtin_neon_vnegh_f16:
    return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
  case NEON::BI__builtin_neon_vpmaxnm_v:
  case NEON::BI__builtin_neon_vpmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_fmaxnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case NEON::BI__builtin_neon_vpminnm_v:
  case NEON::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_fminnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
  case NEON::BI__builtin_neon_vsqrth_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::sqrt;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vsqrt_v:
  case NEON::BI__builtin_neon_vsqrtq_v: {
    Int = Intrinsic::sqrt;
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vrbit_v:
  case NEON::BI__builtin_neon_vrbitq_v: {
    Int = Intrinsic::aarch64_neon_rbit;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
  }
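  // The across-lanes reductions below share one pattern: widen the result
  // type (i32 or half) for the AArch64 reduction intrinsic, emit the call,
  // then truncate back to the narrow element type.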
  case NEON::BI__builtin_neon_vaddv_u8:
    // FIXME: These are handled by the AArch64 scalar code.
    usgn = true;
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vaddv_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vaddv_u16:
    usgn = true;
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vaddv_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddvq_u8:
    usgn = true;
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vaddvq_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vaddvq_u16:
    usgn = true;
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vaddvq_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_f16: {
    Int = Intrinsic::aarch64_neon_fmaxv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxvq_f16: {
    Int = Intrinsic::aarch64_neon_fmaxv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminv_u8: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminv_u16: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminvq_u8: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminvq_u16: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminv_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminv_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminvq_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminvq_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminv_f16: {
    Int = Intrinsic::aarch64_neon_fminv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminvq_f16: {
    Int = Intrinsic::aarch64_neon_fminv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxnmv_f16: {
    Int = Intrinsic::aarch64_neon_fmaxnmv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxnmvq_f16: {
    Int = Intrinsic::aarch64_neon_fmaxnmv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminnmv_f16: {
    Int = Intrinsic::aarch64_neon_fminnmv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminnmvq_f16: {
    Int = Intrinsic::aarch64_neon_fminnmv;
    Ty = HalfTy;
    VTy = llvm::VectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmul_n_f64: {
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
    return Builder.CreateFMul(Ops[0], RHS);
  }
  case NEON::BI__builtin_neon_vaddlv_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlv_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlvq_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlv_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlv_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlvq_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::VectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v: {
    Int = Intrinsic::aarch64_neon_vsri;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsri_n");
  }
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v: {
    Int = Intrinsic::aarch64_neon_vsli;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsli_n");
  }
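  // vsra/vrsra accumulate after a shift: emit the plain or rounding right
  // shift first, then an ordinary vector add of the accumulator.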
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v: {
    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
    SmallVector<llvm::Value*,2> TmpOps;
    TmpOps.push_back(Ops[1]);
    TmpOps.push_back(Ops[2]);
    Function* F = CGM.getIntrinsic(Int, Ty);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return Builder.CreateAdd(Ops[0], tmp);
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
    auto Alignment = CharUnits::fromQuantity(
        BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
    return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
  }
  case NEON::BI__builtin_neon_vst1_v:
  case NEON::BI__builtin_neon_vst1q_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  case NEON::BI__builtin_neon_vld1_lane_v:
  case NEON::BI__builtin_neon_vld1q_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    auto Alignment = CharUnits::fromQuantity(
        BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
    Ops[0] =
        Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    auto Alignment = CharUnits::fromQuantity(
        BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
    Ops[0] =
        Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case NEON::BI__builtin_neon_vst1_lane_v:
  case NEON::BI__builtin_neon_vst1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    return Builder.CreateDefaultAlignedStore(Ops[1],
                                             Builder.CreateBitCast(Ops[0], Ty));
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v: {
    llvm::Type *PTy =
      llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v: {
    llvm::Type *PTy =
      llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *PTy =
      llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
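  // The ldN-lane intrinsics expect (vectors..., lane, pointer), while the
  // builtin passes the pointer early, so the pointer operand is rotated to
  // the back of Ops before the call.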
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
                        Ops, "");
  }
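  // vtrn/vuzp/vzip produce two result vectors: each half is built with a
  // shufflevector and stored through the result pointer in Ops[0] at
  // consecutive vector-sized offsets.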
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<uint32_t, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(i+vi);
        Indices.push_back(i+e+vi);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<uint32_t, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(2*i+vi);

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<uint32_t, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back((i + vi*e) >> 1);
        Indices.push_back(((i + vi*e) >> 1)+e);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vqtbl1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
                        Ops, "vtbl1");
  }
  case NEON::BI__builtin_neon_vqtbl2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
                        Ops, "vtbl2");
  }
  case NEON::BI__builtin_neon_vqtbl3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
                        Ops, "vtbl3");
  }
  case NEON::BI__builtin_neon_vqtbl4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
                        Ops, "vtbl4");
  }
  case NEON::BI__builtin_neon_vqtbx1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
                        Ops, "vtbx1");
  }
  case NEON::BI__builtin_neon_vqtbx2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
                        Ops, "vtbx2");
  }
  case NEON::BI__builtin_neon_vqtbx3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
                        Ops, "vtbx3");
  }
  case NEON::BI__builtin_neon_vqtbx4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
                        Ops, "vtbx4");
  }
  case NEON::BI__builtin_neon_vsqadd_v:
  case NEON::BI__builtin_neon_vsqaddq_v: {
    Int = Intrinsic::aarch64_neon_usqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
  }
  case NEON::BI__builtin_neon_vuqadd_v:
  case NEON::BI__builtin_neon_vuqaddq_v: {
    Int = Intrinsic::aarch64_neon_suqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
  }
  }
}

Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  assert(BuiltinID == BPF::BI__builtin_preserve_field_info &&
         "unexpected BPF builtin");

  const Expr *Arg = E->getArg(0);
  bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;

  if (!getDebugInfo()) {
    CGM.Error(E->getExprLoc(), "using builtin_preserve_field_info() without -g");
    return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
                      : EmitLValue(Arg).getPointer();
  }

  // Enable underlying preserve_*_access_index() generation.
  bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
  IsInPreservedAIRegion = true;
  Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
                                : EmitLValue(Arg).getPointer();
  IsInPreservedAIRegion = OldIsInPreservedAIRegion;

  ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
  Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());

  // Build the IR for the preserve_field_info intrinsic.
  llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
      &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
      {FieldAddr->getType()});
  return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
}

llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    SmallVector<llvm::Constant*, 16> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));

  return Result;
}

// Convert the mask from an integer type to a vector of i1.
static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
                              unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
                         cast<IntegerType>(Mask->getType())->getBitWidth());
  Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
                                              makeArrayRef(Indices, NumElts),
                                              "extract");
  }
  return MaskVec;
}

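// The masked load/store helpers below lower directly to the generic
// llvm.masked.load / llvm.masked.store intrinsics with an i1 vector mask.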
static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
                                 ArrayRef<Value *> Ops,
                                 unsigned Align) {
  // Cast the pointer to right type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                               llvm::PointerType::getUnqual(Ops[1]->getType()));

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   Ops[1]->getType()->getVectorNumElements());

  return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
}

static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
                                ArrayRef<Value *> Ops, unsigned Align) {
  // Cast the pointer to right type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                               llvm::PointerType::getUnqual(Ops[1]->getType()));

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   Ops[1]->getType()->getVectorNumElements());

  return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
}

static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
                                ArrayRef<Value *> Ops) {
  llvm::Type *ResultTy = Ops[1]->getType();
  llvm::Type *PtrTy = ResultTy->getVectorElementType();

  // Cast the pointer to element type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                         llvm::PointerType::getUnqual(PtrTy));

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   ResultTy->getVectorNumElements());

  llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
                                           ResultTy);
  return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
}

static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops,
                                    bool IsCompress) {
  llvm::Type *ResultTy = Ops[1]->getType();

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   ResultTy->getVectorNumElements());

  Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
                                 : Intrinsic::x86_avx512_mask_expand;
  llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
  return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
}

static Value *EmitX86CompressStore(CodeGenFunction &CGF,
                                   ArrayRef<Value *> Ops) {
  llvm::Type *ResultTy = Ops[1]->getType();
  llvm::Type *PtrTy = ResultTy->getVectorElementType();

  // Cast the pointer to element type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                         llvm::PointerType::getUnqual(PtrTy));

  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                   ResultTy->getVectorNumElements());

  llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
                                           ResultTy);
  return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
}

static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
                               ArrayRef<Value *> Ops,
                               bool InvertLHS = false) {
  unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
  Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
  Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);

  if (InvertLHS)
    LHS = CGF.Builder.CreateNot(LHS);

  return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
                                   Ops[0]->getType());
}

static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
                                 Value *Amt, bool IsRight) {
  llvm::Type *Ty = Op0->getType();

  // Amount may be scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
  }

  unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGF.CGM.getIntrinsic(IID, Ty);
  return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
}

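// XOP vpcom/vpcomu: the immediate operand encodes the predicate, so this
// lowers to a plain integer compare plus sign extension, or to a constant
// all-zeros/all-ones vector for the always-false/always-true encodings.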
static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                           bool IsSigned) {
  Value *Op0 = Ops[0];
  Value *Op1 = Ops[1];
  llvm::Type *Ty = Op0->getType();
  uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return llvm::Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return llvm::Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
  return Res;
}

static Value *EmitX86Select(CodeGenFunction &CGF,
                            Value *Mask, Value *Op0, Value *Op1) {

  // If the mask is all ones just return first argument.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements());

  return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
                                  Value *Mask, Value *Op0, Value *Op1) {
  // If the mask is all ones just return first argument.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  llvm::VectorType *MaskTy =
    llvm::VectorType::get(CGF.Builder.getInt1Ty(),
                          Mask->getType()->getIntegerBitWidth());
  Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
  Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
  return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}

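// AVX-512 compare results are returned as integer masks of at least 8 bits;
// results with fewer than 8 elements are padded with zero lanes before the
// final bitcast back to an integer.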
static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
                                         unsigned NumElts, Value *MaskIn) {
  if (MaskIn) {
    const auto *C = dyn_cast<Constant>(MaskIn);
    if (!C || !C->isAllOnesValue())
      Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = i % NumElts + NumElts;
    Cmp = CGF.Builder.CreateShuffleVector(
        Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
  }

  return CGF.Builder.CreateBitCast(Cmp,
                                   IntegerType::get(CGF.getLLVMContext(),
                                                    std::max(NumElts, 8U)));
}

static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
                                   bool Signed, ArrayRef<Value *> Ops) {
  assert((Ops.size() == 2 || Ops.size() == 4) &&
         "Unexpected number of arguments");
  unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
  Value *Cmp;

  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ;  break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE;  break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
  }

  Value *MaskIn = nullptr;
  if (Ops.size() == 4)
    MaskIn = Ops[3];

  return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
}

static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
  Value *Zero = Constant::getNullValue(In->getType());
  return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
}

static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops, bool IsSigned) {
  unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
  llvm::Type *Ty = Ops[1]->getType();

  Value *Res;
  if (Rnd != 4) {
    Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
                                 : Intrinsic::x86_avx512_uitofp_round;
    Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
    Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
  } else {
    Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
                   : CGF.Builder.CreateUIToFP(Ops[0], Ty);
  }

  return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
}

static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {

  llvm::Type *Ty = Ops[0]->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]);
  Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero);
  Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub);
  return Res;
}

static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
                            ArrayRef<Value *> Ops) {
  Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
  Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);

  assert(Ops.size() == 2);
  return Res;
}

// Lowers X86 FMA intrinsics to IR.
static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                             unsigned BuiltinID, bool IsAddSub) {

  bool Subtract = false;
  Intrinsic::ID IID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  default: break;
  case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
    Subtract = true;
    LLVM_FALLTHROUGH;
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
  case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
    Subtract = true;
    LLVM_FALLTHROUGH;
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
  case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
    Subtract = true;
    LLVM_FALLTHROUGH;
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
    break;
  case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
    Subtract = true;
    LLVM_FALLTHROUGH;
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
    break;
  }

  Value *A = Ops[0];
  Value *B = Ops[1];
  Value *C = Ops[2];

  if (Subtract)
    C = CGF.Builder.CreateFNeg(C);

  Value *Res;

  // Only handle in case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
  if (IID != Intrinsic::not_intrinsic &&
      cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) {
    Function *Intr = CGF.CGM.getIntrinsic(IID);
    Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
  } else {
    llvm::Type *Ty = A->getType();
    Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
    Res = CGF.Builder.CreateCall(FMA, {A, B, C} );

    if (IsAddSub) {
      // Negate even elts in C using a mask.
      unsigned NumElts = Ty->getVectorNumElements();
      SmallVector<uint32_t, 16> Indices(NumElts);
      for (unsigned i = 0; i != NumElts; ++i)
        Indices[i] = i + (i % 2) * NumElts;

      Value *NegC = CGF.Builder.CreateFNeg(C);
      Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
      Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
    }
  }

  // Handle any required masking.
  Value *MaskFalseVal = nullptr;
  switch (BuiltinID) {
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
    MaskFalseVal = Ops[0];
    break;
  case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
    MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
    break;
  case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
    MaskFalseVal = Ops[2];
    break;
  }

  if (MaskFalseVal)
    return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);

  return Res;
}

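// Scalar (SS/SD) AVX-512 FMA: the arithmetic is performed on element 0 only,
// and the remaining lanes of the result are taken from Upper.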
static Value *
EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
                  Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
                  bool NegAcc = false) {
  unsigned Rnd = 4;
  if (Ops.size() > 4)
    Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();

  if (NegAcc)
    Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);

  Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
  Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
  Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
  Value *Res;
  if (Rnd != 4) {
    Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
                        Intrinsic::x86_avx512_vfmadd_f32 :
                        Intrinsic::x86_avx512_vfmadd_f64;
    Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
                                 {Ops[0], Ops[1], Ops[2], Ops[4]});
  } else {
    Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
    Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
  }
  // If we have more than 3 arguments, we need to do masking.
  if (Ops.size() > 3) {
    Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
                               : Ops[PTIdx];

    // If we negated the accumulator and it's the PassThru value, we need to
    // bypass the negate. Conveniently Upper should be the same thing in this
    // case.
    if (NegAcc && PTIdx == 2)
      PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);

    Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
  }
  return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
}

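// pmuldq/pmuludq multiply the low 32 bits of each 64-bit element; this is
// modeled as a vXi64 multiply after sign- or zero-extending those low bits.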
static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
                           ArrayRef<Value *> Ops) {
  llvm::Type *Ty = Ops[0]->getType();
  // Arguments have a vXi32 type so cast to vXi64.
  Ty = llvm::VectorType::get(CGF.Int64Ty,
                             Ty->getPrimitiveSizeInBits() / 64);
  Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
  Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
    LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
    RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
    RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = CGF.Builder.CreateAnd(LHS, Mask);
    RHS = CGF.Builder.CreateAnd(RHS, Mask);
  }

  return CGF.Builder.CreateMul(LHS, RHS);
}

// Emit a masked pternlog intrinsic. This only exists because the header has to
// use a macro and we aren't able to pass the input argument to a pternlog
// builtin and a select builtin without evaluating it twice.
static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
                             ArrayRef<Value *> Ops) {
  llvm::Type *Ty = Ops[0]->getType();

  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_128;
  else if (VecWidth == 256 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_256;
  else if (VecWidth == 512 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_512;
  else if (VecWidth == 128 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_128;
  else if (VecWidth == 256 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_256;
  else if (VecWidth == 512 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
                                          Ops.drop_back());
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
  return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
}
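// For illustration only: the pternlog immediate is an 8-entry truth table over
// the three inputs, so e.g. _mm512_ternarylogic_epi32(a, b, c, 0xE8) (bitwise
// majority of a, b, c) becomes a single pternlog call whose result is then
// merged with the passthru operand by EmitX86Select above.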
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                              llvm::Type *DstTy) {
  unsigned NumberOfElements = DstTy->getVectorNumElements();
  Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
  return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
// Emit addition or subtraction with signed/unsigned saturation.
static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF,
                                   ArrayRef<Value *> Ops, bool IsSigned,
                                   bool IsAddition) {
  Intrinsic::ID IID =
      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
  llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
  return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
}
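// For illustration only: _mm_adds_epi8(a, b) lowers through here to the
// generic saturating intrinsic, roughly
//   %r = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)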
Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
  const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
  StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
  return EmitX86CpuIs(CPUStr);
}
// Convert a BF16 to a float.
static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
                                        const CallExpr *E,
                                        ArrayRef<Value *> Ops) {
  llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
  Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
  Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
  return BitCast;
}
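// For illustration only: a bfloat16 value is the high half of an IEEE float,
// so widening is just a zero-extend plus a 16-bit left shift; e.g. the bf16
// bit pattern 0x3F80 becomes the i32 0x3F800000, which bitcasts to 1.0f.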
Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {

  llvm::Type *Int32Ty = Builder.getInt32Ty();

  // Matching the struct layout from the compiler-rt/libgcc structure that is
  // filled in:
  // unsigned int __cpu_vendor;
  // unsigned int __cpu_type;
  // unsigned int __cpu_subtype;
  // unsigned int __cpu_features[1];
  llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
                                          llvm::ArrayType::get(Int32Ty, 1));

  // Grab the global __cpu_model.
  llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
  cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);

  // Calculate the index needed to access the correct field based on the
  // range. Also adjust the expected value.
  unsigned Index;
  unsigned Value;
  std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define X86_VENDOR(ENUM, STRING)                                               \
  .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS)             \
  .Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR)                               \
  .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR)                            \
  .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#include "llvm/Support/X86TargetParser.def"
                               .Default({0, 0});
  assert(Value != 0 && "Invalid CPUStr passed to CpuIs");

  // Grab the appropriate field from __cpu_model.
  llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
                         ConstantInt::get(Int32Ty, Index)};
  llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
  CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));

  // Check the value of the field against the requested value.
  return Builder.CreateICmpEQ(CpuValue,
                              llvm::ConstantInt::get(Int32Ty, Value));
}
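// For illustration only: __builtin_cpu_is("amd") loads
// __cpu_model.__cpu_vendor (field 0) and compares it against the vendor's
// enum value, roughly
//   %v = load i32, i32* gep(@__cpu_model, 0, 0)
//   %r = icmp eq i32 %v, <VENDOR_AMD>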
Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
  const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
  StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
  return EmitX86CpuSupports(FeatureStr);
}
uint64_t
CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
  // Processor features and mapping to processor feature value.
  uint64_t FeaturesMask = 0;
  for (const StringRef &FeatureStr : FeatureStrs) {
    unsigned Feature =
        StringSwitch<unsigned>(FeatureStr)
#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
#include "llvm/Support/X86TargetParser.def"
        ;
    FeaturesMask |= (1ULL << Feature);
  }
  return FeaturesMask;
}
Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
  return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
}
llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
  uint32_t Features1 = Lo_32(FeaturesMask);
  uint32_t Features2 = Hi_32(FeaturesMask);

  Value *Result = Builder.getTrue();

  if (Features1 != 0) {
    // Matching the struct layout from the compiler-rt/libgcc structure that is
    // filled in:
    // unsigned int __cpu_vendor;
    // unsigned int __cpu_type;
    // unsigned int __cpu_subtype;
    // unsigned int __cpu_features[1];
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
                                            llvm::ArrayType::get(Int32Ty, 1));

    // Grab the global __cpu_model.
    llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
    cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);

    // Grab the first (0th) element from the field __cpu_features off of the
    // global in the struct STy.
    Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
                     Builder.getInt32(0)};
    Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
    Value *Features =
        Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));

    // Check the value of the bit corresponding to the feature requested.
    Value *Mask = Builder.getInt32(Features1);
    Value *Bitset = Builder.CreateAnd(Features, Mask);
    Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
    Result = Builder.CreateAnd(Result, Cmp);
  }

  if (Features2 != 0) {
    llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
                                                             "__cpu_features2");
    cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);

    Value *Features =
        Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));

    // Check the value of the bit corresponding to the feature requested.
    Value *Mask = Builder.getInt32(Features2);
    Value *Bitset = Builder.CreateAnd(Features, Mask);
    Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
    Result = Builder.CreateAnd(Result, Cmp);
  }

  return Result;
}
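// For illustration only: __builtin_cpu_supports("avx2") tests a single bit of
// __cpu_model.__cpu_features[0]; a check needing bits from both words simply
// ands the two comparisons above together, i.e.
//   (features1 & M1) == M1 && (features2 & M2) == M2.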
Value *CodeGenFunction::EmitX86CpuInit() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
                                                    /*Variadic*/ false);
  llvm::FunctionCallee Func =
      CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
  cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
  cast<llvm::GlobalValue>(Func.getCallee())
      ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
  return Builder.CreateCall(Func);
}
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == X86::BI__builtin_cpu_is)
    return EmitX86CpuIs(E);
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return EmitX86CpuSupports(E);
  if (BuiltinID == X86::BI__builtin_cpu_init)
    return EmitX86CpuInit();

  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  // These exist so that the builtin that takes an immediate can be bounds
  // checked by clang to avoid passing bad immediates to the backend. Since
  // AVX has a larger immediate than SSE we would need separate builtins to
  // do the different bounds checking. Rather than create a clang specific
  // SSE only builtin, this implements eight separate builtins to match the
  // gcc implementation.
  auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
    Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops);
  };

  // For the vector forms of FP comparisons, translate the builtins directly to
  // IR.
  // TODO: The builtins could be removed if the SSE header files used vector
  // extension comparisons directly (vector ordered/unordered may need
  // additional support via __builtin_isnan()).
  auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) {
    Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
    llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
    llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
    Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
    return Builder.CreateBitCast(Sext, FPVecTy);
  };
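  // For illustration only: through getVectorFCmpIR, _mm_cmpeq_ps(a, b)
  // becomes, roughly,
  //   %c = fcmp oeq <4 x float> %a, %b
  //   %s = sext <4 x i1> %c to <4 x i32>
  //   %r = bitcast <4 x i32> %s to <4 x float>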
  switch (BuiltinID) {
  default: return nullptr;
  case X86::BI_mm_prefetch: {
    Value *Address = Ops[0];
    ConstantInt *C = cast<ConstantInt>(Ops[1]);
    Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
    Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
    Value *Data = ConstantInt::get(Int32Ty, 1);
    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return Builder.CreateCall(F, {Address, RW, Locality, Data});
  }
  case X86::BI_mm_clflush: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
                              Ops[0]);
  }
  case X86::BI_mm_lfence: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
  }
  case X86::BI_mm_mfence: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
  }
  case X86::BI_mm_sfence: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
  }
  case X86::BI_mm_pause: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
  }
  case X86::BI__rdtsc: {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
  }
  case X86::BI__builtin_ia32_rdtscp: {
    Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
    Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
                                      Ops[0]);
    return Builder.CreateExtractValue(Call, 0);
  }
  case X86::BI__builtin_ia32_lzcnt_u16:
  case X86::BI__builtin_ia32_lzcnt_u32:
  case X86::BI__builtin_ia32_lzcnt_u64: {
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
    return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
  }
  case X86::BI__builtin_ia32_tzcnt_u16:
  case X86::BI__builtin_ia32_tzcnt_u32:
  case X86::BI__builtin_ia32_tzcnt_u64: {
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
    return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
  }
  case X86::BI__builtin_ia32_undef128:
  case X86::BI__builtin_ia32_undef256:
  case X86::BI__builtin_ia32_undef512:
    // The x86 definition of "undef" is not the same as the LLVM definition
    // (PR32176). We leave optimizing away an unnecessary zero constant to the
    // IR optimizer and backend.
    // TODO: If we had a "freeze" IR instruction to generate a fixed undef
    // value, we should use that here instead of a zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vec_ext_v32qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
  case X86::BI__builtin_ia32_vec_ext_v4di: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
    Index &= NumElts - 1;
    // These builtins exist so we can ensure the index is an ICE and in range.
    // Otherwise we could just do this in the header file.
    return Builder.CreateExtractElement(Ops[0], Index);
  }
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vec_set_v32qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
  case X86::BI__builtin_ia32_vec_set_v4di: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
    Index &= NumElts - 1;
    // These builtins exist so we can ensure the index is an ICE and in range.
    // Otherwise we could just do this in the header file.
    return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
  }
  case X86::BI_mm_setcsr:
  case X86::BI__builtin_ia32_ldmxcsr: {
    Address Tmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
  }
  case X86::BI_mm_getcsr:
  case X86::BI__builtin_ia32_stmxcsr: {
    Address Tmp = CreateMemTemp(E->getType());
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_xsave:
  case X86::BI__builtin_ia32_xsave64:
  case X86::BI__builtin_ia32_xrstor:
  case X86::BI__builtin_ia32_xrstor64:
  case X86::BI__builtin_ia32_xsaveopt:
  case X86::BI__builtin_ia32_xsaveopt64:
  case X86::BI__builtin_ia32_xrstors:
  case X86::BI__builtin_ia32_xrstors64:
  case X86::BI__builtin_ia32_xsavec:
  case X86::BI__builtin_ia32_xsavec64:
  case X86::BI__builtin_ia32_xsaves:
  case X86::BI__builtin_ia32_xsaves64:
  case X86::BI__builtin_ia32_xsetbv:
  case X86::BI_xsetbv: {
    Intrinsic::ID ID;
#define INTRINSIC_X86_XSAVE_ID(NAME) \
  case X86::BI__builtin_ia32_##NAME: \
    ID = Intrinsic::x86_##NAME; \
    break
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    INTRINSIC_X86_XSAVE_ID(xsave);
    INTRINSIC_X86_XSAVE_ID(xsave64);
    INTRINSIC_X86_XSAVE_ID(xrstor);
    INTRINSIC_X86_XSAVE_ID(xrstor64);
    INTRINSIC_X86_XSAVE_ID(xsaveopt);
    INTRINSIC_X86_XSAVE_ID(xsaveopt64);
    INTRINSIC_X86_XSAVE_ID(xrstors);
    INTRINSIC_X86_XSAVE_ID(xrstors64);
    INTRINSIC_X86_XSAVE_ID(xsavec);
    INTRINSIC_X86_XSAVE_ID(xsavec64);
    INTRINSIC_X86_XSAVE_ID(xsaves);
    INTRINSIC_X86_XSAVE_ID(xsaves64);
    INTRINSIC_X86_XSAVE_ID(xsetbv);
    case X86::BI_xsetbv:
      ID = Intrinsic::x86_xsetbv;
      break;
    }
#undef INTRINSIC_X86_XSAVE_ID
    Value *Mhi = Builder.CreateTrunc(
        Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
    Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
    Ops[1] = Mhi;
    Ops.push_back(Mlo);
    return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
  }
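  // For illustration only: these builtins take one 64-bit mask, but the
  // underlying instructions consume it in EDX:EAX, so e.g. a mask of
  // 0x0000000700000003 is split into Mhi = 7 and Mlo = 3 before the call.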
  case X86::BI__builtin_ia32_xgetbv:
  case X86::BI_xgetbv:
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
  case X86::BI__builtin_ia32_storedqudi128_mask:
  case X86::BI__builtin_ia32_storedqusi128_mask:
  case X86::BI__builtin_ia32_storedquhi128_mask:
  case X86::BI__builtin_ia32_storedquqi128_mask:
  case X86::BI__builtin_ia32_storeupd128_mask:
  case X86::BI__builtin_ia32_storeups128_mask:
  case X86::BI__builtin_ia32_storedqudi256_mask:
  case X86::BI__builtin_ia32_storedqusi256_mask:
  case X86::BI__builtin_ia32_storedquhi256_mask:
  case X86::BI__builtin_ia32_storedquqi256_mask:
  case X86::BI__builtin_ia32_storeupd256_mask:
  case X86::BI__builtin_ia32_storeups256_mask:
  case X86::BI__builtin_ia32_storedqudi512_mask:
  case X86::BI__builtin_ia32_storedqusi512_mask:
  case X86::BI__builtin_ia32_storedquhi512_mask:
  case X86::BI__builtin_ia32_storedquqi512_mask:
  case X86::BI__builtin_ia32_storeupd512_mask:
  case X86::BI__builtin_ia32_storeups512_mask:
    return EmitX86MaskedStore(*this, Ops, 1);

  case X86::BI__builtin_ia32_storess128_mask:
  case X86::BI__builtin_ia32_storesd128_mask: {
    return EmitX86MaskedStore(*this, Ops, 1);
  }
  case X86::BI__builtin_ia32_vpopcntb_128:
  case X86::BI__builtin_ia32_vpopcntd_128:
  case X86::BI__builtin_ia32_vpopcntq_128:
  case X86::BI__builtin_ia32_vpopcntw_128:
  case X86::BI__builtin_ia32_vpopcntb_256:
  case X86::BI__builtin_ia32_vpopcntd_256:
  case X86::BI__builtin_ia32_vpopcntq_256:
  case X86::BI__builtin_ia32_vpopcntw_256:
  case X86::BI__builtin_ia32_vpopcntb_512:
  case X86::BI__builtin_ia32_vpopcntd_512:
  case X86::BI__builtin_ia32_vpopcntq_512:
  case X86::BI__builtin_ia32_vpopcntw_512: {
    llvm::Type *ResultType = ConvertType(E->getType());
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
    return Builder.CreateCall(F, Ops);
  }
  case X86::BI__builtin_ia32_cvtmask2b128:
  case X86::BI__builtin_ia32_cvtmask2b256:
  case X86::BI__builtin_ia32_cvtmask2b512:
  case X86::BI__builtin_ia32_cvtmask2w128:
  case X86::BI__builtin_ia32_cvtmask2w256:
  case X86::BI__builtin_ia32_cvtmask2w512:
  case X86::BI__builtin_ia32_cvtmask2d128:
  case X86::BI__builtin_ia32_cvtmask2d256:
  case X86::BI__builtin_ia32_cvtmask2d512:
  case X86::BI__builtin_ia32_cvtmask2q128:
  case X86::BI__builtin_ia32_cvtmask2q256:
  case X86::BI__builtin_ia32_cvtmask2q512:
    return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));

  case X86::BI__builtin_ia32_cvtb2mask128:
  case X86::BI__builtin_ia32_cvtb2mask256:
  case X86::BI__builtin_ia32_cvtb2mask512:
  case X86::BI__builtin_ia32_cvtw2mask128:
  case X86::BI__builtin_ia32_cvtw2mask256:
  case X86::BI__builtin_ia32_cvtw2mask512:
  case X86::BI__builtin_ia32_cvtd2mask128:
  case X86::BI__builtin_ia32_cvtd2mask256:
  case X86::BI__builtin_ia32_cvtd2mask512:
  case X86::BI__builtin_ia32_cvtq2mask128:
  case X86::BI__builtin_ia32_cvtq2mask256:
  case X86::BI__builtin_ia32_cvtq2mask512:
    return EmitX86ConvertToMask(*this, Ops[0]);

  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
    return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
    return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
  case X86::BI__builtin_ia32_vfmaddss3:
  case X86::BI__builtin_ia32_vfmaddsd3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
    return EmitScalarFMAExpr(*this, Ops, Ops[0]);
  case X86::BI__builtin_ia32_vfmaddss:
  case X86::BI__builtin_ia32_vfmaddsd:
    return EmitScalarFMAExpr(*this, Ops,
                             Constant::getNullValue(Ops[0]->getType()));
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
    return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
    return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
  case X86::BI__builtin_ia32_vfmsubss3_mask3:
  case X86::BI__builtin_ia32_vfmsubsd3_mask3:
    return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
                             /*NegAcc*/true);
  case X86::BI__builtin_ia32_vfmaddps:
  case X86::BI__builtin_ia32_vfmaddpd:
  case X86::BI__builtin_ia32_vfmaddps256:
  case X86::BI__builtin_ia32_vfmaddpd256:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
    return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
  case X86::BI__builtin_ia32_vfmaddsubps:
  case X86::BI__builtin_ia32_vfmaddsubpd:
  case X86::BI__builtin_ia32_vfmaddsubps256:
  case X86::BI__builtin_ia32_vfmaddsubpd256:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
    return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
  case X86::BI__builtin_ia32_movdqa32store128_mask:
  case X86::BI__builtin_ia32_movdqa64store128_mask:
  case X86::BI__builtin_ia32_storeaps128_mask:
  case X86::BI__builtin_ia32_storeapd128_mask:
  case X86::BI__builtin_ia32_movdqa32store256_mask:
  case X86::BI__builtin_ia32_movdqa64store256_mask:
  case X86::BI__builtin_ia32_storeaps256_mask:
  case X86::BI__builtin_ia32_storeapd256_mask:
  case X86::BI__builtin_ia32_movdqa32store512_mask:
  case X86::BI__builtin_ia32_movdqa64store512_mask:
  case X86::BI__builtin_ia32_storeaps512_mask:
  case X86::BI__builtin_ia32_storeapd512_mask: {
    unsigned Align =
        getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
    return EmitX86MaskedStore(*this, Ops, Align);
  }
  case X86::BI__builtin_ia32_loadups128_mask:
  case X86::BI__builtin_ia32_loadups256_mask:
  case X86::BI__builtin_ia32_loadups512_mask:
  case X86::BI__builtin_ia32_loadupd128_mask:
  case X86::BI__builtin_ia32_loadupd256_mask:
  case X86::BI__builtin_ia32_loadupd512_mask:
  case X86::BI__builtin_ia32_loaddquqi128_mask:
  case X86::BI__builtin_ia32_loaddquqi256_mask:
  case X86::BI__builtin_ia32_loaddquqi512_mask:
  case X86::BI__builtin_ia32_loaddquhi128_mask:
  case X86::BI__builtin_ia32_loaddquhi256_mask:
  case X86::BI__builtin_ia32_loaddquhi512_mask:
  case X86::BI__builtin_ia32_loaddqusi128_mask:
  case X86::BI__builtin_ia32_loaddqusi256_mask:
  case X86::BI__builtin_ia32_loaddqusi512_mask:
  case X86::BI__builtin_ia32_loaddqudi128_mask:
  case X86::BI__builtin_ia32_loaddqudi256_mask:
  case X86::BI__builtin_ia32_loaddqudi512_mask:
    return EmitX86MaskedLoad(*this, Ops, 1);

  case X86::BI__builtin_ia32_loadss128_mask:
  case X86::BI__builtin_ia32_loadsd128_mask:
    return EmitX86MaskedLoad(*this, Ops, 1);

  case X86::BI__builtin_ia32_loadaps128_mask:
  case X86::BI__builtin_ia32_loadaps256_mask:
  case X86::BI__builtin_ia32_loadaps512_mask:
  case X86::BI__builtin_ia32_loadapd128_mask:
  case X86::BI__builtin_ia32_loadapd256_mask:
  case X86::BI__builtin_ia32_loadapd512_mask:
  case X86::BI__builtin_ia32_movdqa32load128_mask:
  case X86::BI__builtin_ia32_movdqa32load256_mask:
  case X86::BI__builtin_ia32_movdqa32load512_mask:
  case X86::BI__builtin_ia32_movdqa64load128_mask:
  case X86::BI__builtin_ia32_movdqa64load256_mask:
  case X86::BI__builtin_ia32_movdqa64load512_mask: {
    unsigned Align =
        getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
    return EmitX86MaskedLoad(*this, Ops, Align);
  }
  case X86::BI__builtin_ia32_expandloaddf128_mask:
  case X86::BI__builtin_ia32_expandloaddf256_mask:
  case X86::BI__builtin_ia32_expandloaddf512_mask:
  case X86::BI__builtin_ia32_expandloadsf128_mask:
  case X86::BI__builtin_ia32_expandloadsf256_mask:
  case X86::BI__builtin_ia32_expandloadsf512_mask:
  case X86::BI__builtin_ia32_expandloaddi128_mask:
  case X86::BI__builtin_ia32_expandloaddi256_mask:
  case X86::BI__builtin_ia32_expandloaddi512_mask:
  case X86::BI__builtin_ia32_expandloadsi128_mask:
  case X86::BI__builtin_ia32_expandloadsi256_mask:
  case X86::BI__builtin_ia32_expandloadsi512_mask:
  case X86::BI__builtin_ia32_expandloadhi128_mask:
  case X86::BI__builtin_ia32_expandloadhi256_mask:
  case X86::BI__builtin_ia32_expandloadhi512_mask:
  case X86::BI__builtin_ia32_expandloadqi128_mask:
  case X86::BI__builtin_ia32_expandloadqi256_mask:
  case X86::BI__builtin_ia32_expandloadqi512_mask:
    return EmitX86ExpandLoad(*this, Ops);

  case X86::BI__builtin_ia32_compressstoredf128_mask:
  case X86::BI__builtin_ia32_compressstoredf256_mask:
  case X86::BI__builtin_ia32_compressstoredf512_mask:
  case X86::BI__builtin_ia32_compressstoresf128_mask:
  case X86::BI__builtin_ia32_compressstoresf256_mask:
  case X86::BI__builtin_ia32_compressstoresf512_mask:
  case X86::BI__builtin_ia32_compressstoredi128_mask:
  case X86::BI__builtin_ia32_compressstoredi256_mask:
  case X86::BI__builtin_ia32_compressstoredi512_mask:
  case X86::BI__builtin_ia32_compressstoresi128_mask:
  case X86::BI__builtin_ia32_compressstoresi256_mask:
  case X86::BI__builtin_ia32_compressstoresi512_mask:
  case X86::BI__builtin_ia32_compressstorehi128_mask:
  case X86::BI__builtin_ia32_compressstorehi256_mask:
  case X86::BI__builtin_ia32_compressstorehi512_mask:
  case X86::BI__builtin_ia32_compressstoreqi128_mask:
  case X86::BI__builtin_ia32_compressstoreqi256_mask:
  case X86::BI__builtin_ia32_compressstoreqi512_mask:
    return EmitX86CompressStore(*this, Ops);

  case X86::BI__builtin_ia32_expanddf128_mask:
  case X86::BI__builtin_ia32_expanddf256_mask:
  case X86::BI__builtin_ia32_expanddf512_mask:
  case X86::BI__builtin_ia32_expandsf128_mask:
  case X86::BI__builtin_ia32_expandsf256_mask:
  case X86::BI__builtin_ia32_expandsf512_mask:
  case X86::BI__builtin_ia32_expanddi128_mask:
  case X86::BI__builtin_ia32_expanddi256_mask:
  case X86::BI__builtin_ia32_expanddi512_mask:
  case X86::BI__builtin_ia32_expandsi128_mask:
  case X86::BI__builtin_ia32_expandsi256_mask:
  case X86::BI__builtin_ia32_expandsi512_mask:
  case X86::BI__builtin_ia32_expandhi128_mask:
  case X86::BI__builtin_ia32_expandhi256_mask:
  case X86::BI__builtin_ia32_expandhi512_mask:
  case X86::BI__builtin_ia32_expandqi128_mask:
  case X86::BI__builtin_ia32_expandqi256_mask:
  case X86::BI__builtin_ia32_expandqi512_mask:
    return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);

  case X86::BI__builtin_ia32_compressdf128_mask:
  case X86::BI__builtin_ia32_compressdf256_mask:
  case X86::BI__builtin_ia32_compressdf512_mask:
  case X86::BI__builtin_ia32_compresssf128_mask:
  case X86::BI__builtin_ia32_compresssf256_mask:
  case X86::BI__builtin_ia32_compresssf512_mask:
  case X86::BI__builtin_ia32_compressdi128_mask:
  case X86::BI__builtin_ia32_compressdi256_mask:
  case X86::BI__builtin_ia32_compressdi512_mask:
  case X86::BI__builtin_ia32_compresssi128_mask:
  case X86::BI__builtin_ia32_compresssi256_mask:
  case X86::BI__builtin_ia32_compresssi512_mask:
  case X86::BI__builtin_ia32_compresshi128_mask:
  case X86::BI__builtin_ia32_compresshi256_mask:
  case X86::BI__builtin_ia32_compresshi512_mask:
  case X86::BI__builtin_ia32_compressqi128_mask:
  case X86::BI__builtin_ia32_compressqi256_mask:
  case X86::BI__builtin_ia32_compressqi512_mask:
    return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unexpected builtin");
    case X86::BI__builtin_ia32_gather3div2df:
      IID = Intrinsic::x86_avx512_mask_gather3div2_df;
      break;
    case X86::BI__builtin_ia32_gather3div2di:
      IID = Intrinsic::x86_avx512_mask_gather3div2_di;
      break;
    case X86::BI__builtin_ia32_gather3div4df:
      IID = Intrinsic::x86_avx512_mask_gather3div4_df;
      break;
    case X86::BI__builtin_ia32_gather3div4di:
      IID = Intrinsic::x86_avx512_mask_gather3div4_di;
      break;
    case X86::BI__builtin_ia32_gather3div4sf:
      IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
      break;
    case X86::BI__builtin_ia32_gather3div4si:
      IID = Intrinsic::x86_avx512_mask_gather3div4_si;
      break;
    case X86::BI__builtin_ia32_gather3div8sf:
      IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
      break;
    case X86::BI__builtin_ia32_gather3div8si:
      IID = Intrinsic::x86_avx512_mask_gather3div8_si;
      break;
    case X86::BI__builtin_ia32_gather3siv2df:
      IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
      break;
    case X86::BI__builtin_ia32_gather3siv2di:
      IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
      break;
    case X86::BI__builtin_ia32_gather3siv4df:
      IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
      break;
    case X86::BI__builtin_ia32_gather3siv4di:
      IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
      break;
    case X86::BI__builtin_ia32_gather3siv4sf:
      IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
      break;
    case X86::BI__builtin_ia32_gather3siv4si:
      IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
      break;
    case X86::BI__builtin_ia32_gather3siv8sf:
      IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
      break;
    case X86::BI__builtin_ia32_gather3siv8si:
      IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
      break;
    case X86::BI__builtin_ia32_gathersiv8df:
      IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
      break;
    case X86::BI__builtin_ia32_gathersiv16sf:
      IID = Intrinsic::x86_avx512_mask_gather_dps_512;
      break;
    case X86::BI__builtin_ia32_gatherdiv8df:
      IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
      break;
    case X86::BI__builtin_ia32_gatherdiv16sf:
      IID = Intrinsic::x86_avx512_mask_gather_qps_512;
      break;
    case X86::BI__builtin_ia32_gathersiv8di:
      IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
      break;
    case X86::BI__builtin_ia32_gathersiv16si:
      IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
      break;
    case X86::BI__builtin_ia32_gatherdiv8di:
      IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
      break;
    case X86::BI__builtin_ia32_gatherdiv16si:
      IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
      break;
    }

    unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(),
                                Ops[2]->getType()->getVectorNumElements());
    Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
    Function *Intr = CGM.getIntrinsic(IID);
    return Builder.CreateCall(Intr, Ops);
  }
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unexpected builtin");
    case X86::BI__builtin_ia32_scattersiv8df:
      IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
      break;
    case X86::BI__builtin_ia32_scattersiv16sf:
      IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv8df:
      IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv16sf:
      IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
      break;
    case X86::BI__builtin_ia32_scattersiv8di:
      IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
      break;
    case X86::BI__builtin_ia32_scattersiv16si:
      IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv8di:
      IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv16si:
      IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
      break;
    case X86::BI__builtin_ia32_scatterdiv2df:
      IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
      break;
    case X86::BI__builtin_ia32_scatterdiv2di:
      IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
      break;
    case X86::BI__builtin_ia32_scatterdiv4df:
      IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
      break;
    case X86::BI__builtin_ia32_scatterdiv4di:
      IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
      break;
    case X86::BI__builtin_ia32_scatterdiv4sf:
      IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
      break;
    case X86::BI__builtin_ia32_scatterdiv4si:
      IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
      break;
    case X86::BI__builtin_ia32_scatterdiv8sf:
      IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
      break;
    case X86::BI__builtin_ia32_scatterdiv8si:
      IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
      break;
    case X86::BI__builtin_ia32_scattersiv2df:
      IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
      break;
    case X86::BI__builtin_ia32_scattersiv2di:
      IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
      break;
    case X86::BI__builtin_ia32_scattersiv4df:
      IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
      break;
    case X86::BI__builtin_ia32_scattersiv4di:
      IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
      break;
    case X86::BI__builtin_ia32_scattersiv4sf:
      IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
      break;
    case X86::BI__builtin_ia32_scattersiv4si:
      IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
      break;
    case X86::BI__builtin_ia32_scattersiv8sf:
      IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
      break;
    case X86::BI__builtin_ia32_scattersiv8si:
      IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
      break;
    }

    unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(),
                                Ops[3]->getType()->getVectorNumElements());
    Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
    Function *Intr = CGM.getIntrinsic(IID);
    return Builder.CreateCall(Intr, Ops);
  }
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask: {
    llvm::Type *DstTy = ConvertType(E->getType());
    unsigned NumElts = DstTy->getVectorNumElements();
    unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned SubVectors = SrcNumElts / NumElts;
    unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
    assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
    Index &= SubVectors - 1; // Remove any extra bits.
    Index *= NumElts;

    uint32_t Indices[16];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i + Index;

    Value *Res = Builder.CreateShuffleVector(Ops[0],
                                             UndefValue::get(Ops[0]->getType()),
                                             makeArrayRef(Indices, NumElts),
                                             "extract");

    if (Ops.size() == 4)
      Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);

    return Res;
  }
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_inserti32x4:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512: {
    unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
    unsigned SubVectors = DstNumElts / SrcNumElts;
    unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
    assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
    Index &= SubVectors - 1; // Remove any extra bits.
    Index *= SrcNumElts;

    uint32_t Indices[16];
    for (unsigned i = 0; i != DstNumElts; ++i)
      Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;

    Value *Op1 = Builder.CreateShuffleVector(Ops[1],
                                             UndefValue::get(Ops[1]->getType()),
                                             makeArrayRef(Indices, DstNumElts),
                                             "widen");

    for (unsigned i = 0; i != DstNumElts; ++i) {
      if (i >= Index && i < (Index + SrcNumElts))
        Indices[i] = (i - Index) + DstNumElts;
      else
        Indices[i] = i;
    }

    return Builder.CreateShuffleVector(Ops[0], Op1,
                                       makeArrayRef(Indices, DstNumElts),
                                       "insert");
  }
  case X86::BI__builtin_ia32_pmovqd512_mask:
  case X86::BI__builtin_ia32_pmovwb512_mask: {
    Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
    return EmitX86Select(*this, Ops[2], Res, Ops[1]);
  }
  case X86::BI__builtin_ia32_pmovdb512_mask:
  case X86::BI__builtin_ia32_pmovdw512_mask:
  case X86::BI__builtin_ia32_pmovqw512_mask: {
    if (const auto *C = dyn_cast<Constant>(Ops[2]))
      if (C->isAllOnesValue())
        return Builder.CreateTrunc(Ops[0], Ops[1]->getType());

    Intrinsic::ID IID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pmovdb512_mask:
      IID = Intrinsic::x86_avx512_mask_pmov_db_512;
      break;
    case X86::BI__builtin_ia32_pmovdw512_mask:
      IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
      break;
    case X86::BI__builtin_ia32_pmovqw512_mask:
      IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
      break;
    }

    Function *Intr = CGM.getIntrinsic(IID);
    return Builder.CreateCall(Intr, Ops);
  }
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_pblendd256: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    uint32_t Indices[16];
    // If there are more than 8 elements, the immediate is used twice so make
    // sure we handle that.
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;

    return Builder.CreateShuffleVector(Ops[0], Ops[1],
                                       makeArrayRef(Indices, NumElts),
                                       "blend");
  }
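  // For illustration only: _mm_blend_pd(a, b, 0x2) keeps element 0 of a and
  // takes element 1 from b, i.e. the mask built above is <0, 3>:
  //   %r = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 3>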
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512: {
    uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
    llvm::Type *Ty = Ops[0]->getType();
    unsigned NumElts = Ty->getVectorNumElements();

    // Splat the 8-bits of immediate 4 times to help the loop wrap around.
    Imm = (Imm & 0xff) * 0x01010101;

    uint32_t Indices[32];
    for (unsigned l = 0; l != NumElts; l += 8) {
      for (unsigned i = 0; i != 4; ++i) {
        Indices[l + i] = l + (Imm & 3);
        Imm >>= 2;
      }
      for (unsigned i = 4; i != 8; ++i)
        Indices[l + i] = l + i;
    }

    return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
                                       makeArrayRef(Indices, NumElts),
                                       "pshuflw");
  }
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512: {
    uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
    llvm::Type *Ty = Ops[0]->getType();
    unsigned NumElts = Ty->getVectorNumElements();

    // Splat the 8-bits of immediate 4 times to help the loop wrap around.
    Imm = (Imm & 0xff) * 0x01010101;

    uint32_t Indices[32];
    for (unsigned l = 0; l != NumElts; l += 8) {
      for (unsigned i = 0; i != 4; ++i)
        Indices[l + i] = l + i;
      for (unsigned i = 4; i != 8; ++i) {
        Indices[l + i] = l + 4 + (Imm & 3);
        Imm >>= 2;
      }
    }

    return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
                                       makeArrayRef(Indices, NumElts),
                                       "pshufhw");
  }
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512: {
    uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
    llvm::Type *Ty = Ops[0]->getType();
    unsigned NumElts = Ty->getVectorNumElements();
    unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
    unsigned NumLaneElts = NumElts / NumLanes;

    // Splat the 8-bits of immediate 4 times to help the loop wrap around.
    Imm = (Imm & 0xff) * 0x01010101;

    uint32_t Indices[16];
    for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
      for (unsigned i = 0; i != NumLaneElts; ++i) {
        Indices[i + l] = (Imm % NumLaneElts) + l;
        Imm /= NumLaneElts;
      }
    }

    return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
                                       makeArrayRef(Indices, NumElts),
                                       "permil");
  }
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512: {
    uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
    llvm::Type *Ty = Ops[0]->getType();
    unsigned NumElts = Ty->getVectorNumElements();
    unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
    unsigned NumLaneElts = NumElts / NumLanes;

    // Splat the 8-bits of immediate 4 times to help the loop wrap around.
    Imm = (Imm & 0xff) * 0x01010101;

    uint32_t Indices[16];
    for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
      for (unsigned i = 0; i != NumLaneElts; ++i) {
        unsigned Index = Imm % NumLaneElts;
        Imm /= NumLaneElts;
        if (i >= (NumLaneElts / 2))
          Index += NumElts; // Switch to other source.
        Indices[l + i] = l + Index;
      }
    }

    return Builder.CreateShuffleVector(Ops[0], Ops[1],
                                       makeArrayRef(Indices, NumElts),
                                       "shufp");
  }
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_permdf512: {
    unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
    llvm::Type *Ty = Ops[0]->getType();
    unsigned NumElts = Ty->getVectorNumElements();

    // These intrinsics operate on 256-bit lanes of four 64-bit elements.
    uint32_t Indices[8];
    for (unsigned l = 0; l != NumElts; l += 4)
      for (unsigned i = 0; i != 4; ++i)
        Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);

    return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
                                       makeArrayRef(Indices, NumElts),
                                       "perm");
  }
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512: {
    unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;

    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    assert(NumElts % 16 == 0);

    // If palignr is shifting the pair of vectors more than the size of two
    // lanes, emit zero.
    if (ShiftVal >= 32)
      return llvm::Constant::getNullValue(ConvertType(E->getType()));

    // If palignr is shifting the pair of input vectors more than one lane,
    // but less than two lanes, convert to shifting in zeroes.
    if (ShiftVal > 16) {
      ShiftVal -= 16;
      Ops[1] = Ops[0];
      Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
    }

    uint32_t Indices[64];
    // 256-bit palignr operates on 128-bit lanes so we need to handle that.
    for (unsigned l = 0; l != NumElts; l += 16) {
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = ShiftVal + i;
        if (Idx >= 16)
          Idx += NumElts - 16; // End of lane, switch operand.
        Indices[l + i] = Idx + l;
      }
    }

    return Builder.CreateShuffleVector(Ops[1], Ops[0],
                                       makeArrayRef(Indices, NumElts),
                                       "palignr");
  }
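  // For illustration only: _mm_alignr_epi8(a, b, 4) shifts the concatenation
  // a:b right by 4 bytes, which the loop above encodes as a shuffle of b and a
  // with indices <4, 5, ..., 19>.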
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_alignq512: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;

    // Mask the shift amount to width of two vectors.
    ShiftVal &= (2 * NumElts) - 1;

    uint32_t Indices[16];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i + ShiftVal;

    return Builder.CreateShuffleVector(Ops[1], Ops[0],
                                       makeArrayRef(Indices, NumElts),
                                       "valign");
  }
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2: {
    unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
    llvm::Type *Ty = Ops[0]->getType();
    unsigned NumElts = Ty->getVectorNumElements();
    unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
    unsigned NumLaneElts = NumElts / NumLanes;

    uint32_t Indices[16];
    for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
      unsigned Index = (Imm % NumLanes) * NumLaneElts;
      Imm /= NumLanes; // Discard the bits we just used.
      if (l >= (NumElts / 2))
        Index += NumElts; // Switch to other source.
      for (unsigned i = 0; i != NumLaneElts; ++i) {
        Indices[l + i] = Index + i;
      }
    }

    return Builder.CreateShuffleVector(Ops[0], Ops[1],
                                       makeArrayRef(Indices, NumElts),
                                       "shuf");
  }
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256: {
    unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();

    // This takes a very simple approach since there are two lanes and a
    // shuffle can have 2 inputs. So we reserve the first input for the first
    // lane and the second input for the second lane. This may result in
    // duplicate sources, but this can be dealt with in the backend.

    Value *OutOps[2];
    uint32_t Indices[8];
    for (unsigned l = 0; l != 2; ++l) {
      // Determine the source for this lane.
      if (Imm & (1 << ((l * 4) + 3)))
        OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
      else if (Imm & (1 << ((l * 4) + 1)))
        OutOps[l] = Ops[1];
      else
        OutOps[l] = Ops[0];

      for (unsigned i = 0; i != NumElts/2; ++i) {
        // Start with ith element of the source for this lane.
        unsigned Idx = (l * NumElts) + i;
        // If bit 0 of the immediate half is set, switch to the high half of
        // the source.
        if (Imm & (1 << (l * 4)))
          Idx += NumElts/2;
        Indices[(l * (NumElts/2)) + i] = Idx;
      }
    }

    return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
                                       makeArrayRef(Indices, NumElts),
                                       "vperm");
  }
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift: {
    unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
    llvm::Type *ResultType = Ops[0]->getType();
    // Builtin type is vXi64 so multiply by 8 to get bytes.
    unsigned NumElts = ResultType->getVectorNumElements() * 8;

    // If pslldq is shifting the vector more than 15 bytes, emit zero.
    if (ShiftVal >= 16)
      return llvm::Constant::getNullValue(ResultType);

    uint32_t Indices[64];
    // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that.
    for (unsigned l = 0; l != NumElts; l += 16) {
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - ShiftVal;
        if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
        Indices[l + i] = Idx + l;
      }
    }

    llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
    Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
    Value *Zero = llvm::Constant::getNullValue(VecTy);
    Value *SV = Builder.CreateShuffleVector(Zero, Cast,
                                            makeArrayRef(Indices, NumElts),
                                            "pslldq");
    return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
  }
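  // For illustration only: _mm_slli_si128(a, 3) becomes a byte shuffle whose
  // low three lanes come from a zero vector, roughly
  //   shufflevector(zero, a, <13, 14, 15, 16, ..., 28>)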
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift: {
    unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
    llvm::Type *ResultType = Ops[0]->getType();
    // Builtin type is vXi64 so multiply by 8 to get bytes.
    unsigned NumElts = ResultType->getVectorNumElements() * 8;

    // If psrldq is shifting the vector more than 15 bytes, emit zero.
    if (ShiftVal >= 16)
      return llvm::Constant::getNullValue(ResultType);

    uint32_t Indices[64];
    // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that.
    for (unsigned l = 0; l != NumElts; l += 16) {
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + ShiftVal;
        if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
        Indices[l + i] = Idx + l;
      }
    }

    llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
    Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
    Value *Zero = llvm::Constant::getNullValue(VecTy);
    Value *SV = Builder.CreateShuffleVector(Cast, Zero,
                                            makeArrayRef(Indices, NumElts),
                                            "psrldq");
    return Builder.CreateBitCast(SV, ResultType, "cast");
  }
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi: {
    unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
    unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();

    if (ShiftVal >= NumElts)
      return llvm::Constant::getNullValue(Ops[0]->getType());

    Value *In = getMaskVecValue(*this, Ops[0], NumElts);

    uint32_t Indices[64];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = NumElts + i - ShiftVal;

    Value *Zero = llvm::Constant::getNullValue(In->getType());
    Value *SV = Builder.CreateShuffleVector(Zero, In,
                                            makeArrayRef(Indices, NumElts),
                                            "kshiftl");
    return Builder.CreateBitCast(SV, Ops[0]->getType());
  }
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi: {
    unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
    unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();

    if (ShiftVal >= NumElts)
      return llvm::Constant::getNullValue(Ops[0]->getType());

    Value *In = getMaskVecValue(*this, Ops[0], NumElts);

    uint32_t Indices[64];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i + ShiftVal;

    Value *Zero = llvm::Constant::getNullValue(In->getType());
    Value *SV = Builder.CreateShuffleVector(In, Zero,
                                            makeArrayRef(Indices, NumElts),
                                            "kshiftr");
    return Builder.CreateBitCast(SV, Ops[0]->getType());
  }
  case X86::BI__builtin_ia32_movnti:
  case X86::BI__builtin_ia32_movnti64:
  case X86::BI__builtin_ia32_movntsd:
  case X86::BI__builtin_ia32_movntss: {
    llvm::MDNode *Node = llvm::MDNode::get(
        getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));

    Value *Ptr = Ops[0];
    Value *Src = Ops[1];

    // Extract the 0'th element of the source vector.
    if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
        BuiltinID == X86::BI__builtin_ia32_movntss)
      Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(
        Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");

    // Unaligned nontemporal store of the scalar value.
    StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
    SI->setAlignment(llvm::Align::None());
    return SI;
  }
  // Rotate is a special case of funnel shift - 1st 2 args are the same.
  case X86::BI__builtin_ia32_vprotb:
  case X86::BI__builtin_ia32_vprotw:
  case X86::BI__builtin_ia32_vprotd:
  case X86::BI__builtin_ia32_vprotq:
  case X86::BI__builtin_ia32_vprotbi:
  case X86::BI__builtin_ia32_vprotwi:
  case X86::BI__builtin_ia32_vprotdi:
  case X86::BI__builtin_ia32_vprotqi:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prolvd128:
  case X86::BI__builtin_ia32_prolvd256:
  case X86::BI__builtin_ia32_prolvd512:
  case X86::BI__builtin_ia32_prolvq128:
  case X86::BI__builtin_ia32_prolvq256:
  case X86::BI__builtin_ia32_prolvq512:
    return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prorvd128:
  case X86::BI__builtin_ia32_prorvd256:
  case X86::BI__builtin_ia32_prorvd512:
  case X86::BI__builtin_ia32_prorvq128:
  case X86::BI__builtin_ia32_prorvq256:
  case X86::BI__builtin_ia32_prorvq512:
    return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
11265 case X86::BI__builtin_ia32_selectb_128:
11266 case X86::BI__builtin_ia32_selectb_256:
11267 case X86::BI__builtin_ia32_selectb_512:
11268 case X86::BI__builtin_ia32_selectw_128:
11269 case X86::BI__builtin_ia32_selectw_256:
11270 case X86::BI__builtin_ia32_selectw_512:
11271 case X86::BI__builtin_ia32_selectd_128:
11272 case X86::BI__builtin_ia32_selectd_256:
11273 case X86::BI__builtin_ia32_selectd_512:
11274 case X86::BI__builtin_ia32_selectq_128:
11275 case X86::BI__builtin_ia32_selectq_256:
11276 case X86::BI__builtin_ia32_selectq_512:
11277 case X86::BI__builtin_ia32_selectps_128:
11278 case X86::BI__builtin_ia32_selectps_256:
11279 case X86::BI__builtin_ia32_selectps_512:
11280 case X86::BI__builtin_ia32_selectpd_128:
11281 case X86::BI__builtin_ia32_selectpd_256:
11282 case X86::BI__builtin_ia32_selectpd_512:
11283 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
11284 case X86::BI__builtin_ia32_selectss_128:
11285 case X86::BI__builtin_ia32_selectsd_128: {
11286 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11287 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11288 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
11289 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
11291 case X86::BI__builtin_ia32_cmpb128_mask:
11292 case X86::BI__builtin_ia32_cmpb256_mask:
11293 case X86::BI__builtin_ia32_cmpb512_mask:
11294 case X86::BI__builtin_ia32_cmpw128_mask:
11295 case X86::BI__builtin_ia32_cmpw256_mask:
11296 case X86::BI__builtin_ia32_cmpw512_mask:
11297 case X86::BI__builtin_ia32_cmpd128_mask:
11298 case X86::BI__builtin_ia32_cmpd256_mask:
11299 case X86::BI__builtin_ia32_cmpd512_mask:
11300 case X86::BI__builtin_ia32_cmpq128_mask:
11301 case X86::BI__builtin_ia32_cmpq256_mask:
11302 case X86::BI__builtin_ia32_cmpq512_mask: {
11303 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11304 return EmitX86MaskedCompare(*this, CC, true, Ops);
11306 case X86::BI__builtin_ia32_ucmpb128_mask:
11307 case X86::BI__builtin_ia32_ucmpb256_mask:
11308 case X86::BI__builtin_ia32_ucmpb512_mask:
11309 case X86::BI__builtin_ia32_ucmpw128_mask:
11310 case X86::BI__builtin_ia32_ucmpw256_mask:
11311 case X86::BI__builtin_ia32_ucmpw512_mask:
11312 case X86::BI__builtin_ia32_ucmpd128_mask:
11313 case X86::BI__builtin_ia32_ucmpd256_mask:
11314 case X86::BI__builtin_ia32_ucmpd512_mask:
11315 case X86::BI__builtin_ia32_ucmpq128_mask:
11316 case X86::BI__builtin_ia32_ucmpq256_mask:
11317 case X86::BI__builtin_ia32_ucmpq512_mask: {
11318 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11319 return EmitX86MaskedCompare(*this, CC, false, Ops);
11321 case X86::BI__builtin_ia32_vpcomb:
11322 case X86::BI__builtin_ia32_vpcomw:
11323 case X86::BI__builtin_ia32_vpcomd:
11324 case X86::BI__builtin_ia32_vpcomq:
11325 return EmitX86vpcom(*this, Ops, true);
11326 case X86::BI__builtin_ia32_vpcomub:
11327 case X86::BI__builtin_ia32_vpcomuw:
11328 case X86::BI__builtin_ia32_vpcomud:
11329 case X86::BI__builtin_ia32_vpcomuq:
11330 return EmitX86vpcom(*this, Ops, false);
11332 case X86::BI__builtin_ia32_kortestcqi:
11333 case X86::BI__builtin_ia32_kortestchi:
11334 case X86::BI__builtin_ia32_kortestcsi:
11335 case X86::BI__builtin_ia32_kortestcdi: {
11336 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
11337 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
11338 Value *Cmp = Builder.CreateICmpEQ(Or, C);
11339 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
11341 case X86::BI__builtin_ia32_kortestzqi:
11342 case X86::BI__builtin_ia32_kortestzhi:
11343 case X86::BI__builtin_ia32_kortestzsi:
11344 case X86::BI__builtin_ia32_kortestzdi: {
11345 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
11346 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
11347 Value *Cmp = Builder.CreateICmpEQ(Or, C);
11348 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
11351 case X86::BI__builtin_ia32_ktestcqi:
11352 case X86::BI__builtin_ia32_ktestzqi:
11353 case X86::BI__builtin_ia32_ktestchi:
11354 case X86::BI__builtin_ia32_ktestzhi:
11355 case X86::BI__builtin_ia32_ktestcsi:
11356 case X86::BI__builtin_ia32_ktestzsi:
11357 case X86::BI__builtin_ia32_ktestcdi:
11358 case X86::BI__builtin_ia32_ktestzdi: {
11360 switch (BuiltinID) {
11361 default: llvm_unreachable("Unsupported intrinsic!");
11362 case X86::BI__builtin_ia32_ktestcqi:
11363 IID = Intrinsic::x86_avx512_ktestc_b;
11365 case X86::BI__builtin_ia32_ktestzqi:
11366 IID = Intrinsic::x86_avx512_ktestz_b;
11368 case X86::BI__builtin_ia32_ktestchi:
11369 IID = Intrinsic::x86_avx512_ktestc_w;
11371 case X86::BI__builtin_ia32_ktestzhi:
11372 IID = Intrinsic::x86_avx512_ktestz_w;
11374 case X86::BI__builtin_ia32_ktestcsi:
11375 IID = Intrinsic::x86_avx512_ktestc_d;
11377 case X86::BI__builtin_ia32_ktestzsi:
11378 IID = Intrinsic::x86_avx512_ktestz_d;
11380 case X86::BI__builtin_ia32_ktestcdi:
11381 IID = Intrinsic::x86_avx512_ktestc_q;
11383 case X86::BI__builtin_ia32_ktestzdi:
11384 IID = Intrinsic::x86_avx512_ktestz_q;
11388 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11389 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
11390 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
11391 Function *Intr = CGM.getIntrinsic(IID);
11392 return Builder.CreateCall(Intr, {LHS, RHS});
11395 case X86::BI__builtin_ia32_kaddqi:
11396 case X86::BI__builtin_ia32_kaddhi:
11397 case X86::BI__builtin_ia32_kaddsi:
11398 case X86::BI__builtin_ia32_kadddi: {
11400 switch (BuiltinID) {
11401 default: llvm_unreachable("Unsupported intrinsic!");
11402 case X86::BI__builtin_ia32_kaddqi:
11403 IID = Intrinsic::x86_avx512_kadd_b;
11405 case X86::BI__builtin_ia32_kaddhi:
11406 IID = Intrinsic::x86_avx512_kadd_w;
11408 case X86::BI__builtin_ia32_kaddsi:
11409 IID = Intrinsic::x86_avx512_kadd_d;
11411 case X86::BI__builtin_ia32_kadddi:
11412 IID = Intrinsic::x86_avx512_kadd_q;
11416 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11417 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
11418 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
11419 Function *Intr = CGM.getIntrinsic(IID);
11420 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
11421 return Builder.CreateBitCast(Res, Ops[0]->getType());
11423 case X86::BI__builtin_ia32_kandqi:
11424 case X86::BI__builtin_ia32_kandhi:
11425 case X86::BI__builtin_ia32_kandsi:
11426 case X86::BI__builtin_ia32_kanddi:
11427 return EmitX86MaskLogic(*this, Instruction::And, Ops);
11428 case X86::BI__builtin_ia32_kandnqi:
11429 case X86::BI__builtin_ia32_kandnhi:
11430 case X86::BI__builtin_ia32_kandnsi:
11431 case X86::BI__builtin_ia32_kandndi:
11432 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
11433 case X86::BI__builtin_ia32_korqi:
11434 case X86::BI__builtin_ia32_korhi:
11435 case X86::BI__builtin_ia32_korsi:
11436 case X86::BI__builtin_ia32_kordi:
11437 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
11438 case X86::BI__builtin_ia32_kxnorqi:
11439 case X86::BI__builtin_ia32_kxnorhi:
11440 case X86::BI__builtin_ia32_kxnorsi:
11441 case X86::BI__builtin_ia32_kxnordi:
11442 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
11443 case X86::BI__builtin_ia32_kxorqi:
11444 case X86::BI__builtin_ia32_kxorhi:
11445 case X86::BI__builtin_ia32_kxorsi:
11446 case X86::BI__builtin_ia32_kxordi:
11447 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
11448 case X86::BI__builtin_ia32_knotqi:
11449 case X86::BI__builtin_ia32_knothi:
11450 case X86::BI__builtin_ia32_knotsi:
11451 case X86::BI__builtin_ia32_knotdi: {
11452 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11453 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
11454 return Builder.CreateBitCast(Builder.CreateNot(Res),
11455 Ops[0]->getType());
11457 case X86::BI__builtin_ia32_kmovb:
11458 case X86::BI__builtin_ia32_kmovw:
11459 case X86::BI__builtin_ia32_kmovd:
11460 case X86::BI__builtin_ia32_kmovq: {
11461 // Bitcast to vXi1 type and then back to integer. This gets the mask
11462 // register type into the IR, but might be optimized out depending on
11463 // what's around it.
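    // For instance, a kmovw whose result only feeds a masked operation will
    // typically leave no separate mask move in the final code.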
    unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
    Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
    return Builder.CreateBitCast(Res, Ops[0]->getType());
  }

  case X86::BI__builtin_ia32_kunpckdi:
  case X86::BI__builtin_ia32_kunpcksi:
  case X86::BI__builtin_ia32_kunpckhi: {
    unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
    Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
    Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
    uint32_t Indices[64];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;

    // First extract half of each vector. This gives better codegen than
    // doing it in a single shuffle.
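    // E.g. for the 16-bit kunpckbw form, the result's low byte is Ops[1]'s
    // low byte and its high byte is Ops[0]'s low byte, which is why the
    // concatenating shuffle below swaps the operands.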
    LHS = Builder.CreateShuffleVector(LHS, LHS,
                                      makeArrayRef(Indices, NumElts / 2));
    RHS = Builder.CreateShuffleVector(RHS, RHS,
                                      makeArrayRef(Indices, NumElts / 2));
    // Concat the vectors.
    // NOTE: Operands are swapped to match the intrinsic definition.
    Value *Res = Builder.CreateShuffleVector(RHS, LHS,
                                             makeArrayRef(Indices, NumElts));
    return Builder.CreateBitCast(Res, Ops[0]->getType());
  }

  case X86::BI__builtin_ia32_vplzcntd_128:
  case X86::BI__builtin_ia32_vplzcntd_256:
  case X86::BI__builtin_ia32_vplzcntd_512:
  case X86::BI__builtin_ia32_vplzcntq_128:
  case X86::BI__builtin_ia32_vplzcntq_256:
  case X86::BI__builtin_ia32_vplzcntq_512: {
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
    return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
  }
  case X86::BI__builtin_ia32_sqrtss:
  case X86::BI__builtin_ia32_sqrtsd: {
    Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
    A = Builder.CreateCall(F, {A});
    return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
  }
  case X86::BI__builtin_ia32_sqrtsd_round_mask:
  case X86::BI__builtin_ia32_sqrtss_round_mask: {
    unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
    // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
    // otherwise keep the intrinsic.
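    // (_MM_FROUND_CUR_DIRECTION is 4, so e.g. a sqrt_round call with that
    // rounding argument can lower to a plain llvm.sqrt plus a scalar select.)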
    if (CC != 4) {
      Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
                          Intrinsic::x86_avx512_mask_sqrt_sd :
                          Intrinsic::x86_avx512_mask_sqrt_ss;
      return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
    }
    Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
    A = Builder.CreateCall(F, A);
    Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
    A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
    return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
  }
  case X86::BI__builtin_ia32_sqrtpd256:
  case X86::BI__builtin_ia32_sqrtpd:
  case X86::BI__builtin_ia32_sqrtps256:
  case X86::BI__builtin_ia32_sqrtps:
  case X86::BI__builtin_ia32_sqrtps512:
  case X86::BI__builtin_ia32_sqrtpd512: {
    if (Ops.size() == 2) {
      unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
      // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
      // otherwise keep the intrinsic.
      if (CC != 4) {
        Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
                            Intrinsic::x86_avx512_sqrt_ps_512 :
                            Intrinsic::x86_avx512_sqrt_pd_512;
        return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
      }
    }
    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
    return Builder.CreateCall(F, Ops[0]);
  }
  case X86::BI__builtin_ia32_pabsb128:
  case X86::BI__builtin_ia32_pabsw128:
  case X86::BI__builtin_ia32_pabsd128:
  case X86::BI__builtin_ia32_pabsb256:
  case X86::BI__builtin_ia32_pabsw256:
  case X86::BI__builtin_ia32_pabsd256:
  case X86::BI__builtin_ia32_pabsq128:
  case X86::BI__builtin_ia32_pabsq256:
  case X86::BI__builtin_ia32_pabsb512:
  case X86::BI__builtin_ia32_pabsw512:
  case X86::BI__builtin_ia32_pabsd512:
  case X86::BI__builtin_ia32_pabsq512:
    return EmitX86Abs(*this, Ops);

  case X86::BI__builtin_ia32_pmaxsb128:
  case X86::BI__builtin_ia32_pmaxsw128:
  case X86::BI__builtin_ia32_pmaxsd128:
  case X86::BI__builtin_ia32_pmaxsq128:
  case X86::BI__builtin_ia32_pmaxsb256:
  case X86::BI__builtin_ia32_pmaxsw256:
  case X86::BI__builtin_ia32_pmaxsd256:
  case X86::BI__builtin_ia32_pmaxsq256:
  case X86::BI__builtin_ia32_pmaxsb512:
  case X86::BI__builtin_ia32_pmaxsw512:
  case X86::BI__builtin_ia32_pmaxsd512:
  case X86::BI__builtin_ia32_pmaxsq512:
    return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
  case X86::BI__builtin_ia32_pmaxub128:
  case X86::BI__builtin_ia32_pmaxuw128:
  case X86::BI__builtin_ia32_pmaxud128:
  case X86::BI__builtin_ia32_pmaxuq128:
  case X86::BI__builtin_ia32_pmaxub256:
  case X86::BI__builtin_ia32_pmaxuw256:
  case X86::BI__builtin_ia32_pmaxud256:
  case X86::BI__builtin_ia32_pmaxuq256:
  case X86::BI__builtin_ia32_pmaxub512:
  case X86::BI__builtin_ia32_pmaxuw512:
  case X86::BI__builtin_ia32_pmaxud512:
  case X86::BI__builtin_ia32_pmaxuq512:
    return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
  case X86::BI__builtin_ia32_pminsb128:
  case X86::BI__builtin_ia32_pminsw128:
  case X86::BI__builtin_ia32_pminsd128:
  case X86::BI__builtin_ia32_pminsq128:
  case X86::BI__builtin_ia32_pminsb256:
  case X86::BI__builtin_ia32_pminsw256:
  case X86::BI__builtin_ia32_pminsd256:
  case X86::BI__builtin_ia32_pminsq256:
  case X86::BI__builtin_ia32_pminsb512:
  case X86::BI__builtin_ia32_pminsw512:
  case X86::BI__builtin_ia32_pminsd512:
  case X86::BI__builtin_ia32_pminsq512:
    return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
  case X86::BI__builtin_ia32_pminub128:
  case X86::BI__builtin_ia32_pminuw128:
  case X86::BI__builtin_ia32_pminud128:
  case X86::BI__builtin_ia32_pminuq128:
  case X86::BI__builtin_ia32_pminub256:
  case X86::BI__builtin_ia32_pminuw256:
  case X86::BI__builtin_ia32_pminud256:
  case X86::BI__builtin_ia32_pminuq256:
  case X86::BI__builtin_ia32_pminub512:
  case X86::BI__builtin_ia32_pminuw512:
  case X86::BI__builtin_ia32_pminud512:
  case X86::BI__builtin_ia32_pminuq512:
    return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);

  case X86::BI__builtin_ia32_pmuludq128:
  case X86::BI__builtin_ia32_pmuludq256:
  case X86::BI__builtin_ia32_pmuludq512:
    return EmitX86Muldq(*this, /*IsSigned*/false, Ops);

  case X86::BI__builtin_ia32_pmuldq128:
  case X86::BI__builtin_ia32_pmuldq256:
  case X86::BI__builtin_ia32_pmuldq512:
    return EmitX86Muldq(*this, /*IsSigned*/true, Ops);

  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq256_mask:
    return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);

  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);

  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
    return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);

  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    // Ops 0 and 1 are swapped.
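    // (llvm.fshr takes the high-bits input first, and the builtin computes
    // concat(Ops[1], Ops[0]) >> Ops[2], hence the swap.)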
    return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);

  case X86::BI__builtin_ia32_vpshldvd128:
  case X86::BI__builtin_ia32_vpshldvd256:
  case X86::BI__builtin_ia32_vpshldvd512:
  case X86::BI__builtin_ia32_vpshldvq128:
  case X86::BI__builtin_ia32_vpshldvq256:
  case X86::BI__builtin_ia32_vpshldvq512:
  case X86::BI__builtin_ia32_vpshldvw128:
  case X86::BI__builtin_ia32_vpshldvw256:
  case X86::BI__builtin_ia32_vpshldvw512:
    return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);

  case X86::BI__builtin_ia32_vpshrdvd128:
  case X86::BI__builtin_ia32_vpshrdvd256:
  case X86::BI__builtin_ia32_vpshrdvd512:
  case X86::BI__builtin_ia32_vpshrdvq128:
  case X86::BI__builtin_ia32_vpshrdvq256:
  case X86::BI__builtin_ia32_vpshrdvq512:
  case X86::BI__builtin_ia32_vpshrdvw128:
  case X86::BI__builtin_ia32_vpshrdvw256:
  case X86::BI__builtin_ia32_vpshrdvw512:
    // Ops 0 and 1 are swapped.
    return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);

  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
    Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
    return Builder.CreateCall(F, Ops, "pswapd");
  }
  case X86::BI__builtin_ia32_rdrand16_step:
  case X86::BI__builtin_ia32_rdrand32_step:
  case X86::BI__builtin_ia32_rdrand64_step:
  case X86::BI__builtin_ia32_rdseed16_step:
  case X86::BI__builtin_ia32_rdseed32_step:
  case X86::BI__builtin_ia32_rdseed64_step: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_rdrand16_step:
      ID = Intrinsic::x86_rdrand_16;
      break;
    case X86::BI__builtin_ia32_rdrand32_step:
      ID = Intrinsic::x86_rdrand_32;
      break;
    case X86::BI__builtin_ia32_rdrand64_step:
      ID = Intrinsic::x86_rdrand_64;
      break;
    case X86::BI__builtin_ia32_rdseed16_step:
      ID = Intrinsic::x86_rdseed_16;
      break;
    case X86::BI__builtin_ia32_rdseed32_step:
      ID = Intrinsic::x86_rdseed_32;
      break;
    case X86::BI__builtin_ia32_rdseed64_step:
      ID = Intrinsic::x86_rdseed_64;
      break;
    }

    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
    Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
                                      Ops[0]);
    return Builder.CreateExtractValue(Call, 1);
  }
  case X86::BI__builtin_ia32_addcarryx_u32:
  case X86::BI__builtin_ia32_addcarryx_u64:
  case X86::BI__builtin_ia32_subborrow_u32:
  case X86::BI__builtin_ia32_subborrow_u64: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_addcarryx_u32:
      IID = Intrinsic::x86_addcarry_32;
      break;
    case X86::BI__builtin_ia32_addcarryx_u64:
      IID = Intrinsic::x86_addcarry_64;
      break;
    case X86::BI__builtin_ia32_subborrow_u32:
      IID = Intrinsic::x86_subborrow_32;
      break;
    case X86::BI__builtin_ia32_subborrow_u64:
      IID = Intrinsic::x86_subborrow_64;
      break;
    }

    Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
                                     { Ops[0], Ops[1], Ops[2] });
    Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
                                      Ops[3]);
    return Builder.CreateExtractValue(Call, 0);
  }

  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    Value *MaskIn = Ops[2];
    Ops.erase(&Ops[2]);

    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_fpclassps128_mask:
      ID = Intrinsic::x86_avx512_fpclass_ps_128;
      break;
    case X86::BI__builtin_ia32_fpclassps256_mask:
      ID = Intrinsic::x86_avx512_fpclass_ps_256;
      break;
    case X86::BI__builtin_ia32_fpclassps512_mask:
      ID = Intrinsic::x86_avx512_fpclass_ps_512;
      break;
    case X86::BI__builtin_ia32_fpclasspd128_mask:
      ID = Intrinsic::x86_avx512_fpclass_pd_128;
      break;
    case X86::BI__builtin_ia32_fpclasspd256_mask:
      ID = Intrinsic::x86_avx512_fpclass_pd_256;
      break;
    case X86::BI__builtin_ia32_fpclasspd512_mask:
      ID = Intrinsic::x86_avx512_fpclass_pd_512;
      break;
    }

    Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
    return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
  }

  case X86::BI__builtin_ia32_vp2intersect_q_512:
  case X86::BI__builtin_ia32_vp2intersect_q_256:
  case X86::BI__builtin_ia32_vp2intersect_q_128:
  case X86::BI__builtin_ia32_vp2intersect_d_512:
  case X86::BI__builtin_ia32_vp2intersect_d_256:
  case X86::BI__builtin_ia32_vp2intersect_d_128: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    Intrinsic::ID ID;

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_vp2intersect_q_512:
      ID = Intrinsic::x86_avx512_vp2intersect_q_512;
      break;
    case X86::BI__builtin_ia32_vp2intersect_q_256:
      ID = Intrinsic::x86_avx512_vp2intersect_q_256;
      break;
    case X86::BI__builtin_ia32_vp2intersect_q_128:
      ID = Intrinsic::x86_avx512_vp2intersect_q_128;
      break;
    case X86::BI__builtin_ia32_vp2intersect_d_512:
      ID = Intrinsic::x86_avx512_vp2intersect_d_512;
      break;
    case X86::BI__builtin_ia32_vp2intersect_d_256:
      ID = Intrinsic::x86_avx512_vp2intersect_d_256;
      break;
    case X86::BI__builtin_ia32_vp2intersect_d_128:
      ID = Intrinsic::x86_avx512_vp2intersect_d_128;
      break;
    }

    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
    Value *Result = Builder.CreateExtractValue(Call, 0);
    Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
    Builder.CreateDefaultAlignedStore(Result, Ops[2]);

    Result = Builder.CreateExtractValue(Call, 1);
    Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
    return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
  }

  case X86::BI__builtin_ia32_vpmultishiftqb128:
  case X86::BI__builtin_ia32_vpmultishiftqb256:
  case X86::BI__builtin_ia32_vpmultishiftqb512: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_vpmultishiftqb128:
      ID = Intrinsic::x86_avx512_pmultishift_qb_128;
      break;
    case X86::BI__builtin_ia32_vpmultishiftqb256:
      ID = Intrinsic::x86_avx512_pmultishift_qb_256;
      break;
    case X86::BI__builtin_ia32_vpmultishiftqb512:
      ID = Intrinsic::x86_avx512_pmultishift_qb_512;
      break;
    }

    return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
  }

  case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
  case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
  case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
    Value *MaskIn = Ops[2];
    Ops.erase(&Ops[2]);

    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
      ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
      break;
    case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
      ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
      break;
    case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
      ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
      break;
    }

    Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
    return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
  }

  // packed comparison intrinsics
  case X86::BI__builtin_ia32_cmpeqps:
  case X86::BI__builtin_ia32_cmpeqpd:
    return getVectorFCmpIR(CmpInst::FCMP_OEQ);
  case X86::BI__builtin_ia32_cmpltps:
  case X86::BI__builtin_ia32_cmpltpd:
    return getVectorFCmpIR(CmpInst::FCMP_OLT);
  case X86::BI__builtin_ia32_cmpleps:
  case X86::BI__builtin_ia32_cmplepd:
    return getVectorFCmpIR(CmpInst::FCMP_OLE);
  case X86::BI__builtin_ia32_cmpunordps:
  case X86::BI__builtin_ia32_cmpunordpd:
    return getVectorFCmpIR(CmpInst::FCMP_UNO);
  case X86::BI__builtin_ia32_cmpneqps:
  case X86::BI__builtin_ia32_cmpneqpd:
    return getVectorFCmpIR(CmpInst::FCMP_UNE);
  case X86::BI__builtin_ia32_cmpnltps:
  case X86::BI__builtin_ia32_cmpnltpd:
    return getVectorFCmpIR(CmpInst::FCMP_UGE);
  case X86::BI__builtin_ia32_cmpnleps:
  case X86::BI__builtin_ia32_cmpnlepd:
    return getVectorFCmpIR(CmpInst::FCMP_UGT);
  case X86::BI__builtin_ia32_cmpordps:
  case X86::BI__builtin_ia32_cmpordpd:
    return getVectorFCmpIR(CmpInst::FCMP_ORD);
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmppd512_mask: {
    // Lowering vector comparisons to fcmp instructions, while
    // ignoring the requested signalling behaviour and
    // the requested rounding mode.
    // This is only possible as long as FENV_ACCESS is not implemented.
    // See also: https://reviews.llvm.org/D45616

    // The third argument is the comparison condition, an integer in the
    // range [0, 31].
    unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;

    // Lowering to IR fcmp instruction.
    // Ignoring requested signaling behaviour,
    // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
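    // Likewise, for example, _CMP_NLT_US (0x05) and _CMP_NLT_UQ (0x15) both
    // become an IR 'fcmp uge'.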
    FCmpInst::Predicate Pred;
    switch (CC) {
    case 0x00: Pred = FCmpInst::FCMP_OEQ;   break;
    case 0x01: Pred = FCmpInst::FCMP_OLT;   break;
    case 0x02: Pred = FCmpInst::FCMP_OLE;   break;
    case 0x03: Pred = FCmpInst::FCMP_UNO;   break;
    case 0x04: Pred = FCmpInst::FCMP_UNE;   break;
    case 0x05: Pred = FCmpInst::FCMP_UGE;   break;
    case 0x06: Pred = FCmpInst::FCMP_UGT;   break;
    case 0x07: Pred = FCmpInst::FCMP_ORD;   break;
    case 0x08: Pred = FCmpInst::FCMP_UEQ;   break;
    case 0x09: Pred = FCmpInst::FCMP_ULT;   break;
    case 0x0a: Pred = FCmpInst::FCMP_ULE;   break;
    case 0x0b: Pred = FCmpInst::FCMP_FALSE; break;
    case 0x0c: Pred = FCmpInst::FCMP_ONE;   break;
    case 0x0d: Pred = FCmpInst::FCMP_OGE;   break;
    case 0x0e: Pred = FCmpInst::FCMP_OGT;   break;
    case 0x0f: Pred = FCmpInst::FCMP_TRUE;  break;
    case 0x10: Pred = FCmpInst::FCMP_OEQ;   break;
    case 0x11: Pred = FCmpInst::FCMP_OLT;   break;
    case 0x12: Pred = FCmpInst::FCMP_OLE;   break;
    case 0x13: Pred = FCmpInst::FCMP_UNO;   break;
    case 0x14: Pred = FCmpInst::FCMP_UNE;   break;
    case 0x15: Pred = FCmpInst::FCMP_UGE;   break;
    case 0x16: Pred = FCmpInst::FCMP_UGT;   break;
    case 0x17: Pred = FCmpInst::FCMP_ORD;   break;
    case 0x18: Pred = FCmpInst::FCMP_UEQ;   break;
    case 0x19: Pred = FCmpInst::FCMP_ULT;   break;
    case 0x1a: Pred = FCmpInst::FCMP_ULE;   break;
    case 0x1b: Pred = FCmpInst::FCMP_FALSE; break;
    case 0x1c: Pred = FCmpInst::FCMP_ONE;   break;
    case 0x1d: Pred = FCmpInst::FCMP_OGE;   break;
    case 0x1e: Pred = FCmpInst::FCMP_OGT;   break;
    case 0x1f: Pred = FCmpInst::FCMP_TRUE;  break;
    default: llvm_unreachable("Unhandled CC");
    }

    // Builtins without the _mask suffix return a vector of integers
    // of the same width as the input vectors.
    switch (BuiltinID) {
    case X86::BI__builtin_ia32_cmpps512_mask:
    case X86::BI__builtin_ia32_cmppd512_mask:
    case X86::BI__builtin_ia32_cmpps128_mask:
    case X86::BI__builtin_ia32_cmpps256_mask:
    case X86::BI__builtin_ia32_cmppd128_mask:
    case X86::BI__builtin_ia32_cmppd256_mask: {
      unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
      Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
      return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
    }
    default:
      return getVectorFCmpIR(Pred);
    }
  }

  // SSE scalar comparison intrinsics
  case X86::BI__builtin_ia32_cmpeqss:
    return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
  case X86::BI__builtin_ia32_cmpltss:
    return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
  case X86::BI__builtin_ia32_cmpless:
    return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
  case X86::BI__builtin_ia32_cmpunordss:
    return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
  case X86::BI__builtin_ia32_cmpneqss:
    return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
  case X86::BI__builtin_ia32_cmpnltss:
    return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
  case X86::BI__builtin_ia32_cmpnless:
    return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
  case X86::BI__builtin_ia32_cmpordss:
    return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
  case X86::BI__builtin_ia32_cmpeqsd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
  case X86::BI__builtin_ia32_cmpltsd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
  case X86::BI__builtin_ia32_cmplesd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
  case X86::BI__builtin_ia32_cmpunordsd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
  case X86::BI__builtin_ia32_cmpneqsd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
  case X86::BI__builtin_ia32_cmpnltsd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
  case X86::BI__builtin_ia32_cmpnlesd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
  case X86::BI__builtin_ia32_cmpordsd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);

  // AVX512 bf16 intrinsics
  case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
    Ops[2] = getMaskVecValue(*this, Ops[2],
                             Ops[0]->getType()->getVectorNumElements());
    Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
    return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
  }
  case X86::BI__builtin_ia32_cvtsbf162ss_32:
    return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);

  case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
  case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
      IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
      break;
    case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
      IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
      break;
    }
    Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
    return EmitX86Select(*this, Ops[2], Res, Ops[1]);
  }

  case X86::BI__emul:
  case X86::BI__emulu: {
    llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
    bool isSigned = (BuiltinID == X86::BI__emul);
    Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
    Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
    return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
  }
  case X86::BI__mulh:
  case X86::BI__umulh:
  case X86::BI_mul128:
  case X86::BI_umul128: {
    llvm::Type *ResType = ConvertType(E->getType());
    llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);

    bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
    Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
    Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);

    Value *MulResult, *HigherBits;
    if (IsSigned) {
      MulResult = Builder.CreateNSWMul(LHS, RHS);
      HigherBits = Builder.CreateAShr(MulResult, 64);
    } else {
      MulResult = Builder.CreateNUWMul(LHS, RHS);
      HigherBits = Builder.CreateLShr(MulResult, 64);
    }
    HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);

    if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
      return HigherBits;

    Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
    Builder.CreateStore(HigherBits, HighBitsAddress);
    return Builder.CreateIntCast(MulResult, ResType, IsSigned);
  }

  case X86::BI__faststorefence: {
    return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
                               llvm::SyncScope::System);
  }
  case X86::BI__shiftleft128:
  case X86::BI__shiftright128: {
    // FIXME: Once fshl/fshr no longer add an unneeded and and cmov, do this:
    // llvm::Function *F = CGM.getIntrinsic(
    //   BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
    //   Int64Ty);
    // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    // return Builder.CreateCall(F, Ops);
    llvm::Type *Int128Ty = Builder.getInt128Ty();
    Value *HighPart128 =
        Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64);
    Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty);
    Value *Val = Builder.CreateOr(HighPart128, LowPart128);
    Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
                                   llvm::ConstantInt::get(Int128Ty, 0x3f));
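    // E.g. __shiftleft128(lo, hi, n) returns bits [127:64] of (hi:lo << n);
    // only the low six bits of n take part, matching shld/shrd's masking of
    // the shift count.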
    Value *Res;
    if (BuiltinID == X86::BI__shiftleft128)
      Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
    else
      Res = Builder.CreateLShr(Val, Amt);
    return Builder.CreateTrunc(Res, Int64Ty);
  }
  case X86::BI_ReadWriteBarrier:
  case X86::BI_ReadBarrier:
  case X86::BI_WriteBarrier: {
    return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
                               llvm::SyncScope::SingleThread);
  }
  case X86::BI_BitScanForward:
  case X86::BI_BitScanForward64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
  case X86::BI_BitScanReverse:
  case X86::BI_BitScanReverse64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);

  case X86::BI_InterlockedAnd64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
  case X86::BI_InterlockedExchange64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
  case X86::BI_InterlockedExchangeAdd64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
  case X86::BI_InterlockedExchangeSub64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
  case X86::BI_InterlockedOr64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
  case X86::BI_InterlockedXor64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
  case X86::BI_InterlockedDecrement64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
  case X86::BI_InterlockedIncrement64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
  case X86::BI_InterlockedCompareExchange128: {
    // InterlockedCompareExchange128 doesn't directly refer to 128bit ints,
    // instead it takes pointers to 64bit ints for Destination and
    // ComparandResult, and exchange is taken as two 64bit ints (high & low).
    // The previous value is written to ComparandResult, and success is
    // returned.

    llvm::Type *Int128Ty = Builder.getInt128Ty();
    llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();

    Value *Destination =
        Builder.CreateBitCast(Ops[0], Int128PtrTy);
    Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
    Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
    Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
                            getContext().toCharUnitsFromBits(128));

    Value *Exchange = Builder.CreateOr(
        Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
        ExchangeLow128);

    Value *Comparand = Builder.CreateLoad(ComparandResult);

    AtomicCmpXchgInst *CXI =
        Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                    AtomicOrdering::SequentiallyConsistent,
                                    AtomicOrdering::SequentiallyConsistent);
    CXI->setVolatile(true);

    // Write the result back to the inout pointer.
    Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);

    // Get the success boolean and zero extend it to i8.
    Value *Success = Builder.CreateExtractValue(CXI, 1);
    return Builder.CreateZExt(Success, ConvertType(E->getType()));
  }

  case X86::BI_AddressOfReturnAddress: {
    Function *F =
        CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
    return Builder.CreateCall(F);
  }
  case X86::BI__stosb: {
    // We treat __stosb as a volatile memset - it may not generate a "rep
    // stosb" instruction, but it will create a memset that won't be
    // optimized away.
    return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], 1, true);
  }
  case X86::BI__ud2:
    // llvm.trap makes a ud2a instruction on x86.
    return EmitTrapCall(Intrinsic::trap);
  case X86::BI__int2c: {
    // This syscall signals a driver assertion failure in x86 NT kernels.
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
    llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
        getLLVMContext(), llvm::AttributeList::FunctionIndex,
        llvm::Attribute::NoReturn);
    llvm::CallInst *CI = Builder.CreateCall(IA);
    CI->setAttributes(NoReturnAttr);
    return CI;
  }
  case X86::BI__readfsbyte:
  case X86::BI__readfsword:
  case X86::BI__readfsdword:
  case X86::BI__readfsqword: {
    llvm::Type *IntTy = ConvertType(E->getType());
    Value *Ptr =
        Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
    LoadInst *Load = Builder.CreateAlignedLoad(
        IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
    Load->setVolatile(true);
    return Load;
  }
  case X86::BI__readgsbyte:
  case X86::BI__readgsword:
  case X86::BI__readgsdword:
  case X86::BI__readgsqword: {
    llvm::Type *IntTy = ConvertType(E->getType());
    Value *Ptr =
        Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
    LoadInst *Load = Builder.CreateAlignedLoad(
        IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
    Load->setVolatile(true);
    return Load;
  }
  case X86::BI__builtin_ia32_paddsb512:
  case X86::BI__builtin_ia32_paddsw512:
  case X86::BI__builtin_ia32_paddsb256:
  case X86::BI__builtin_ia32_paddsw256:
  case X86::BI__builtin_ia32_paddsb128:
  case X86::BI__builtin_ia32_paddsw128:
    return EmitX86AddSubSatExpr(*this, Ops, true, true);
  case X86::BI__builtin_ia32_paddusb512:
  case X86::BI__builtin_ia32_paddusw512:
  case X86::BI__builtin_ia32_paddusb256:
  case X86::BI__builtin_ia32_paddusw256:
  case X86::BI__builtin_ia32_paddusb128:
  case X86::BI__builtin_ia32_paddusw128:
    return EmitX86AddSubSatExpr(*this, Ops, false, true);
  case X86::BI__builtin_ia32_psubsb512:
  case X86::BI__builtin_ia32_psubsw512:
  case X86::BI__builtin_ia32_psubsb256:
  case X86::BI__builtin_ia32_psubsw256:
  case X86::BI__builtin_ia32_psubsb128:
  case X86::BI__builtin_ia32_psubsw128:
    return EmitX86AddSubSatExpr(*this, Ops, true, false);
  case X86::BI__builtin_ia32_psubusb512:
  case X86::BI__builtin_ia32_psubusw512:
  case X86::BI__builtin_ia32_psubusb256:
  case X86::BI__builtin_ia32_psubusw256:
  case X86::BI__builtin_ia32_psubusb128:
  case X86::BI__builtin_ia32_psubusw128:
    return EmitX86AddSubSatExpr(*this, Ops, false, false);
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return nullptr;

  // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
  // call __builtin_readcyclecounter.
  case PPC::BI__builtin_ppc_get_timebase:
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));

  // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  case PPC::BI__builtin_vsx_lxvd2x:
  case PPC::BI__builtin_vsx_lxvw4x:
  case PPC::BI__builtin_vsx_lxvd2x_be:
  case PPC::BI__builtin_vsx_lxvw4x_be:
  case PPC::BI__builtin_vsx_lxvl:
  case PPC::BI__builtin_vsx_lxvll:
  {
    if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
        BuiltinID == PPC::BI__builtin_vsx_lxvll) {
      Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
    } else {
      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
      Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
      Ops.pop_back();
    }

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    case PPC::BI__builtin_vsx_lxvd2x:
      ID = Intrinsic::ppc_vsx_lxvd2x;
      break;
    case PPC::BI__builtin_vsx_lxvw4x:
      ID = Intrinsic::ppc_vsx_lxvw4x;
      break;
    case PPC::BI__builtin_vsx_lxvd2x_be:
      ID = Intrinsic::ppc_vsx_lxvd2x_be;
      break;
    case PPC::BI__builtin_vsx_lxvw4x_be:
      ID = Intrinsic::ppc_vsx_lxvw4x_be;
      break;
    case PPC::BI__builtin_vsx_lxvl:
      ID = Intrinsic::ppc_vsx_lxvl;
      break;
    case PPC::BI__builtin_vsx_lxvll:
      ID = Intrinsic::ppc_vsx_lxvll;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }

  // vec_st, vec_xst_be
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  case PPC::BI__builtin_vsx_stxvd2x:
  case PPC::BI__builtin_vsx_stxvw4x:
  case PPC::BI__builtin_vsx_stxvd2x_be:
  case PPC::BI__builtin_vsx_stxvw4x_be:
  case PPC::BI__builtin_vsx_stxvl:
  case PPC::BI__builtin_vsx_stxvll:
  {
    if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
        BuiltinID == PPC::BI__builtin_vsx_stxvll) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
    } else {
      Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
      Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
      Ops.pop_back();
    }

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    case PPC::BI__builtin_vsx_stxvd2x:
      ID = Intrinsic::ppc_vsx_stxvd2x;
      break;
    case PPC::BI__builtin_vsx_stxvw4x:
      ID = Intrinsic::ppc_vsx_stxvw4x;
      break;
    case PPC::BI__builtin_vsx_stxvd2x_be:
      ID = Intrinsic::ppc_vsx_stxvd2x_be;
      break;
    case PPC::BI__builtin_vsx_stxvw4x_be:
      ID = Intrinsic::ppc_vsx_stxvw4x_be;
      break;
    case PPC::BI__builtin_vsx_stxvl:
      ID = Intrinsic::ppc_vsx_stxvl;
      break;
    case PPC::BI__builtin_vsx_stxvll:
      ID = Intrinsic::ppc_vsx_stxvll;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  // Square root
  case PPC::BI__builtin_vsx_xvsqrtsp:
  case PPC::BI__builtin_vsx_xvsqrtdp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    ID = Intrinsic::sqrt;
    llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
    return Builder.CreateCall(F, X);
  }
  // Count leading zeros
  case PPC::BI__builtin_altivec_vclzb:
  case PPC::BI__builtin_altivec_vclzh:
  case PPC::BI__builtin_altivec_vclzw:
  case PPC::BI__builtin_altivec_vclzd: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }
  case PPC::BI__builtin_altivec_vctzb:
  case PPC::BI__builtin_altivec_vctzh:
  case PPC::BI__builtin_altivec_vctzw:
  case PPC::BI__builtin_altivec_vctzd: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }
  case PPC::BI__builtin_altivec_vpopcntb:
  case PPC::BI__builtin_altivec_vpopcnth:
  case PPC::BI__builtin_altivec_vpopcntw:
  case PPC::BI__builtin_altivec_vpopcntd: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
    return Builder.CreateCall(F, X);
  }
  // Copy sign
  case PPC::BI__builtin_vsx_xvcpsgnsp:
  case PPC::BI__builtin_vsx_xvcpsgndp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    ID = Intrinsic::copysign;
    llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
    return Builder.CreateCall(F, {X, Y});
  }
  // Rounding/truncation
  case PPC::BI__builtin_vsx_xvrspip:
  case PPC::BI__builtin_vsx_xvrdpip:
  case PPC::BI__builtin_vsx_xvrdpim:
  case PPC::BI__builtin_vsx_xvrspim:
  case PPC::BI__builtin_vsx_xvrdpi:
  case PPC::BI__builtin_vsx_xvrspi:
  case PPC::BI__builtin_vsx_xvrdpic:
  case PPC::BI__builtin_vsx_xvrspic:
  case PPC::BI__builtin_vsx_xvrdpiz:
  case PPC::BI__builtin_vsx_xvrspiz: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
        BuiltinID == PPC::BI__builtin_vsx_xvrspim)
      ID = Intrinsic::floor;
    else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
             BuiltinID == PPC::BI__builtin_vsx_xvrspi)
      ID = Intrinsic::round;
    else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
             BuiltinID == PPC::BI__builtin_vsx_xvrspic)
      ID = Intrinsic::nearbyint;
    else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
             BuiltinID == PPC::BI__builtin_vsx_xvrspip)
      ID = Intrinsic::ceil;
    else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
             BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
      ID = Intrinsic::trunc;
    llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
    return Builder.CreateCall(F, X);
  }

  // Absolute value
  case PPC::BI__builtin_vsx_xvabsdp:
  case PPC::BI__builtin_vsx_xvabssp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
    return Builder.CreateCall(F, X);
  }

  // FMA variations
  case PPC::BI__builtin_vsx_xvmaddadp:
  case PPC::BI__builtin_vsx_xvmaddasp:
  case PPC::BI__builtin_vsx_xvnmaddadp:
  case PPC::BI__builtin_vsx_xvnmaddasp:
  case PPC::BI__builtin_vsx_xvmsubadp:
  case PPC::BI__builtin_vsx_xvmsubasp:
  case PPC::BI__builtin_vsx_xvnmsubadp:
  case PPC::BI__builtin_vsx_xvnmsubasp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    Value *Z = EmitScalarExpr(E->getArg(2));
    Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
    switch (BuiltinID) {
    case PPC::BI__builtin_vsx_xvmaddadp:
    case PPC::BI__builtin_vsx_xvmaddasp:
      return Builder.CreateCall(F, {X, Y, Z});
    case PPC::BI__builtin_vsx_xvnmaddadp:
    case PPC::BI__builtin_vsx_xvnmaddasp:
      return Builder.CreateFSub(Zero,
                                Builder.CreateCall(F, {X, Y, Z}), "sub");
    case PPC::BI__builtin_vsx_xvmsubadp:
    case PPC::BI__builtin_vsx_xvmsubasp:
      return Builder.CreateCall(F,
                                {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
    case PPC::BI__builtin_vsx_xvnmsubadp:
    case PPC::BI__builtin_vsx_xvnmsubasp:
      Value *FsubRes =
          Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
      return Builder.CreateFSub(Zero, FsubRes, "sub");
    }
    llvm_unreachable("Unknown FMA operation");
    return nullptr; // Suppress no-return warning
  }

  case PPC::BI__builtin_vsx_insertword: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);

    // Third argument is a compile time constant int. It must be clamped to
    // the range [0, 12].
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
    assert(ArgCI &&
           "Third arg to xxinsertw intrinsic must be constant integer");
    const int64_t MaxIndex = 12;
    int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);

    // The builtin semantics don't exactly match the xxinsertw instruction's
    // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
    // word from the first argument, and inserts it in the second argument. The
    // instruction extracts the word from its second input register and inserts
    // it into its first input register, so swap the first and second arguments.
    std::swap(Ops[0], Ops[1]);

    // Need to cast the second argument from a vector of unsigned int to a
    // vector of long long.
    Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));

    if (getTarget().isLittleEndian()) {
      // Create a shuffle mask of (1, 0)
      Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
                                   ConstantInt::get(Int32Ty, 0)
                                 };
      Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);

      // Reverse the double words in the vector we will extract from.
      Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
      Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask);

      // Reverse the index.
      Index = MaxIndex - Index;
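      // (E.g. a byte offset of 0 in the big-endian numbering becomes offset
      // 12 once the doublewords have been reversed.)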
    }

    // Intrinsic expects the first arg to be a vector of int.
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
    Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
    return Builder.CreateCall(F, Ops);
  }

  case PPC::BI__builtin_vsx_extractuword: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);

    // Intrinsic expects the first argument to be a vector of doublewords.
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));

    // The second argument is a compile time constant int that needs to
    // be clamped to the range [0, 12].
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
    assert(ArgCI &&
           "Second Arg to xxextractuw intrinsic must be a constant integer!");
    const int64_t MaxIndex = 12;
    int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);

    if (getTarget().isLittleEndian()) {
      // Reverse the index.
      Index = MaxIndex - Index;
      Ops[1] = ConstantInt::getSigned(Int32Ty, Index);

      // Emit the call, then reverse the double words of the results vector.
      Value *Call = Builder.CreateCall(F, Ops);

      // Create a shuffle mask of (1, 0)
      Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
                                   ConstantInt::get(Int32Ty, 0)
                                 };
      Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);

      Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask);
      return ShuffleCall;
    } else {
      Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
      return Builder.CreateCall(F, Ops);
    }
  }

  case PPC::BI__builtin_vsx_xxpermdi: {
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
    assert(ArgCI && "Third arg must be constant integer!");

    unsigned Index = ArgCI->getZExtValue();
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
    Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));

    // Account for endianness by treating this as just a shuffle. So we use the
    // same indices for both LE and BE in order to produce expected results in
    // both cases.
    unsigned ElemIdx0 = (Index & 2) >> 1;
    unsigned ElemIdx1 = 2 + (Index & 1);
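    // E.g. Index == 3 picks doubleword 1 of the first vector (ElemIdx0 == 1)
    // and doubleword 1 of the second (ElemIdx1 == 3 in the concatenated
    // pair).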
    Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0),
                                ConstantInt::get(Int32Ty, ElemIdx1)};
    Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);

    Value *ShuffleCall =
        Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
    QualType BIRetType = E->getType();
    auto RetTy = ConvertType(BIRetType);
    return Builder.CreateBitCast(ShuffleCall, RetTy);
  }

  case PPC::BI__builtin_vsx_xxsldwi: {
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
    assert(ArgCI && "Third argument must be a compile time constant");
    unsigned Index = ArgCI->getZExtValue() & 0x3;
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
    Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4));

    // Create a shuffle mask
    unsigned ElemIdx0;
    unsigned ElemIdx1;
    unsigned ElemIdx2;
    unsigned ElemIdx3;
    if (getTarget().isLittleEndian()) {
      // Little endian element N comes from element 8+N-Index of the
      // concatenated wide vector (of course, using modulo arithmetic on
      // the total number of elements).
      ElemIdx0 = (8 - Index) % 8;
      ElemIdx1 = (9 - Index) % 8;
      ElemIdx2 = (10 - Index) % 8;
      ElemIdx3 = (11 - Index) % 8;
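      // E.g. Index == 1 yields elements 7, 0, 1, 2 of the concatenated
      // vector on little-endian.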
    } else {
      // Big endian ElemIdx<N> = Index + N
      ElemIdx0 = Index;
      ElemIdx1 = Index + 1;
      ElemIdx2 = Index + 2;
      ElemIdx3 = Index + 3;
    }

    Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0),
                                ConstantInt::get(Int32Ty, ElemIdx1),
                                ConstantInt::get(Int32Ty, ElemIdx2),
                                ConstantInt::get(Int32Ty, ElemIdx3)};

    Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
    Value *ShuffleCall =
        Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
    QualType BIRetType = E->getType();
    auto RetTy = ConvertType(BIRetType);
    return Builder.CreateBitCast(ShuffleCall, RetTy);
  }

  case PPC::BI__builtin_pack_vector_int128: {
    bool isLittleEndian = getTarget().isLittleEndian();
12672 Value *UndefValue =
12673 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), 2));
12674 Value *Res = Builder.CreateInsertElement(
12675 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
12676 Res = Builder.CreateInsertElement(Res, Ops[1],
12677 (uint64_t)(isLittleEndian ? 0 : 1));
  return Builder.CreateBitCast(Res, ConvertType(E->getType()));
}
12681 case PPC::BI__builtin_unpack_vector_int128: {
12682 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
12683 Value *Unpacked = Builder.CreateBitCast(
12684 Ops[0], llvm::VectorType::get(ConvertType(E->getType()), 2));
12686 if (getTarget().isLittleEndian())
12687 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
  return Builder.CreateExtractElement(Unpacked, Index);
}
}
}
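// Illustrative note (not in the original): the two vector_int128 cases above
// view the 128-bit value as a two-element vector of the scalar operand type
// and flip the element index (1 - Index) on little-endian targets, so that a
// given index consistently names the same 64-bit half on both byte orders.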
12694 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
12695 const CallExpr *E) {
12696 switch (BuiltinID) {
12697 case AMDGPU::BI__builtin_amdgcn_div_scale:
12698 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
// Translate from the intrinsic's struct return to the builtin's out
// argument.

Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));

llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Z = EmitScalarExpr(E->getArg(2));

llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
                                          X->getType());

llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});

llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);

llvm::Type *RealFlagType
  = FlagOutPtr.getPointer()->getType()->getPointerElementType();

llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
Builder.CreateStore(FlagExt, FlagOutPtr);
return Result;
}
12723 case AMDGPU::BI__builtin_amdgcn_div_fmas:
12724 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
12725 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
12726 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
12727 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
12728 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
                                     Src0->getType());
llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
}
12736 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
12737 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
12738 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
12739 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
12740 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
12741 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
12742 llvm::SmallVector<llvm::Value *, 6> Args;
12743 for (unsigned I = 0; I != E->getNumArgs(); ++I)
12744 Args.push_back(EmitScalarExpr(E->getArg(I)));
12745 assert(Args.size() == 5 || Args.size() == 6);
12746 if (Args.size() == 5)
  Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
llvm::Function *F =
    CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
return Builder.CreateCall(F, Args);
}
12752 case AMDGPU::BI__builtin_amdgcn_div_fixup:
12753 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
12754 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
12755 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
12756 case AMDGPU::BI__builtin_amdgcn_trig_preop:
12757 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
12758 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
12759 case AMDGPU::BI__builtin_amdgcn_rcp:
12760 case AMDGPU::BI__builtin_amdgcn_rcpf:
12761 case AMDGPU::BI__builtin_amdgcn_rcph:
12762 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
12763 case AMDGPU::BI__builtin_amdgcn_rsq:
12764 case AMDGPU::BI__builtin_amdgcn_rsqf:
12765 case AMDGPU::BI__builtin_amdgcn_rsqh:
12766 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
12767 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
12768 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
12769 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
12770 case AMDGPU::BI__builtin_amdgcn_sinf:
12771 case AMDGPU::BI__builtin_amdgcn_sinh:
12772 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
12773 case AMDGPU::BI__builtin_amdgcn_cosf:
12774 case AMDGPU::BI__builtin_amdgcn_cosh:
12775 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
12776 case AMDGPU::BI__builtin_amdgcn_log_clampf:
12777 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
12778 case AMDGPU::BI__builtin_amdgcn_ldexp:
12779 case AMDGPU::BI__builtin_amdgcn_ldexpf:
12780 case AMDGPU::BI__builtin_amdgcn_ldexph:
12781 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
12782 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
12783 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
12784 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
12785 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
12786 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
12787 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
12788 Value *Src0 = EmitScalarExpr(E->getArg(0));
12789 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
12790 { Builder.getInt32Ty(), Src0->getType() });
return Builder.CreateCall(F, Src0);
}
12793 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
12794 Value *Src0 = EmitScalarExpr(E->getArg(0));
12795 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
12796 { Builder.getInt16Ty(), Src0->getType() });
return Builder.CreateCall(F, Src0);
}
12799 case AMDGPU::BI__builtin_amdgcn_fract:
12800 case AMDGPU::BI__builtin_amdgcn_fractf:
12801 case AMDGPU::BI__builtin_amdgcn_fracth:
12802 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
12803 case AMDGPU::BI__builtin_amdgcn_lerp:
12804 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
12805 case AMDGPU::BI__builtin_amdgcn_ubfe:
12806 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
12807 case AMDGPU::BI__builtin_amdgcn_sbfe:
12808 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
12809 case AMDGPU::BI__builtin_amdgcn_uicmp:
12810 case AMDGPU::BI__builtin_amdgcn_uicmpl:
12811 case AMDGPU::BI__builtin_amdgcn_sicmp:
12812 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
12813 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
12814 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
12815 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
12817 // FIXME-GFX10: How should 32 bit mask be handled?
12818 Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
12819 { Builder.getInt64Ty(), Src0->getType() });
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
12822 case AMDGPU::BI__builtin_amdgcn_fcmp:
12823 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
12824 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
12825 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
12826 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
12828 // FIXME-GFX10: How should 32 bit mask be handled?
12829 Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
12830 { Builder.getInt64Ty(), Src0->getType() });
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
12833 case AMDGPU::BI__builtin_amdgcn_class:
12834 case AMDGPU::BI__builtin_amdgcn_classf:
12835 case AMDGPU::BI__builtin_amdgcn_classh:
12836 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
12837 case AMDGPU::BI__builtin_amdgcn_fmed3f:
12838 case AMDGPU::BI__builtin_amdgcn_fmed3h:
12839 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
12840 case AMDGPU::BI__builtin_amdgcn_ds_append:
12841 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
12842 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
12843 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
12844 Value *Src0 = EmitScalarExpr(E->getArg(0));
12845 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
return Builder.CreateCall(F, { Src0, Builder.getFalse() });
}
12848 case AMDGPU::BI__builtin_amdgcn_read_exec: {
12849 CallInst *CI = cast<CallInst>(
12850 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
CI->setConvergent();
return CI;
}
12854 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
12855 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
12856 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
12857 "exec_lo" : "exec_hi";
12858 CallInst *CI = cast<CallInst>(
12859 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName));
CI->setConvergent();
return CI;
}
12864 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
12865 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
12866 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
12867 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
12868 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
  return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);

// r600 intrinsics
12872 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
12873 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
12874 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
12875 case AMDGPU::BI__builtin_r600_read_tidig_x:
12876 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
12877 case AMDGPU::BI__builtin_r600_read_tidig_y:
12878 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
12879 case AMDGPU::BI__builtin_r600_read_tidig_z:
  return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
default:
  return nullptr;
}
}
12886 /// Handle a SystemZ function in which the final argument is a pointer
12887 /// to an int that receives the post-instruction CC value. At the LLVM level
12888 /// this is represented as a function that returns a {result, cc} pair.
12889 static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
12890 unsigned IntrinsicID,
12891 const CallExpr *E) {
12892 unsigned NumArgs = E->getNumArgs() - 1;
12893 SmallVector<Value *, 8> Args(NumArgs);
12894 for (unsigned I = 0; I < NumArgs; ++I)
12895 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
12896 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
12897 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
12898 Value *Call = CGF.Builder.CreateCall(F, Args);
12899 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
12900 CGF.Builder.CreateStore(CC, CCPtr);
  return CGF.Builder.CreateExtractValue(Call, 0);
}
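// Illustrative sketch (not from the original) of the IR this helper produces
// for a CC-setting builtin such as __builtin_s390_vceqbs(a, b, &cc):
//   %pair = call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %a,
//                                                     <16 x i8> %b)
//   %cc   = extractvalue { <16 x i8>, i32 } %pair, 1
//   store i32 %cc, i32* %ccptr
//   ; the builtin's own value is extractvalue %pair, 0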
12904 Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
12905 const CallExpr *E) {
12906 switch (BuiltinID) {
12907 case SystemZ::BI__builtin_tbegin: {
12908 Value *TDB = EmitScalarExpr(E->getArg(0));
12909 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
12910 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
return Builder.CreateCall(F, {TDB, Control});
}
12913 case SystemZ::BI__builtin_tbegin_nofloat: {
12914 Value *TDB = EmitScalarExpr(E->getArg(0));
12915 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
12916 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
return Builder.CreateCall(F, {TDB, Control});
}
12919 case SystemZ::BI__builtin_tbeginc: {
12920 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
12921 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
12922 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
return Builder.CreateCall(F, {TDB, Control});
}
12925 case SystemZ::BI__builtin_tabort: {
12926 Value *Data = EmitScalarExpr(E->getArg(0));
12927 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
}
12930 case SystemZ::BI__builtin_non_tx_store: {
12931 Value *Address = EmitScalarExpr(E->getArg(0));
12932 Value *Data = EmitScalarExpr(E->getArg(1));
12933 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
return Builder.CreateCall(F, {Data, Address});
}
12937 // Vector builtins. Note that most vector builtins are mapped automatically
12938 // to target-specific LLVM intrinsics. The ones handled specially here can
12939 // be represented via standard LLVM IR, which is preferable to enable common
12940 // LLVM optimizations.
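// For example (illustrative): __builtin_s390_vpopctb is emitted below as the
// generic @llvm.ctpop.v16i8 rather than a target-specific intrinsic, so the
// optimizer can constant-fold and combine it like any other ctpop.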
12942 case SystemZ::BI__builtin_s390_vpopctb:
12943 case SystemZ::BI__builtin_s390_vpopcth:
12944 case SystemZ::BI__builtin_s390_vpopctf:
12945 case SystemZ::BI__builtin_s390_vpopctg: {
12946 llvm::Type *ResultType = ConvertType(E->getType());
12947 Value *X = EmitScalarExpr(E->getArg(0));
12948 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, X);
}
12952 case SystemZ::BI__builtin_s390_vclzb:
12953 case SystemZ::BI__builtin_s390_vclzh:
12954 case SystemZ::BI__builtin_s390_vclzf:
12955 case SystemZ::BI__builtin_s390_vclzg: {
12956 llvm::Type *ResultType = ConvertType(E->getType());
12957 Value *X = EmitScalarExpr(E->getArg(0));
12958 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
12959 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
12963 case SystemZ::BI__builtin_s390_vctzb:
12964 case SystemZ::BI__builtin_s390_vctzh:
12965 case SystemZ::BI__builtin_s390_vctzf:
12966 case SystemZ::BI__builtin_s390_vctzg: {
12967 llvm::Type *ResultType = ConvertType(E->getType());
12968 Value *X = EmitScalarExpr(E->getArg(0));
12969 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
12970 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
12974 case SystemZ::BI__builtin_s390_vfsqsb:
12975 case SystemZ::BI__builtin_s390_vfsqdb: {
12976 llvm::Type *ResultType = ConvertType(E->getType());
12977 Value *X = EmitScalarExpr(E->getArg(0));
12978 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
return Builder.CreateCall(F, X);
}
12981 case SystemZ::BI__builtin_s390_vfmasb:
12982 case SystemZ::BI__builtin_s390_vfmadb: {
12983 llvm::Type *ResultType = ConvertType(E->getType());
12984 Value *X = EmitScalarExpr(E->getArg(0));
12985 Value *Y = EmitScalarExpr(E->getArg(1));
12986 Value *Z = EmitScalarExpr(E->getArg(2));
12987 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Z});
}
12990 case SystemZ::BI__builtin_s390_vfmssb:
12991 case SystemZ::BI__builtin_s390_vfmsdb: {
12992 llvm::Type *ResultType = ConvertType(E->getType());
12993 Value *X = EmitScalarExpr(E->getArg(0));
12994 Value *Y = EmitScalarExpr(E->getArg(1));
12995 Value *Z = EmitScalarExpr(E->getArg(2));
12996 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
12997 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
}
13000 case SystemZ::BI__builtin_s390_vfnmasb:
13001 case SystemZ::BI__builtin_s390_vfnmadb: {
13002 llvm::Type *ResultType = ConvertType(E->getType());
13003 Value *X = EmitScalarExpr(E->getArg(0));
13004 Value *Y = EmitScalarExpr(E->getArg(1));
13005 Value *Z = EmitScalarExpr(E->getArg(2));
13006 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
13007 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, Z}), "sub");
}
13010 case SystemZ::BI__builtin_s390_vfnmssb:
13011 case SystemZ::BI__builtin_s390_vfnmsdb: {
13012 llvm::Type *ResultType = ConvertType(E->getType());
13013 Value *X = EmitScalarExpr(E->getArg(0));
13014 Value *Y = EmitScalarExpr(E->getArg(1));
13015 Value *Z = EmitScalarExpr(E->getArg(2));
13016 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
13017 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13018 Value *NegZ = Builder.CreateFSub(Zero, Z, "sub");
return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, NegZ}));
}
13021 case SystemZ::BI__builtin_s390_vflpsb:
13022 case SystemZ::BI__builtin_s390_vflpdb: {
13023 llvm::Type *ResultType = ConvertType(E->getType());
13024 Value *X = EmitScalarExpr(E->getArg(0));
13025 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateCall(F, X);
}
13028 case SystemZ::BI__builtin_s390_vflnsb:
13029 case SystemZ::BI__builtin_s390_vflndb: {
13030 llvm::Type *ResultType = ConvertType(E->getType());
13031 Value *X = EmitScalarExpr(E->getArg(0));
13032 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
13033 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub");
}
13036 case SystemZ::BI__builtin_s390_vfisb:
13037 case SystemZ::BI__builtin_s390_vfidb: {
13038 llvm::Type *ResultType = ConvertType(E->getType());
13039 Value *X = EmitScalarExpr(E->getArg(0));
13040 // Constant-fold the M4 and M5 mask arguments.
13041 llvm::APSInt M4, M5;
13042 bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
13043 bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
13044 assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
13045 (void)IsConstM4; (void)IsConstM5;
// Check whether this instance can be represented via an LLVM standard
13047 // intrinsic. We only support some combinations of M4 and M5.
13048 Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (M4.getZExtValue()) {
default: break;
case 0:  // IEEE-inexact exception allowed
  switch (M5.getZExtValue()) {
  default: break;
  case 0: ID = Intrinsic::rint; break;
  }
  break;
case 4:  // IEEE-inexact exception suppressed
  switch (M5.getZExtValue()) {
  default: break;
  case 0: ID = Intrinsic::nearbyint; break;
  case 1: ID = Intrinsic::round; break;
  case 5: ID = Intrinsic::trunc; break;
  case 6: ID = Intrinsic::ceil; break;
  case 7: ID = Intrinsic::floor; break;
  }
  break;
}
13068 if (ID != Intrinsic::not_intrinsic) {
13069 Function *F = CGM.getIntrinsic(ID, ResultType);
  return Builder.CreateCall(F, X);
}
13072 switch (BuiltinID) {
13073 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
13074 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
13077 Function *F = CGM.getIntrinsic(ID);
13078 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
13079 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
return Builder.CreateCall(F, {X, M4Value, M5Value});
}
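// Mapping summary (illustrative): M4 = 4, M5 = 6 selects @llvm.ceil, while
// M4 = 0, M5 = 0 selects @llvm.rint; any other (M4, M5) pair falls through
// to the target-specific @llvm.s390.vfisb/vfidb intrinsic with both masks
// passed through unchanged.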
13082 case SystemZ::BI__builtin_s390_vfmaxsb:
13083 case SystemZ::BI__builtin_s390_vfmaxdb: {
13084 llvm::Type *ResultType = ConvertType(E->getType());
13085 Value *X = EmitScalarExpr(E->getArg(0));
13086 Value *Y = EmitScalarExpr(E->getArg(1));
// Constant-fold the M4 mask argument.
llvm::APSInt M4;
bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
assert(IsConstM4 && "Constant arg isn't actually constant?");
(void)IsConstM4;
// Check whether this instance can be represented via an LLVM standard
13093 // intrinsic. We only support some values of M4.
13094 Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (M4.getZExtValue()) {
default: break;
case 4: ID = Intrinsic::maxnum; break;
}
13099 if (ID != Intrinsic::not_intrinsic) {
13100 Function *F = CGM.getIntrinsic(ID, ResultType);
  return Builder.CreateCall(F, {X, Y});
}
13103 switch (BuiltinID) {
13104 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
13105 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
13108 Function *F = CGM.getIntrinsic(ID);
13109 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
return Builder.CreateCall(F, {X, Y, M4Value});
}
13112 case SystemZ::BI__builtin_s390_vfminsb:
13113 case SystemZ::BI__builtin_s390_vfmindb: {
13114 llvm::Type *ResultType = ConvertType(E->getType());
13115 Value *X = EmitScalarExpr(E->getArg(0));
13116 Value *Y = EmitScalarExpr(E->getArg(1));
// Constant-fold the M4 mask argument.
llvm::APSInt M4;
bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
assert(IsConstM4 && "Constant arg isn't actually constant?");
(void)IsConstM4;
// Check whether this instance can be represented via an LLVM standard
13123 // intrinsic. We only support some values of M4.
13124 Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (M4.getZExtValue()) {
default: break;
case 4: ID = Intrinsic::minnum; break;
}
13129 if (ID != Intrinsic::not_intrinsic) {
13130 Function *F = CGM.getIntrinsic(ID, ResultType);
  return Builder.CreateCall(F, {X, Y});
}
13133 switch (BuiltinID) {
13134 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
13135 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
13138 Function *F = CGM.getIntrinsic(ID);
13139 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
return Builder.CreateCall(F, {X, Y, M4Value});
}
13143 case SystemZ::BI__builtin_s390_vlbrh:
13144 case SystemZ::BI__builtin_s390_vlbrf:
13145 case SystemZ::BI__builtin_s390_vlbrg: {
13146 llvm::Type *ResultType = ConvertType(E->getType());
13147 Value *X = EmitScalarExpr(E->getArg(0));
13148 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
return Builder.CreateCall(F, X);
}
13152 // Vector intrinsics that output the post-instruction CC value.
13154 #define INTRINSIC_WITH_CC(NAME) \
13155 case SystemZ::BI__builtin_##NAME: \
13156 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
13158 INTRINSIC_WITH_CC(s390_vpkshs);
13159 INTRINSIC_WITH_CC(s390_vpksfs);
13160 INTRINSIC_WITH_CC(s390_vpksgs);
13162 INTRINSIC_WITH_CC(s390_vpklshs);
13163 INTRINSIC_WITH_CC(s390_vpklsfs);
13164 INTRINSIC_WITH_CC(s390_vpklsgs);
13166 INTRINSIC_WITH_CC(s390_vceqbs);
13167 INTRINSIC_WITH_CC(s390_vceqhs);
13168 INTRINSIC_WITH_CC(s390_vceqfs);
13169 INTRINSIC_WITH_CC(s390_vceqgs);
13171 INTRINSIC_WITH_CC(s390_vchbs);
13172 INTRINSIC_WITH_CC(s390_vchhs);
13173 INTRINSIC_WITH_CC(s390_vchfs);
13174 INTRINSIC_WITH_CC(s390_vchgs);
13176 INTRINSIC_WITH_CC(s390_vchlbs);
13177 INTRINSIC_WITH_CC(s390_vchlhs);
13178 INTRINSIC_WITH_CC(s390_vchlfs);
13179 INTRINSIC_WITH_CC(s390_vchlgs);
13181 INTRINSIC_WITH_CC(s390_vfaebs);
13182 INTRINSIC_WITH_CC(s390_vfaehs);
13183 INTRINSIC_WITH_CC(s390_vfaefs);
13185 INTRINSIC_WITH_CC(s390_vfaezbs);
13186 INTRINSIC_WITH_CC(s390_vfaezhs);
13187 INTRINSIC_WITH_CC(s390_vfaezfs);
13189 INTRINSIC_WITH_CC(s390_vfeebs);
13190 INTRINSIC_WITH_CC(s390_vfeehs);
13191 INTRINSIC_WITH_CC(s390_vfeefs);
13193 INTRINSIC_WITH_CC(s390_vfeezbs);
13194 INTRINSIC_WITH_CC(s390_vfeezhs);
13195 INTRINSIC_WITH_CC(s390_vfeezfs);
13197 INTRINSIC_WITH_CC(s390_vfenebs);
13198 INTRINSIC_WITH_CC(s390_vfenehs);
13199 INTRINSIC_WITH_CC(s390_vfenefs);
13201 INTRINSIC_WITH_CC(s390_vfenezbs);
13202 INTRINSIC_WITH_CC(s390_vfenezhs);
13203 INTRINSIC_WITH_CC(s390_vfenezfs);
13205 INTRINSIC_WITH_CC(s390_vistrbs);
13206 INTRINSIC_WITH_CC(s390_vistrhs);
13207 INTRINSIC_WITH_CC(s390_vistrfs);
13209 INTRINSIC_WITH_CC(s390_vstrcbs);
13210 INTRINSIC_WITH_CC(s390_vstrchs);
13211 INTRINSIC_WITH_CC(s390_vstrcfs);
13213 INTRINSIC_WITH_CC(s390_vstrczbs);
13214 INTRINSIC_WITH_CC(s390_vstrczhs);
13215 INTRINSIC_WITH_CC(s390_vstrczfs);
13217 INTRINSIC_WITH_CC(s390_vfcesbs);
13218 INTRINSIC_WITH_CC(s390_vfcedbs);
13219 INTRINSIC_WITH_CC(s390_vfchsbs);
13220 INTRINSIC_WITH_CC(s390_vfchdbs);
13221 INTRINSIC_WITH_CC(s390_vfchesbs);
13222 INTRINSIC_WITH_CC(s390_vfchedbs);
13224 INTRINSIC_WITH_CC(s390_vftcisb);
13225 INTRINSIC_WITH_CC(s390_vftcidb);
13227 INTRINSIC_WITH_CC(s390_vstrsb);
13228 INTRINSIC_WITH_CC(s390_vstrsh);
13229 INTRINSIC_WITH_CC(s390_vstrsf);
13231 INTRINSIC_WITH_CC(s390_vstrszb);
13232 INTRINSIC_WITH_CC(s390_vstrszh);
13233 INTRINSIC_WITH_CC(s390_vstrszf);
#undef INTRINSIC_WITH_CC

default:
  return nullptr;
}
}
namespace {
// Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
struct NVPTXMmaLdstInfo {
  unsigned NumResults;  // Number of elements to load/store.
  // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
  unsigned IID_col;
  unsigned IID_row;
};
13251 #define MMA_INTR(geom_op_type, layout) \
13252 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
13253 #define MMA_LDST(n, geom_op_type) \
13254 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
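// Example expansion (illustrative): MMA_LDST(8, m16n16k16_load_a_f16) yields
//   {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//       Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride}.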
13256 static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
13257 switch (BuiltinID) {
13259 case NVPTX::BI__hmma_m16n16k16_ld_a:
13260 return MMA_LDST(8, m16n16k16_load_a_f16);
13261 case NVPTX::BI__hmma_m16n16k16_ld_b:
13262 return MMA_LDST(8, m16n16k16_load_b_f16);
13263 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
13264 return MMA_LDST(4, m16n16k16_load_c_f16);
13265 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
13266 return MMA_LDST(8, m16n16k16_load_c_f32);
13267 case NVPTX::BI__hmma_m32n8k16_ld_a:
13268 return MMA_LDST(8, m32n8k16_load_a_f16);
13269 case NVPTX::BI__hmma_m32n8k16_ld_b:
13270 return MMA_LDST(8, m32n8k16_load_b_f16);
13271 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
13272 return MMA_LDST(4, m32n8k16_load_c_f16);
13273 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
13274 return MMA_LDST(8, m32n8k16_load_c_f32);
13275 case NVPTX::BI__hmma_m8n32k16_ld_a:
13276 return MMA_LDST(8, m8n32k16_load_a_f16);
13277 case NVPTX::BI__hmma_m8n32k16_ld_b:
13278 return MMA_LDST(8, m8n32k16_load_b_f16);
13279 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
13280 return MMA_LDST(4, m8n32k16_load_c_f16);
13281 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
13282 return MMA_LDST(8, m8n32k16_load_c_f32);
13284 // Integer MMA loads
13285 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
13286 return MMA_LDST(2, m16n16k16_load_a_s8);
13287 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
13288 return MMA_LDST(2, m16n16k16_load_a_u8);
13289 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
13290 return MMA_LDST(2, m16n16k16_load_b_s8);
13291 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
13292 return MMA_LDST(2, m16n16k16_load_b_u8);
13293 case NVPTX::BI__imma_m16n16k16_ld_c:
13294 return MMA_LDST(8, m16n16k16_load_c_s32);
13295 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
13296 return MMA_LDST(4, m32n8k16_load_a_s8);
13297 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
13298 return MMA_LDST(4, m32n8k16_load_a_u8);
13299 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
13300 return MMA_LDST(1, m32n8k16_load_b_s8);
13301 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
13302 return MMA_LDST(1, m32n8k16_load_b_u8);
13303 case NVPTX::BI__imma_m32n8k16_ld_c:
13304 return MMA_LDST(8, m32n8k16_load_c_s32);
13305 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
13306 return MMA_LDST(1, m8n32k16_load_a_s8);
13307 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
13308 return MMA_LDST(1, m8n32k16_load_a_u8);
13309 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
13310 return MMA_LDST(4, m8n32k16_load_b_s8);
13311 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
13312 return MMA_LDST(4, m8n32k16_load_b_u8);
13313 case NVPTX::BI__imma_m8n32k16_ld_c:
13314 return MMA_LDST(8, m8n32k16_load_c_s32);
13316 // Sub-integer MMA loads.
13317 // Only row/col layout is supported by A/B fragments.
13318 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
13319 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
13320 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
13321 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
13322 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
13323 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
13324 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
13325 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
13326 case NVPTX::BI__imma_m8n8k32_ld_c:
13327 return MMA_LDST(2, m8n8k32_load_c_s32);
13328 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
13329 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
13330 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
13331 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
13332 case NVPTX::BI__bmma_m8n8k128_ld_c:
13333 return MMA_LDST(2, m8n8k128_load_c_s32);
// NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
13336 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
13337 // use fragment C for both loads and stores.
13339 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
13340 return MMA_LDST(4, m16n16k16_store_d_f16);
13341 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
13342 return MMA_LDST(8, m16n16k16_store_d_f32);
13343 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
13344 return MMA_LDST(4, m32n8k16_store_d_f16);
13345 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
13346 return MMA_LDST(8, m32n8k16_store_d_f32);
13347 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
13348 return MMA_LDST(4, m8n32k16_store_d_f16);
13349 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
13350 return MMA_LDST(8, m8n32k16_store_d_f32);
13352 // Integer and sub-integer MMA stores.
13353 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
13354 // name, integer loads/stores use LLVM's i32.
13355 case NVPTX::BI__imma_m16n16k16_st_c_i32:
13356 return MMA_LDST(8, m16n16k16_store_d_s32);
13357 case NVPTX::BI__imma_m32n8k16_st_c_i32:
13358 return MMA_LDST(8, m32n8k16_store_d_s32);
13359 case NVPTX::BI__imma_m8n32k16_st_c_i32:
13360 return MMA_LDST(8, m8n32k16_store_d_s32);
13361 case NVPTX::BI__imma_m8n8k32_st_c_i32:
13362 return MMA_LDST(2, m8n8k32_store_d_s32);
13363 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
13364 return MMA_LDST(2, m8n8k128_store_d_s32);
default:
  llvm_unreachable("Unknown MMA builtin");
}
}
#undef MMA_LDST
#undef MMA_INTR

struct NVPTXMmaInfo {
  unsigned NumEltsA;
  unsigned NumEltsB;
  unsigned NumEltsC;
  unsigned NumEltsD;
  std::array<unsigned, 8> Variants;
13381 unsigned getMMAIntrinsic(int Layout, bool Satf) {
13382 unsigned Index = Layout * 2 + Satf;
    if (Index >= Variants.size())
      return 0;
    return Variants[Index];
  }
};
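// Indexing example (illustrative): Layout encodes the row/col choice for A
// and B as 0 = row.row, 1 = row.col, 2 = col.row, 3 = col.col, so Layout = 1
// with Satf = true selects Variants[1 * 2 + 1] = Variants[3], the row_col
// ..._satfinite intrinsic in the tables below.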
13389 // Returns an intrinsic that matches Layout and Satf for valid combinations of
13390 // Layout and Satf, 0 otherwise.
13391 static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
13392 // clang-format off
13393 #define MMA_VARIANTS(geom, type) {{ \
13394 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
13395 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
13396 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
13397 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
13398 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
13399 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
13400 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
      Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite  \
    }}
13403 // Sub-integer MMA only supports row.col layout.
#define MMA_VARIANTS_I4(geom, type) {{ \
      0, \
      0, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
      0, \
      0, \
      0, \
      0  \
    }}
13414 // b1 MMA does not support .satfinite.
#define MMA_VARIANTS_B1(geom, type) {{ \
      0, \
      0, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
      0, \
      0, \
      0, \
      0, \
      0  \
    }}
switch (BuiltinID) {
// FP MMA
// Note that 'type' argument of MMA_VARIANT uses D_C notation, while
13429 // NumEltsN of return value are ordered as A,B,C,D.
13430 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
13431 return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
13432 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
13433 return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
13434 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
13435 return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
13436 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
13437 return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
13438 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
13439 return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
13440 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
13441 return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
13442 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
13443 return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
13444 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
13445 return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
13446 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
13447 return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
13448 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
13449 return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
13450 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
13451 return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
13452 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
  return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};

// Integer MMA
13456 case NVPTX::BI__imma_m16n16k16_mma_s8:
13457 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
13458 case NVPTX::BI__imma_m16n16k16_mma_u8:
13459 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
13460 case NVPTX::BI__imma_m32n8k16_mma_s8:
13461 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
13462 case NVPTX::BI__imma_m32n8k16_mma_u8:
13463 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
13464 case NVPTX::BI__imma_m8n32k16_mma_s8:
13465 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
13466 case NVPTX::BI__imma_m8n32k16_mma_u8:
  return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};

// Sub-integer MMA
13470 case NVPTX::BI__imma_m8n8k32_mma_s4:
13471 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
13472 case NVPTX::BI__imma_m8n8k32_mma_u4:
13473 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
13474 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
  return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
default:
  llvm_unreachable("Unexpected builtin ID.");
}
#undef MMA_VARIANTS
#undef MMA_VARIANTS_I4
#undef MMA_VARIANTS_B1
}

} // namespace

Value *
CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
auto MakeLdg = [&](unsigned IntrinsicID) {
  Value *Ptr = EmitScalarExpr(E->getArg(0));
  clang::CharUnits Align =
      getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
  return Builder.CreateCall(
      CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
                                     Ptr->getType()}),
      {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
};
auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
  Value *Ptr = EmitScalarExpr(E->getArg(0));
  return Builder.CreateCall(
      CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
                                     Ptr->getType()}),
      {Ptr, EmitScalarExpr(E->getArg(1))});
};
13504 switch (BuiltinID) {
13505 case NVPTX::BI__nvvm_atom_add_gen_i:
13506 case NVPTX::BI__nvvm_atom_add_gen_l:
13507 case NVPTX::BI__nvvm_atom_add_gen_ll:
13508 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
13510 case NVPTX::BI__nvvm_atom_sub_gen_i:
13511 case NVPTX::BI__nvvm_atom_sub_gen_l:
13512 case NVPTX::BI__nvvm_atom_sub_gen_ll:
13513 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
13515 case NVPTX::BI__nvvm_atom_and_gen_i:
13516 case NVPTX::BI__nvvm_atom_and_gen_l:
13517 case NVPTX::BI__nvvm_atom_and_gen_ll:
13518 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
13520 case NVPTX::BI__nvvm_atom_or_gen_i:
13521 case NVPTX::BI__nvvm_atom_or_gen_l:
13522 case NVPTX::BI__nvvm_atom_or_gen_ll:
13523 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
13525 case NVPTX::BI__nvvm_atom_xor_gen_i:
13526 case NVPTX::BI__nvvm_atom_xor_gen_l:
13527 case NVPTX::BI__nvvm_atom_xor_gen_ll:
13528 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
13530 case NVPTX::BI__nvvm_atom_xchg_gen_i:
13531 case NVPTX::BI__nvvm_atom_xchg_gen_l:
13532 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
13533 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
13535 case NVPTX::BI__nvvm_atom_max_gen_i:
13536 case NVPTX::BI__nvvm_atom_max_gen_l:
13537 case NVPTX::BI__nvvm_atom_max_gen_ll:
13538 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
13540 case NVPTX::BI__nvvm_atom_max_gen_ui:
13541 case NVPTX::BI__nvvm_atom_max_gen_ul:
13542 case NVPTX::BI__nvvm_atom_max_gen_ull:
13543 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
13545 case NVPTX::BI__nvvm_atom_min_gen_i:
13546 case NVPTX::BI__nvvm_atom_min_gen_l:
13547 case NVPTX::BI__nvvm_atom_min_gen_ll:
13548 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
13550 case NVPTX::BI__nvvm_atom_min_gen_ui:
13551 case NVPTX::BI__nvvm_atom_min_gen_ul:
13552 case NVPTX::BI__nvvm_atom_min_gen_ull:
13553 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
13555 case NVPTX::BI__nvvm_atom_cas_gen_i:
13556 case NVPTX::BI__nvvm_atom_cas_gen_l:
13557 case NVPTX::BI__nvvm_atom_cas_gen_ll:
// __nvvm_atom_cas_gen_* should return the old value rather than the
// success flag.
return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
13562 case NVPTX::BI__nvvm_atom_add_gen_f:
13563 case NVPTX::BI__nvvm_atom_add_gen_d: {
13564 Value *Ptr = EmitScalarExpr(E->getArg(0));
13565 Value *Val = EmitScalarExpr(E->getArg(1));
return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
                               AtomicOrdering::SequentiallyConsistent);
}
13570 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
13571 Value *Ptr = EmitScalarExpr(E->getArg(0));
13572 Value *Val = EmitScalarExpr(E->getArg(1));
13573 Function *FnALI32 =
13574 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
return Builder.CreateCall(FnALI32, {Ptr, Val});
}
13578 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
13579 Value *Ptr = EmitScalarExpr(E->getArg(0));
13580 Value *Val = EmitScalarExpr(E->getArg(1));
13581 Function *FnALD32 =
13582 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
return Builder.CreateCall(FnALD32, {Ptr, Val});
}
13586 case NVPTX::BI__nvvm_ldg_c:
13587 case NVPTX::BI__nvvm_ldg_c2:
13588 case NVPTX::BI__nvvm_ldg_c4:
13589 case NVPTX::BI__nvvm_ldg_s:
13590 case NVPTX::BI__nvvm_ldg_s2:
13591 case NVPTX::BI__nvvm_ldg_s4:
13592 case NVPTX::BI__nvvm_ldg_i:
13593 case NVPTX::BI__nvvm_ldg_i2:
13594 case NVPTX::BI__nvvm_ldg_i4:
13595 case NVPTX::BI__nvvm_ldg_l:
13596 case NVPTX::BI__nvvm_ldg_ll:
13597 case NVPTX::BI__nvvm_ldg_ll2:
13598 case NVPTX::BI__nvvm_ldg_uc:
13599 case NVPTX::BI__nvvm_ldg_uc2:
13600 case NVPTX::BI__nvvm_ldg_uc4:
13601 case NVPTX::BI__nvvm_ldg_us:
13602 case NVPTX::BI__nvvm_ldg_us2:
13603 case NVPTX::BI__nvvm_ldg_us4:
13604 case NVPTX::BI__nvvm_ldg_ui:
13605 case NVPTX::BI__nvvm_ldg_ui2:
13606 case NVPTX::BI__nvvm_ldg_ui4:
13607 case NVPTX::BI__nvvm_ldg_ul:
13608 case NVPTX::BI__nvvm_ldg_ull:
13609 case NVPTX::BI__nvvm_ldg_ull2:
13610 // PTX Interoperability section 2.2: "For a vector with an even number of
13611 // elements, its alignment is set to number of elements times the alignment
13612 // of its member: n*alignof(t)."
13613 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
13614 case NVPTX::BI__nvvm_ldg_f:
13615 case NVPTX::BI__nvvm_ldg_f2:
13616 case NVPTX::BI__nvvm_ldg_f4:
13617 case NVPTX::BI__nvvm_ldg_d:
13618 case NVPTX::BI__nvvm_ldg_d2:
13619 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
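// Illustrative note (not in the original): MakeLdg passes the natural
// alignment of the pointee type, so e.g. __nvvm_ldg_f4 on a float4 argument
// emits @llvm.nvvm.ldg.global.f with alignment 16 = 4 * alignof(float),
// matching the PTX interoperability rule quoted above.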
13621 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
13622 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
13623 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
13624 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
13625 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
13626 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
13627 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
13628 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
13629 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
13630 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
13631 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
13632 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
13633 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
13634 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
13635 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
13636 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
13637 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
13638 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
13639 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
13640 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
13641 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
13642 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
13643 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
13644 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
13645 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
13646 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
13647 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
13648 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
13649 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
13650 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
13651 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
13652 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
13653 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
13654 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
13655 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
13656 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
13657 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
13658 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
13659 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
13660 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
13661 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
13662 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
13663 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
13664 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
13665 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
13666 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
13667 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
13668 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
13669 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
13670 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
13671 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
13672 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
13673 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
13674 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
13675 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
13676 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
13677 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
13678 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
13679 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
13680 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
13681 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
13682 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
13683 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
13684 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
13685 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
13686 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
13687 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
13688 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
13689 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
13690 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
13691 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
13692 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
13693 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
13694 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
13695 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
13696 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
13697 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
13698 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
13699 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
13700 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
13701 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
13702 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
13703 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
13704 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
13705 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
13706 Value *Ptr = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
    CGM.getIntrinsic(
        Intrinsic::nvvm_atomic_cas_gen_i_cta,
        {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
    {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
13713 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
13714 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
13715 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
13716 Value *Ptr = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
    CGM.getIntrinsic(
        Intrinsic::nvvm_atomic_cas_gen_i_sys,
        {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
    {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
13723 case NVPTX::BI__nvvm_match_all_sync_i32p:
13724 case NVPTX::BI__nvvm_match_all_sync_i64p: {
13725 Value *Mask = EmitScalarExpr(E->getArg(0));
13726 Value *Val = EmitScalarExpr(E->getArg(1));
13727 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
Value *ResultPair = Builder.CreateCall(
    CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
                         ? Intrinsic::nvvm_match_all_sync_i32p
                         : Intrinsic::nvvm_match_all_sync_i64p),
    {Mask, Val});
13733 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
13734 PredOutPtr.getElementType());
13735 Builder.CreateStore(Pred, PredOutPtr);
  return Builder.CreateExtractValue(ResultPair, 0);
}

// FP MMA loads
13740 case NVPTX::BI__hmma_m16n16k16_ld_a:
13741 case NVPTX::BI__hmma_m16n16k16_ld_b:
13742 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
13743 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
13744 case NVPTX::BI__hmma_m32n8k16_ld_a:
13745 case NVPTX::BI__hmma_m32n8k16_ld_b:
13746 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
13747 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
13748 case NVPTX::BI__hmma_m8n32k16_ld_a:
13749 case NVPTX::BI__hmma_m8n32k16_ld_b:
13750 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
13751 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
13752 // Integer MMA loads.
13753 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
13754 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
13755 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
13756 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
13757 case NVPTX::BI__imma_m16n16k16_ld_c:
13758 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
13759 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
13760 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
13761 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
13762 case NVPTX::BI__imma_m32n8k16_ld_c:
13763 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
13764 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
13765 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
13766 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
13767 case NVPTX::BI__imma_m8n32k16_ld_c:
13768 // Sub-integer MMA loads.
13769 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
13770 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
13771 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
13772 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
13773 case NVPTX::BI__imma_m8n8k32_ld_c:
13774 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
13775 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
case NVPTX::BI__bmma_m8n8k128_ld_c: {
  Address Dst = EmitPointerWithAlignment(E->getArg(0));
13779 Value *Src = EmitScalarExpr(E->getArg(1));
13780 Value *Ldm = EmitScalarExpr(E->getArg(2));
13781 llvm::APSInt isColMajorArg;
if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
  return nullptr;
bool isColMajor = isColMajorArg.getSExtValue();
NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
unsigned IID = isColMajor ? II.IID_col : II.IID_row;
if (IID == 0)
  return nullptr;

Value *Result =
    Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
13793 // Save returned values.
13794 assert(II.NumResults);
13795 if (II.NumResults == 1) {
13796 Builder.CreateAlignedStore(Result, Dst.getPointer(),
                             CharUnits::fromQuantity(4));
} else {
  for (unsigned i = 0; i < II.NumResults; ++i) {
13800 Builder.CreateAlignedStore(
13801 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
13802 Dst.getElementType()),
13803 Builder.CreateGEP(Dst.getPointer(),
13804 llvm::ConstantInt::get(IntTy, i)),
        CharUnits::fromQuantity(4));
  }
}
return Result;
}
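// Illustrative sketch (not from the original): __hmma_m16n16k16_ld_a with
// layout 0 calls @llvm.nvvm.wmma.m16n16k16.load.a.f16.row.stride, which
// returns an aggregate of 8 fragment values; the loop above extracts each
// one and stores it to dst[i].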
13811 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
13812 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
13813 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
13814 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
13815 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
13816 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
13817 case NVPTX::BI__imma_m16n16k16_st_c_i32:
13818 case NVPTX::BI__imma_m32n8k16_st_c_i32:
13819 case NVPTX::BI__imma_m8n32k16_st_c_i32:
13820 case NVPTX::BI__imma_m8n8k32_st_c_i32:
13821 case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
13822 Value *Dst = EmitScalarExpr(E->getArg(0));
13823 Address Src = EmitPointerWithAlignment(E->getArg(1));
13824 Value *Ldm = EmitScalarExpr(E->getArg(2));
13825 llvm::APSInt isColMajorArg;
if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
  return nullptr;
bool isColMajor = isColMajorArg.getSExtValue();
NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
unsigned IID = isColMajor ? II.IID_col : II.IID_row;
if (IID == 0)
  return nullptr;
13833 Function *Intrinsic =
13834 CGM.getIntrinsic(IID, Dst->getType());
13835 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
13836 SmallVector<Value *, 10> Values = {Dst};
13837 for (unsigned i = 0; i < II.NumResults; ++i) {
13838 Value *V = Builder.CreateAlignedLoad(
13839 Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
13840 CharUnits::fromQuantity(4));
  Values.push_back(Builder.CreateBitCast(V, ParamType));
}
13843 Values.push_back(Ldm);
Value *Result = Builder.CreateCall(Intrinsic, Values);
return Result;
}
13848 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
13849 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
13850 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
13851 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
13852 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
13853 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
13854 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
13855 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
13856 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
13857 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
13858 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
13859 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
13860 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
13861 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
13862 case NVPTX::BI__imma_m16n16k16_mma_s8:
13863 case NVPTX::BI__imma_m16n16k16_mma_u8:
13864 case NVPTX::BI__imma_m32n8k16_mma_s8:
13865 case NVPTX::BI__imma_m32n8k16_mma_u8:
13866 case NVPTX::BI__imma_m8n32k16_mma_s8:
13867 case NVPTX::BI__imma_m8n32k16_mma_u8:
13868 case NVPTX::BI__imma_m8n8k32_mma_s4:
13869 case NVPTX::BI__imma_m8n8k32_mma_u4:
13870 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
13871 Address Dst = EmitPointerWithAlignment(E->getArg(0));
13872 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
13873 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
13874 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
13875 llvm::APSInt LayoutArg;
if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext()))
  return nullptr;
int Layout = LayoutArg.getSExtValue();
if (Layout < 0 || Layout > 3)
  return nullptr;
13881 llvm::APSInt SatfArg;
13882 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
13883 SatfArg = 0; // .b1 does not have satf argument.
else if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
  return nullptr;
bool Satf = SatfArg.getSExtValue();
13887 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
13888 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
if (IID == 0)  // Unsupported combination of Layout/Satf.
  return nullptr;
13892 SmallVector<Value *, 24> Values;
13893 Function *Intrinsic = CGM.getIntrinsic(IID);
llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
// Load A
for (unsigned i = 0; i < MI.NumEltsA; ++i) {
13897 Value *V = Builder.CreateAlignedLoad(
13898 Builder.CreateGEP(SrcA.getPointer(),
13899 llvm::ConstantInt::get(IntTy, i)),
13900 CharUnits::fromQuantity(4));
  Values.push_back(Builder.CreateBitCast(V, AType));
}
// Load B
llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
13905 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
13906 Value *V = Builder.CreateAlignedLoad(
13907 Builder.CreateGEP(SrcB.getPointer(),
13908 llvm::ConstantInt::get(IntTy, i)),
13909 CharUnits::fromQuantity(4));
  Values.push_back(Builder.CreateBitCast(V, BType));
}
// Load C
llvm::Type *CType =
13914 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
13915 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
13916 Value *V = Builder.CreateAlignedLoad(
13917 Builder.CreateGEP(SrcC.getPointer(),
13918 llvm::ConstantInt::get(IntTy, i)),
13919 CharUnits::fromQuantity(4));
  Values.push_back(Builder.CreateBitCast(V, CType));
}
13922 Value *Result = Builder.CreateCall(Intrinsic, Values);
13923 llvm::Type *DType = Dst.getElementType();
13924 for (unsigned i = 0; i < MI.NumEltsD; ++i)
13925 Builder.CreateAlignedStore(
13926 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
13927 Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
      CharUnits::fromQuantity(4));
return Result;
}
default:
  return nullptr;
}
}
13936 Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
13937 const CallExpr *E) {
13938 switch (BuiltinID) {
13939 case WebAssembly::BI__builtin_wasm_memory_size: {
13940 llvm::Type *ResultType = ConvertType(E->getType());
13941 Value *I = EmitScalarExpr(E->getArg(0));
13942 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
return Builder.CreateCall(Callee, I);
}
13945 case WebAssembly::BI__builtin_wasm_memory_grow: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *Args[] = {
  EmitScalarExpr(E->getArg(0)),
  EmitScalarExpr(E->getArg(1))
};
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
return Builder.CreateCall(Callee, Args);
}
13954 case WebAssembly::BI__builtin_wasm_memory_init: {
13955 llvm::APSInt SegConst;
13956 if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
13957 llvm_unreachable("Constant arg isn't actually constant?");
13958 llvm::APSInt MemConst;
13959 if (!E->getArg(1)->isIntegerConstantExpr(MemConst, getContext()))
13960 llvm_unreachable("Constant arg isn't actually constant?");
13961 if (!MemConst.isNullValue())
13962 ErrorUnsupported(E, "non-zero memory index");
13963 Value *Args[] = {llvm::ConstantInt::get(getLLVMContext(), SegConst),
13964 llvm::ConstantInt::get(getLLVMContext(), MemConst),
13965 EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)),
13966 EmitScalarExpr(E->getArg(4))};
13967 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_init);
return Builder.CreateCall(Callee, Args);
}
13970 case WebAssembly::BI__builtin_wasm_data_drop: {
13971 llvm::APSInt SegConst;
13972 if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
13973 llvm_unreachable("Constant arg isn't actually constant?");
13974 Value *Arg = llvm::ConstantInt::get(getLLVMContext(), SegConst);
13975 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_data_drop);
return Builder.CreateCall(Callee, {Arg});
}
13978 case WebAssembly::BI__builtin_wasm_tls_size: {
13979 llvm::Type *ResultType = ConvertType(E->getType());
13980 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
return Builder.CreateCall(Callee);
}
13983 case WebAssembly::BI__builtin_wasm_tls_align: {
13984 llvm::Type *ResultType = ConvertType(E->getType());
13985 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
return Builder.CreateCall(Callee);
}
13988 case WebAssembly::BI__builtin_wasm_tls_base: {
13989 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
return Builder.CreateCall(Callee);
}
13992 case WebAssembly::BI__builtin_wasm_throw: {
13993 Value *Tag = EmitScalarExpr(E->getArg(0));
13994 Value *Obj = EmitScalarExpr(E->getArg(1));
13995 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
return Builder.CreateCall(Callee, {Tag, Obj});
}
13998 case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
13999 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
return Builder.CreateCall(Callee);
}
14002 case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
14003 Value *Addr = EmitScalarExpr(E->getArg(0));
14004 Value *Expected = EmitScalarExpr(E->getArg(1));
14005 Value *Timeout = EmitScalarExpr(E->getArg(2));
14006 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
}
14009 case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
14010 Value *Addr = EmitScalarExpr(E->getArg(0));
14011 Value *Expected = EmitScalarExpr(E->getArg(1));
14012 Value *Timeout = EmitScalarExpr(E->getArg(2));
14013 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
}
14016 case WebAssembly::BI__builtin_wasm_atomic_notify: {
14017 Value *Addr = EmitScalarExpr(E->getArg(0));
14018 Value *Count = EmitScalarExpr(E->getArg(1));
14019 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
return Builder.CreateCall(Callee, {Addr, Count});
}
14022 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
14023 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
14024 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
14025 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
14026 Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee =
    CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
14032 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
14033 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
14034 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
14035 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
14036 Value *Src = EmitScalarExpr(E->getArg(0));
14037 llvm::Type *ResT = ConvertType(E->getType());
14038 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
14039 {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
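  // llvm.minimum/llvm.maximum implement the NaN-propagating semantics of the
  // wasm min/max instructions: if either operand is NaN the result is NaN,
  // and -0.0 is ordered before +0.0.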
  case WebAssembly::BI__builtin_wasm_min_f32:
  case WebAssembly::BI__builtin_wasm_min_f64:
  case WebAssembly::BI__builtin_wasm_min_f32x4:
  case WebAssembly::BI__builtin_wasm_min_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_max_f32:
  case WebAssembly::BI__builtin_wasm_max_f64:
  case WebAssembly::BI__builtin_wasm_max_f32x4:
  case WebAssembly::BI__builtin_wasm_max_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    Value *Indices = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
    return Builder.CreateCall(Callee, {Src, Indices});
  }
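  // Lane extraction. For i8x16 and i16x8 the lane is narrower than the int
  // result, so the _s/_u builtins sign- or zero-extend it; for example
  //   __builtin_wasm_extract_lane_s_i8x16(V, 0)
  // returns lane 0 sign-extended to int. 32-bit, 64-bit, and floating lanes
  // are returned unchanged. The lane index must be a constant expression.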
  case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
  case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
  case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
  case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
  case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
  case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
  case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
  case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
    llvm::APSInt LaneConst;
    if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
      llvm_unreachable("Constant arg isn't actually constant?");
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
    Value *Extract = Builder.CreateExtractElement(Vec, Lane);
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
    case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
      return Builder.CreateSExt(Extract, ConvertType(E->getType()));
    case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
    case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
      return Builder.CreateZExt(Extract, ConvertType(E->getType()));
    case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
    case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
    case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
    case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
      return Extract;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
  }
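  // Lane replacement mirrors extraction: for sub-word lanes the scalar
  // argument is truncated to the lane type before being inserted, e.g.
  //   __builtin_wasm_replace_lane_i8x16(V, 0, X)
  // keeps only the low 8 bits of X.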
  case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
  case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
  case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
  case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
  case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
  case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
    llvm::APSInt LaneConst;
    if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
      llvm_unreachable("Constant arg isn't actually constant?");
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
    Value *Val = EmitScalarExpr(E->getArg(2));
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
    case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
      llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
      Value *Trunc = Builder.CreateTrunc(Val, ElemType);
      return Builder.CreateInsertElement(Vec, Trunc, Lane);
    }
    case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
    case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
    case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
    case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
      return Builder.CreateInsertElement(Vec, Val, Lane);
    default:
      llvm_unreachable("unexpected builtin ID");
    }
  }
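  // Saturating integer arithmetic clamps to the limits of the lane type
  // instead of wrapping; e.g. adding lanes holding 200 and 100 with
  // __builtin_wasm_add_saturate_u_i8x16 yields 255. The additions map to the
  // generic llvm.sadd.sat/llvm.uadd.sat intrinsics, while the subtractions
  // are routed through wasm-specific intrinsics.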
  case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
  case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
  case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
  case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
    case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
      IntNo = Intrinsic::sadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
    case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
      IntNo = Intrinsic::uadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
      IntNo = Intrinsic::wasm_sub_saturate_signed;
      break;
    case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
      IntNo = Intrinsic::wasm_sub_saturate_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_bitselect: {
    Value *V1 = EmitScalarExpr(E->getArg(0));
    Value *V2 = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {V1, V2, C});
  }
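  // Boolean reductions: any_true returns 1 if any lane of the vector is
  // nonzero and all_true returns 1 only if every lane is nonzero; both
  // produce an i32 result.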
  case WebAssembly::BI__builtin_wasm_any_true_i8x16:
  case WebAssembly::BI__builtin_wasm_any_true_i16x8:
  case WebAssembly::BI__builtin_wasm_any_true_i32x4:
  case WebAssembly::BI__builtin_wasm_any_true_i64x2:
  case WebAssembly::BI__builtin_wasm_all_true_i8x16:
  case WebAssembly::BI__builtin_wasm_all_true_i16x8:
  case WebAssembly::BI__builtin_wasm_all_true_i32x4:
  case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_any_true_i8x16:
    case WebAssembly::BI__builtin_wasm_any_true_i16x8:
    case WebAssembly::BI__builtin_wasm_any_true_i32x4:
    case WebAssembly::BI__builtin_wasm_any_true_i64x2:
      IntNo = Intrinsic::wasm_anytrue;
      break;
    case WebAssembly::BI__builtin_wasm_all_true_i8x16:
    case WebAssembly::BI__builtin_wasm_all_true_i16x8:
    case WebAssembly::BI__builtin_wasm_all_true_i32x4:
    case WebAssembly::BI__builtin_wasm_all_true_i64x2:
      IntNo = Intrinsic::wasm_alltrue;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_abs_f32x4:
  case WebAssembly::BI__builtin_wasm_abs_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
  case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
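  // Quasi fused multiply-add/subtract from the experimental SIMD proposal:
  // whether the intermediate product is rounded or kept at infinite
  // precision (i.e. whether the operation actually fuses) is
  // implementation-defined, hence "quasi".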
  case WebAssembly::BI__builtin_wasm_qfma_f32x4:
  case WebAssembly::BI__builtin_wasm_qfms_f32x4:
  case WebAssembly::BI__builtin_wasm_qfma_f64x2:
  case WebAssembly::BI__builtin_wasm_qfms_f64x2: {
    Value *A = EmitScalarExpr(E->getArg(0));
    Value *B = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_qfma_f32x4:
    case WebAssembly::BI__builtin_wasm_qfma_f64x2:
      IntNo = Intrinsic::wasm_qfma;
      break;
    case WebAssembly::BI__builtin_wasm_qfms_f32x4:
    case WebAssembly::BI__builtin_wasm_qfms_f64x2:
      IntNo = Intrinsic::wasm_qfms;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
    return Builder.CreateCall(Callee, {A, B, C});
  }
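  // Narrowing packs the lanes of the two operand vectors into a single
  // vector of half-width lanes, saturating each value; the first operand
  // supplies the low lanes of the result and the second the high lanes.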
  case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
  case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
    Value *Low = EmitScalarExpr(E->getArg(0));
    Value *High = EmitScalarExpr(E->getArg(1));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_signed;
      break;
    case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee =
        CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
    return Builder.CreateCall(Callee, {Low, High});
  }
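  // Widening is the inverse of narrowing: it sign- or zero-extends either
  // the low or the high half of the input's lanes to twice their width.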
  case WebAssembly::BI__builtin_wasm_widen_low_s_i16x8_i8x16:
  case WebAssembly::BI__builtin_wasm_widen_high_s_i16x8_i8x16:
  case WebAssembly::BI__builtin_wasm_widen_low_u_i16x8_i8x16:
  case WebAssembly::BI__builtin_wasm_widen_high_u_i16x8_i8x16:
  case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i16x8:
  case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i16x8:
  case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i16x8:
  case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i16x8: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_widen_low_s_i16x8_i8x16:
    case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i16x8:
      IntNo = Intrinsic::wasm_widen_low_signed;
      break;
    case WebAssembly::BI__builtin_wasm_widen_high_s_i16x8_i8x16:
    case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i16x8:
      IntNo = Intrinsic::wasm_widen_high_signed;
      break;
    case WebAssembly::BI__builtin_wasm_widen_low_u_i16x8_i8x16:
    case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i16x8:
      IntNo = Intrinsic::wasm_widen_low_unsigned;
      break;
    case WebAssembly::BI__builtin_wasm_widen_high_u_i16x8_i8x16:
    case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i16x8:
      IntNo = Intrinsic::wasm_widen_high_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee =
        CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Vec->getType()});
    return Builder.CreateCall(Callee, Vec);
  }
  default:
    return nullptr;
  }
}

Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  SmallVector<llvm::Value *, 4> Ops;
  Intrinsic::ID ID = Intrinsic::not_intrinsic;

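  // Circular (modulo) addressing: the post-incremented pointer wraps around
  // within a buffer described by the modifier operand. A circular load such
  // as (roughly)
  //   int V = __builtin_HEXAGON_L2_loadri_pci(&P, /*Imm=*/4, Mod, Start);
  // returns the loaded element and writes the updated, wrapped pointer back
  // through &P, which is why the base is passed by address below.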
  auto MakeCircLd = [&](unsigned IntID, bool HasImm) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address BP = EmitPointerWithAlignment(E->getArg(0));
    BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
                 BP.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // Operands are Base, Increment, Modifier, Start.
    if (HasImm)
      Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
              EmitScalarExpr(E->getArg(3)) };
    else
      Ops = { Base, EmitScalarExpr(E->getArg(1)),
              EmitScalarExpr(E->getArg(2)) };

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1);
    llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
                                            NewBase->getType()->getPointerTo());
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    // The intrinsic generates two results. The new value for the base pointer
    // needs to be stored.
    Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  };
  auto MakeCircSt = [&](unsigned IntID, bool HasImm) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address BP = EmitPointerWithAlignment(E->getArg(0));
    BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
                 BP.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // Operands are Base, Increment, Modifier, Value, Start.
    if (HasImm)
      Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
              EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) };
    else
      Ops = { Base, EmitScalarExpr(E->getArg(1)),
              EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) };

    llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
                                            NewBase->getType()->getPointerTo());
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    // The intrinsic generates one result, which is the new value for the base
    // pointer. It needs to be stored.
    return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
  };
  // Handle the conversion of bit-reverse load intrinsics to bit code.
  // The intrinsic call after this function only reads from memory and the
  // write to memory is handled by the store instruction.
  auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) {
    // The intrinsic generates one result, which is the new value for the base
    // pointer. It needs to be returned. The result of the load instruction is
    // passed to the intrinsic by address, so the value needs to be stored.
    llvm::Value *BaseAddress =
        Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);

    // Expressions like &(*pt++) will be incremented per evaluation.
    // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression
    // once per call, so the destination address is computed only here.
    Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
    DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
                       DestAddr.getAlignment());
    llvm::Value *DestAddress = DestAddr.getPointer();

    // Operands are Base, Dest, Modifier.
    // The intrinsic format in LLVM IR is defined as
    // { ValueType, i8* } (i8*, i32).
    Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))};

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    // The value needs to be stored as the variable is passed by reference.
    llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);

    // The store needs to be truncated to fit the destination type.
    // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
    // to be handled with stores of the respective destination type.
    DestVal = Builder.CreateTrunc(DestVal, DestTy);

    llvm::Value *DestForStore =
        Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
    Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
    // The updated value of the base pointer is returned.
    return Builder.CreateExtractValue(Result, 1);
  };

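  // For instance, a bit-reversed load like (roughly)
  //   P = __builtin_brev_ldw(P, &V, Mod);
  // stores the loaded word through its second argument and returns the
  // updated base pointer, which is what MakeBrevLd emits.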
  switch (BuiltinID) {
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: {
    Address Dest = EmitPointerWithAlignment(E->getArg(2));
    // The carry is passed in and out through the third argument as a vector
    // of i1, sized to match the HVX vector length.
    unsigned Size;
    if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vaddcarry) {
      Size = 512;
      ID = Intrinsic::hexagon_V6_vaddcarry;
    } else {
      Size = 1024;
      ID = Intrinsic::hexagon_V6_vaddcarry_128B;
    }
    Dest = Builder.CreateBitCast(Dest,
        llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
    LoadInst *QLd = Builder.CreateLoad(Dest);
    Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
    llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
    llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
                                              Vprd->getType()->getPointerTo(0));
    Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
    Address Dest = EmitPointerWithAlignment(E->getArg(2));
    unsigned Size;
    if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vsubcarry) {
      Size = 512;
      ID = Intrinsic::hexagon_V6_vsubcarry;
    } else {
      Size = 1024;
      ID = Intrinsic::hexagon_V6_vsubcarry_128B;
    }
    Dest = Builder.CreateBitCast(Dest,
        llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
    LoadInst *QLd = Builder.CreateLoad(Dest);
    Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
    llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
    llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
                                              Vprd->getType()->getPointerTo(0));
    Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_brev_ldub:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_ldb:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_lduh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldw:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
  case Hexagon::BI__builtin_brev_ldd:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);