//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
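
// Note: the EmitToInt/EmitFromInt helpers below round-trip values through
// their in-memory representation (EmitToMemory/EmitFromMemory) so that,
// for example, a bool is widened to i8 before being used in an atomic
// operation and narrowed back afterwards.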
59 /// Emit the conversions required to turn the given value into an
60 /// integer of the given size.
61 static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
62 QualType T, llvm::IntegerType *IntType) {
63 V = CGF.EmitToMemory(V, T);
65 if (V->getType()->isPointerTy())
66 return CGF.Builder.CreatePtrToInt(V, IntType);
68 assert(V->getType() == IntType);
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
                                    llvm::AtomicRMWInst::BinOp Kind,
                                    const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  return EmitFromInt(CGF, Result, T, ValueType);
}
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
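  // The builtin returns the post-operation value, so redo the operation on
  // the old value the RMW returned. For the nand forms the RMW performs an
  // And, and Invert applies the complement, since nand(x, y) == ~(x & y).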
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                     llvm::ConstantInt::get(IntType, -1));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
/// @brief Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  Value *Args[3];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent,
                                                llvm::SequentiallyConsistent);
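  // The cmpxchg instruction yields a {old value, i1 success} pair.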
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}
/// Emit the computation of the sign bit for a floating-point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // The higher-order double comes first, and so we need to truncate the
    // pair to extract the overall sign. The order of the pair is the same
    // in both little- and big-endian modes.
    Width >>= 1;
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
                      ReturnValueSlot(), Fn);
}
/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
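  // The *.with.overflow intrinsics return a {result, i1 overflow} pair;
  // unpack it into the result and the Carry out-parameter.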
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E,
                                        ReturnValueSlot ReturnValue) {
  // See if we can constant fold this builtin. If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = (BuiltinID == Builtin::BI__va_start)
                          ? EmitScalarExpr(E->getArg(0))
                          : EmitVAListRef(E->getArg(0));
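    // @llvm.va_start and @llvm.va_end expect an i8* pointing at the va_list
    // storage; MSVC's __va_start already passes that pointer by value, which
    // is why it is emitted as a scalar rather than through EmitVAListRef.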
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
                                          {DstPtr, SrcPtr}));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    // abs(x) -> x >= 0 ? x : -x
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
        Builder.CreateICmpSGE(ArgValue,
                              llvm::Constant::getNullValue(ArgValue->getType()),
                              "abscond");
    Value *Result =
        Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl: {
    Value *Arg1 = EmitScalarExpr(E->getArg(0));
    Value *Result = EmitFAbs(*this, Arg1);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_fmod:
  case Builtin::BI__builtin_fmodf:
  case Builtin::BI__builtin_fmodl: {
    Value *Arg1 = EmitScalarExpr(E->getArg(0));
    Value *Arg2 = EmitScalarExpr(E->getArg(1));
    Value *Result = Builder.CreateFRem(Arg1, Arg2, "fmod");
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
        Imag->getType()->isFPOrFPVectorTy()
            ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
            : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp =
        Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
                          llvm::ConstantInt::get(ArgType, 1));
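    // Passing true as the is_zero_undef flag to @llvm.cttz is safe here:
    // the x == 0 case never reaches the cttz result because the select
    // below returns 0 for it.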
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
    // Don't generate llvm.expect on -O0 as the backend won't use it for
    // anything.
    // Note, we still IRGen ExpectedValue because it could have side-effects.
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      return RValue::get(ArgValue);

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *Result =
        Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_assume_aligned: {
    Value *PtrValue = EmitScalarExpr(E->getArg(0));
    Value *OffsetValue =
        (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;

    Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
    ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
    unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();

    EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
    return RValue::get(PtrValue);
  }
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume: {
    if (E->getArg(0)->HasSideEffects(getContext()))
      return RValue::get(nullptr);

    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
    return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
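    // The builtin's second argument is a "type" in [0, 3]; bit 1 selects
    // whether the minimum (types 2 and 3) or maximum (types 0 and 1)
    // remaining object size is wanted, which is the only distinction the
    // @llvm.objectsize intrinsic makes, so only that bit is forwarded.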
    // FIXME: Get right address space.
    llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
    return RValue::get(
        Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0)), CI}));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
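    // @llvm.prefetch takes (address, rw, locality, cache-type); the constant
    // 1 selects the data cache (0 would select the instruction cache).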
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin___clear_cache: {
    Value *Begin = EmitScalarExpr(E->getArg(0));
    Value *End = EmitScalarExpr(E->getArg(1));
    Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
    return RValue::get(Builder.CreateCall(F, {Begin, End}));
  }
  case Builtin::BI__builtin_trap:
    return RValue::get(EmitTrapCall(Intrinsic::trap));
  case Builtin::BI__debugbreak:
    return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      SanitizerScope SanScope(this);
      EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                               SanitizerKind::Unreachable),
                "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
                None);
    } else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating-point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    // NaN is the only value that compares unordered with itself.
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V);

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf_sign: {
    // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
    Value *Arg = EmitScalarExpr(E->getArg(0));
    Value *AbsArg = EmitFAbs(*this, Arg);
    Value *IsInf = Builder.CreateFCmpOEQ(
        AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
    Value *IsNeg = EmitSignBit(*this, Arg);

    llvm::Type *IntTy = ConvertType(E->getType());
    Value *Zero = Constant::getNullValue(IntTy);
    Value *One = ConstantInt::get(IntTy, 1);
    Value *NegativeOne = ConstantInt::get(IntTy, -1);
    Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
    Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V);
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity;
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V);
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V);
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
                        E->getArg(1)->getExprLoc(), FD, 1);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
                        E->getArg(1)->getExprLoc(), FD, 1);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall(F, {Int, Ptr});
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms. Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
        Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);
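
    // Buffer layout expected by @llvm.eh.sjlj.setjmp: slot 0 holds the frame
    // pointer and slot 2 the stack pointer; slot 1 is left for the
    // intrinsic's lowering to fill in (the address to resume at).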
    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
                                llvm::Instruction::And, true);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(nullptr);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // pairs.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(nullptr);
  }

  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                               FunctionType::ExtInfo(),
                                               RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
  }

  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = nullptr;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Monotonic);
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Acquire);
        break;
      case 3:  // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Release);
        break;
      case 4:  // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::AcquireRelease);
        break;
      case 5:  // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
      llvm::Monotonic, llvm::Acquire, llvm::Release,
      llvm::AcquireRelease, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);
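    // memory_order_consume (1) shares the acquire block, and any order value
    // not listed above falls through to the monotonic default.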

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }

  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::SequentiallyConsistent);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }

  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SynchronizationScope Scope;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      Scope = llvm::SingleThread;
    else
      Scope = llvm::CrossThread;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::Acquire, Scope);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::Release, Scope);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AcquireRelease, Scope);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
    // in finite- or unsafe-math mode (the intrinsic has different semantics
    // for handling negative numbers compared to the library function, so
    // -fmath-errno=0 is not enough).
    if (!FD->hasAttr<ConstAttr>())
      break;
    if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
          CGM.getCodeGenOpts().NoNaNsFPMath))
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
    return RValue::get(Builder.CreateCall(F, Arg0));
  }

  case Builtin::BI__builtin_pow:
  case Builtin::BI__builtin_powf:
  case Builtin::BI__builtin_powl:
  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Transform a call to pow* into a @llvm.pow.* intrinsic call.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
  }

  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(
        Builder.CreateCall(F, {FirstArg, EmitScalarExpr(E->getArg(1)),
                               EmitScalarExpr(E->getArg(2))}));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    return RValue::get(
        Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
                           ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially casted, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
1464 case Builtin::BI__builtin_addcb:
1465 case Builtin::BI__builtin_addcs:
1466 case Builtin::BI__builtin_addc:
1467 case Builtin::BI__builtin_addcl:
1468 case Builtin::BI__builtin_addcll:
1469 case Builtin::BI__builtin_subcb:
1470 case Builtin::BI__builtin_subcs:
1471 case Builtin::BI__builtin_subc:
1472 case Builtin::BI__builtin_subcl:
1473 case Builtin::BI__builtin_subcll: {
1475 // We translate all of these builtins from expressions of the form:
1476 // int x = ..., y = ..., carryin = ..., carryout, result;
1477 // result = __builtin_addc(x, y, carryin, &carryout);
1479 // to LLVM IR of the form:
1481 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
1482 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
1483 // %carry1 = extractvalue {i32, i1} %tmp1, 1
1484 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
1486 // %result = extractvalue {i32, i1} %tmp2, 0
1487 // %carry2 = extractvalue {i32, i1} %tmp2, 1
1488 // %tmp3 = or i1 %carry1, %carry2
1489 // %tmp4 = zext i1 %tmp3 to i32
1490 // store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    std::pair<llvm::Value*, unsigned> CarryOutPtr =
      EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                         CarryOutPtr.first);
    CarryOutStore->setAlignment(CarryOutPtr.second);
    return RValue::get(Sum2);
  }
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {

    // We translate all of these builtins directly to the relevant LLVM IR
    // node.

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    std::pair<llvm::Value *, unsigned> SumOutPtr =
      EmitPointerWithAlignment(E->getArg(2));
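
    // A sketch of the usage this handles (names illustrative):
    //   int r;
    //   if (__builtin_sadd_overflow(a, b, &r)) ...
    // The sum is stored through the pointer and the i1 overflow flag from
    // @llvm.sadd.with.overflow.i32 becomes the builtin's return value.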

    // Decide which of the overflow intrinsics we are lowering to:
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown security overflow builtin id.");
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
      break;
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
      break;
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
      break;
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
      break;
    }

    llvm::Value *Carry;
    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
    llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
    SumOutStore->setAlignment(SumOutPtr.second);

    return RValue::get(Carry);
  }
  case Builtin::BI__builtin_addressof:
    return RValue::get(EmitLValue(E->getArg(0)).getAddress());
  case Builtin::BI__builtin_operator_new:
    return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
                                    E->getArg(0), false);
  case Builtin::BI__builtin_operator_delete:
    return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
                                    E->getArg(0), true);
  case Builtin::BI__noop:
    // __noop always evaluates to an integer literal zero.
    return RValue::get(ConstantInt::get(IntTy, 0));
  case Builtin::BI__builtin_call_with_static_chain: {
    const CallExpr *Call = cast<CallExpr>(E->getArg(0));
    const Expr *Chain = E->getArg(1);
    return EmitCall(Call->getCallee()->getType(),
                    EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
                    Call->getCalleeDecl(), EmitScalarExpr(Chain));
  }
  case Builtin::BI_InterlockedExchange:
  case Builtin::BI_InterlockedExchangePointer:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
  case Builtin::BI_InterlockedCompareExchangePointer: {
    llvm::Type *RTy;
    llvm::IntegerType *IntType =
      IntegerType::get(getLLVMContext(),
                       getContext().getTypeSize(E->getType()));
    llvm::Type *IntPtrType = IntType->getPointerTo();

    llvm::Value *Destination =
      Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);

    llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
    RTy = Exchange->getType();
    Exchange = Builder.CreatePtrToInt(Exchange, IntType);

    llvm::Value *Comparand =
      Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);

    auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              SequentiallyConsistent,
                                              SequentiallyConsistent);
    Result->setVolatile(true);

    return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
                                                                         0),
                                              RTy));
  }
  case Builtin::BI_InterlockedCompareExchange: {
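    // Note the operand order: _InterlockedCompareExchange(Dest, Exchange,
    // Comparand) maps to cmpxchg(Dest, /*cmp=*/Comparand, /*new=*/Exchange),
    // so arguments 2 and 1 are deliberately swapped below.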
    AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
        EmitScalarExpr(E->getArg(0)),
        EmitScalarExpr(E->getArg(2)),
        EmitScalarExpr(E->getArg(1)),
        SequentiallyConsistent,
        SequentiallyConsistent);
    CXI->setVolatile(true);
    return RValue::get(Builder.CreateExtractValue(CXI, 0));
  }
  case Builtin::BI_InterlockedIncrement: {
    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Add,
        EmitScalarExpr(E->getArg(0)),
        ConstantInt::get(Int32Ty, 1),
        llvm::SequentiallyConsistent);
    RMWI->setVolatile(true);
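    // atomicrmw yields the value loaded *before* the update, but
    // _InterlockedIncrement must return the incremented value, hence the
    // extra add below.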
    return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
  }
  case Builtin::BI_InterlockedDecrement: {
    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Sub,
        EmitScalarExpr(E->getArg(0)),
        ConstantInt::get(Int32Ty, 1),
        llvm::SequentiallyConsistent);
    RMWI->setVolatile(true);
    return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
  }
  case Builtin::BI_InterlockedExchangeAdd: {
    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Add,
        EmitScalarExpr(E->getArg(0)),
        EmitScalarExpr(E->getArg(1)),
        llvm::SequentiallyConsistent);
    RMWI->setVolatile(true);
    return RValue::get(RMWI);
  }
  case Builtin::BI__readfsdword: {
    Value *IntToPtr =
      Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
                             llvm::PointerType::get(CGM.Int32Ty, 257));
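    // On x86, LLVM address space 257 denotes FS-relative addressing (and 256
    // the GS segment), so this load reads a dword at the given FS offset.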
    LoadInst *Load =
        Builder.CreateAlignedLoad(IntToPtr, /*Align=*/4, /*isVolatile=*/true);
    return RValue::get(Load);
  }
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    return RValue::get(EmitSEHExceptionCode());
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    return RValue::get(EmitSEHExceptionInfo());
  case Builtin::BI__abnormal_termination:
  case Builtin::BI_abnormal_termination:
    return RValue::get(EmitSEHAbnormalTermination());
  case Builtin::BI_setjmpex: {
    if (getTarget().getTriple().isOSMSVCRT()) {
      llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
      llvm::AttributeSet ReturnsTwiceAttr =
          AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                            llvm::Attribute::ReturnsTwice);
      llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction(
          llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
          "_setjmpex", ReturnsTwiceAttr);
      llvm::Value *Buf = Builder.CreateBitOrPointerCast(
          EmitScalarExpr(E->getArg(0)), Int8PtrTy);
      llvm::Value *FrameAddr =
          Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                             ConstantInt::get(Int32Ty, 0));
      llvm::Value *Args[] = {Buf, FrameAddr};
      llvm::CallSite CS = EmitRuntimeCallOrInvoke(SetJmpEx, Args);
      CS.setAttributes(ReturnsTwiceAttr);
      return RValue::get(CS.getInstruction());
    }
    break;
  }
  case Builtin::BI_setjmp: {
    if (getTarget().getTriple().isOSMSVCRT()) {
      llvm::AttributeSet ReturnsTwiceAttr =
          AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                            llvm::Attribute::ReturnsTwice);
      llvm::Value *Buf = Builder.CreateBitOrPointerCast(
          EmitScalarExpr(E->getArg(0)), Int8PtrTy);
      llvm::CallSite CS;
      if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
        llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy};
        llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true),
            "_setjmp3", ReturnsTwiceAttr);
        llvm::Value *Count = ConstantInt::get(IntTy, 0);
        llvm::Value *Args[] = {Buf, Count};
        CS = EmitRuntimeCallOrInvoke(SetJmp3, Args);
      } else {
        llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
        llvm::Constant *SetJmp = CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
            "_setjmp", ReturnsTwiceAttr);
        llvm::Value *FrameAddr =
            Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                               ConstantInt::get(Int32Ty, 0));
        llvm::Value *Args[] = {Buf, FrameAddr};
        CS = EmitRuntimeCallOrInvoke(SetJmp, Args);
      }
      CS.setAttributes(ReturnsTwiceAttr);
      return RValue::get(CS.getInstruction());
    }
    break;
  }
  case Builtin::BI__GetExceptionInfo: {
    if (llvm::GlobalVariable *GV =
            CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
      return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
    break;
  }
  }

  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
          llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
    // NOTE: we don't need to perform a compatibility flag check here, since
    // these intrinsics are declared in Builtins*.def via LANGBUILTIN, which
    // filters the MS builtins via ALL_MS_LANGUAGES earlier.
    if (IntrinsicID == Intrinsic::not_intrinsic)
      IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name);
  }

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = VoidTy;
    if (!BuiltinRetType->isVoidType())
      RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  return GetUndefRValue(E->getType());
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (getTarget().getTriple().getArch()) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be:
    return EmitAArch64BuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return EmitNVPTXBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}

static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                     NeonTypeFlags TypeFlags,
                                     bool V1Ty = false) {
  int IsQuad = TypeFlags.isQuad();
  switch (TypeFlags.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
  case NeonTypeFlags::Float16:
    return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
  case NeonTypeFlags::Poly128:
    // FIXME: i128 and f128 are not yet fully supported in Clang and LLVM
    // (much of the i128/f128 API is missing), so we represent poly128 as
    // v16i8 and rely on pattern matching downstream.
    return llvm::VectorType::get(CGF->Int8Ty, 16);
  case NeonTypeFlags::Float32:
    return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Float64:
    return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
  }
  llvm_unreachable("Unknown vector element type!");
}
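
// For example (illustrative): an Int32 element type with the quad flag set
// yields <4 x i32> (2 << 1 lanes), while the same element type with V1Ty
// set yields the single-element vector <1 x i32> used by scalar intrinsics.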

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  Value *SV = llvm::ConstantVector::getSplat(nElts, C);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}

Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {
  int SV = cast<ConstantInt>(V)->getSExtValue();
  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}

/// \brief Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
                                          llvm::Type *Ty, bool usgn,
                                          const char *name) {
  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);

  int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
  int EltSize = VTy->getScalarSizeInBits();

  Vec = Builder.CreateBitCast(Vec, Ty);

  // lshr/ashr are undefined when the shift amount is equal to the vector
  // element size.
  if (ShiftAmt == EltSize) {
    if (usgn) {
      // Right-shifting an unsigned value by its size yields 0.
      llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
      return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
    } else {
      // Right-shifting a signed value by its size is equivalent
      // to a shift of size-1.
      --ShiftAmt;
      Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
    }
  }

  Shift = EmitNeonShiftVector(Shift, Ty, false);
  if (usgn)
    return Builder.CreateLShr(Vec, Shift, name);
  return Builder.CreateAShr(Vec, Shift, name);
}
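
// For example (illustrative): a signed right shift of a <4 x i32> vector by
// 32 is emitted as a shift by 31, preserving the sign-fill result the NEON
// instruction defines, while an unsigned one folds to a zero vector.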

/// EmitPointerWithAlignment - Given an expression with a pointer type, find
/// the alignment of the type referenced by the pointer.  Skip over implicit
/// casts.
std::pair<llvm::Value*, unsigned>
CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
  assert(Addr->getType()->isPointerType());
  Addr = Addr->IgnoreParens();
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
    if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
        ICE->getSubExpr()->getType()->isPointerType()) {
      std::pair<llvm::Value*, unsigned> Ptr =
          EmitPointerWithAlignment(ICE->getSubExpr());
      Ptr.first = Builder.CreateBitCast(Ptr.first,
                                        ConvertType(Addr->getType()));
      return Ptr;
    } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
      LValue LV = EmitLValue(ICE->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // this if should be removed.
        QualType PtTy = ICE->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // this if should be removed.
        QualType PtTy = UO->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }

  unsigned Align = 1;
  QualType PtTy = Addr->getType()->getPointeeType();
  if (!PtTy->isIncompleteType())
    Align = getContext().getTypeAlignInChars(PtTy).getQuantity();

  return std::make_pair(EmitScalarExpr(Addr), Align);
}

enum {
  AddRetType = (1 << 0),
  Add1ArgType = (1 << 1),
  Add2ArgTypes = (1 << 2),

  VectorizeRetType = (1 << 3),
  VectorizeArgTypes = (1 << 4),

  InventFloatType = (1 << 5),
  UnsignedAlts = (1 << 6),

  Use64BitVectors = (1 << 7),
  Use128BitVectors = (1 << 8),

  Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
  VectorRet = AddRetType | VectorizeRetType,
  VectorRetGetArgs01 =
      AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
  FpCmpzModifiers =
      AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};

struct NeonIntrinsicInfo {
  unsigned BuiltinID;
  unsigned LLVMIntrinsic;
  unsigned AltLLVMIntrinsic;
  const char *NameHint;
  unsigned TypeModifier;

  bool operator<(unsigned RHSBuiltinID) const {
    return BuiltinID < RHSBuiltinID;
  }
};

#define NEONMAP0(NameBase) \
  { NEON::BI__builtin_neon_ ## NameBase, 0, 0, #NameBase, 0 }

#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
  { NEON::BI__builtin_neon_ ## NameBase, \
    Intrinsic::LLVMIntrinsic, 0, #NameBase, TypeModifier }

#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
  { NEON::BI__builtin_neon_ ## NameBase, \
    Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
    #NameBase, TypeModifier }
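
// For instance, NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType) expands to
//   { NEON::BI__builtin_neon_vcls_v, Intrinsic::arm_neon_vcls, 0,
//     "vcls_v", Add1ArgType },
// tying the builtin ID to its LLVM intrinsic plus a type-modifier mask.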

static NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
  NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vabs_v, arm_neon_vabs, 0),
  NEONMAP1(vabsq_v, arm_neon_vabs, 0),
  NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
  NEONMAP1(vaeseq_v, arm_neon_aese, 0),
  NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
  NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
  NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vcage_v, arm_neon_vacge, 0),
  NEONMAP1(vcageq_v, arm_neon_vacge, 0),
  NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcale_v, arm_neon_vacge, 0),
  NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
  NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_v, arm_neon_vcvtfp2hf, 0),
  NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvt_s32_v),
  NEONMAP0(vcvt_s64_v),
  NEONMAP0(vcvt_u32_v),
  NEONMAP0(vcvt_u64_v),
  NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_s32_v),
  NEONMAP0(vcvtq_s64_v),
  NEONMAP0(vcvtq_u32_v),
  NEONMAP0(vcvtq_u64_v),
  NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP0(vld1_dup_v),
  NEONMAP1(vld1_v, arm_neon_vld1, 0),
  NEONMAP0(vld1q_dup_v),
  NEONMAP1(vld1q_v, arm_neon_vld1, 0),
  NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2_v, arm_neon_vld2, 0),
  NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2q_v, arm_neon_vld2, 0),
  NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3_v, arm_neon_vld3, 0),
  NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3q_v, arm_neon_vld3, 0),
  NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4_v, arm_neon_vld4, 0),
  NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4q_v, arm_neon_vld4, 0),
  NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
  NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
  NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
  NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
  NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
  NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
  NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_v, arm_neon_vst1, 0),
  NEONMAP1(vst1q_v, arm_neon_vst1, 0),
  NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2_v, arm_neon_vst2, 0),
  NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2q_v, arm_neon_vst2, 0),
  NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3_v, arm_neon_vst3, 0),
  NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3q_v, arm_neon_vst3, 0),
  NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4_v, arm_neon_vst4, 0),
  NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4q_v, arm_neon_vst4, 0),
};

static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
  NEONMAP1(vabs_v, aarch64_neon_abs, 0),
  NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
  NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
  NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
  NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
  NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
  NEONMAP1(vcage_v, aarch64_neon_facge, 0),
  NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcale_v, aarch64_neon_facge, 0),
  NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_v, aarch64_neon_vcvtfp2hf, 0),
  NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
  NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
  NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
  NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
  NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
  NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
  NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
  NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
};

static NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
  NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
  NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
  NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
  NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
  NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
  NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
  NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
  NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
  NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
  NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
  NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
  NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
  NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
  NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
  NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
  NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
};

#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2

static bool NEONSIMDIntrinsicsProvenSorted = false;

static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;

static const NeonIntrinsicInfo *
findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
                       unsigned BuiltinID, bool &MapProvenSorted) {

#ifndef NDEBUG
  if (!MapProvenSorted) {
    // FIXME: use std::is_sorted once C++11 is allowed
    for (unsigned i = 0; i < IntrinsicMap.size() - 1; ++i)
      assert(IntrinsicMap[i].BuiltinID <= IntrinsicMap[i + 1].BuiltinID);
    MapProvenSorted = true;
  }
#endif

  const NeonIntrinsicInfo *Builtin =
      std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);

  if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
    return Builtin;

  return nullptr;
}
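
// Typical use (a sketch; the actual call sites live in the per-target
// EmitXXXBuiltinExpr functions):
//   if (const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
//           ARMSIMDIntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted))
//     ... dispatch via Builtin->LLVMIntrinsic ...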

Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                                   unsigned Modifier,
                                                   llvm::Type *ArgType,
                                                   const CallExpr *E) {
  int VectorSize = 0;
  if (Modifier & Use64BitVectors)
    VectorSize = 64;
  else if (Modifier & Use128BitVectors)
    VectorSize = 128;

  // Return type.
  SmallVector<llvm::Type *, 3> Tys;
  if (Modifier & AddRetType) {
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    if (Modifier & VectorizeRetType)
      Ty = llvm::VectorType::get(
          Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);

    Tys.push_back(Ty);
  }

  // Arguments.
  if (Modifier & VectorizeArgTypes) {
    int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
    ArgType = llvm::VectorType::get(ArgType, Elts);
  }

  if (Modifier & (Add1ArgType | Add2ArgTypes))
    Tys.push_back(ArgType);

  if (Modifier & Add2ArgTypes)
    Tys.push_back(ArgType);

  if (Modifier & InventFloatType)
    Tys.push_back(FloatTy);

  return CGM.getIntrinsic(IntrinsicID, Tys);
}

static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
                                            const NeonIntrinsicInfo &SISDInfo,
                                            SmallVectorImpl<Value *> &Ops,
                                            const CallExpr *E) {
  unsigned BuiltinID = SISDInfo.BuiltinID;
  unsigned int Int = SISDInfo.LLVMIntrinsic;
  unsigned Modifier = SISDInfo.TypeModifier;
  const char *s = SISDInfo.NameHint;

  switch (BuiltinID) {
  case NEON::BI__builtin_neon_vcled_s64:
  case NEON::BI__builtin_neon_vcled_u64:
  case NEON::BI__builtin_neon_vcles_f32:
  case NEON::BI__builtin_neon_vcled_f64:
  case NEON::BI__builtin_neon_vcltd_s64:
  case NEON::BI__builtin_neon_vcltd_u64:
  case NEON::BI__builtin_neon_vclts_f32:
  case NEON::BI__builtin_neon_vcltd_f64:
  case NEON::BI__builtin_neon_vcales_f32:
  case NEON::BI__builtin_neon_vcaled_f64:
  case NEON::BI__builtin_neon_vcalts_f32:
  case NEON::BI__builtin_neon_vcaltd_f64:
    // Only one direction of comparisons actually exists: cmle is a cmge with
    // swapped operands. The table gives us the right intrinsic, but we still
    // need to do the swap.
    std::swap(Ops[0], Ops[1]);
    break;
  }

  assert(Int && "Generic code assumes a valid intrinsic");

  // Determine the type(s) of this overloaded AArch64 intrinsic.
  const Expr *Arg = E->getArg(0);
  llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
  Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);

  int j = 0;
  ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j) {
    llvm::Type *ArgTy = ai->getType();
    if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
        ArgTy->getPrimitiveSizeInBits())
      continue;

    assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
    // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
    // it before inserting.
    Ops[j] =
        CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
    Ops[j] =
        CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
  }

  Value *Result = CGF.EmitNeonCall(F, Ops, s);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  if (ResultType->getPrimitiveSizeInBits() <
      Result->getType()->getPrimitiveSizeInBits())
    return CGF.Builder.CreateExtractElement(Result, C0);

  return CGF.Builder.CreateBitCast(Result, ResultType, s);
}
2744 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
2745 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
2746 const char *NameHint, unsigned Modifier, const CallExpr *E,
2747 SmallVectorImpl<llvm::Value *> &Ops, llvm::Value *Align) {
2748 // Get the last argument, which specifies the vector type.
2749 llvm::APSInt NeonTypeConst;
2750 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
2751 if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
2754 // Determine the type of this overloaded NEON intrinsic.
2755 NeonTypeFlags Type(NeonTypeConst.getZExtValue());
2756 bool Usgn = Type.isUnsigned();
2757 bool Quad = Type.isQuad();
2759 llvm::VectorType *VTy = GetNeonType(this, Type);
2760 llvm::Type *Ty = VTy;
2761 if (!Ty)
2762 return nullptr;
2764 unsigned Int = LLVMIntrinsic;
2765 if ((Modifier & UnsignedAlts) && !Usgn)
2766 Int = AltLLVMIntrinsic;
2768 switch (BuiltinID) {
2769 default: break;
2770 case NEON::BI__builtin_neon_vabs_v:
2771 case NEON::BI__builtin_neon_vabsq_v:
2772 if (VTy->getElementType()->isFloatingPointTy())
2773 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
2774 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
2775 case NEON::BI__builtin_neon_vaddhn_v: {
2776 llvm::VectorType *SrcTy =
2777 llvm::VectorType::getExtendedElementVectorType(VTy);
2779 // %sum = add <4 x i32> %lhs, %rhs
2780 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
2781 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
2782 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
2784 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
2785 Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
2786 SrcTy->getScalarSizeInBits() / 2);
2787 ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
2788 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
2790 // %res = trunc <4 x i32> %high to <4 x i16>
2791 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
2792 }
2793 case NEON::BI__builtin_neon_vcale_v:
2794 case NEON::BI__builtin_neon_vcaleq_v:
2795 case NEON::BI__builtin_neon_vcalt_v:
2796 case NEON::BI__builtin_neon_vcaltq_v:
2797 std::swap(Ops[0], Ops[1]);
2798 case NEON::BI__builtin_neon_vcage_v:
2799 case NEON::BI__builtin_neon_vcageq_v:
2800 case NEON::BI__builtin_neon_vcagt_v:
2801 case NEON::BI__builtin_neon_vcagtq_v: {
2802 llvm::Type *VecFlt = llvm::VectorType::get(
2803 VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
2804 VTy->getNumElements());
2805 llvm::Type *Tys[] = { VTy, VecFlt };
2806 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
2807 return EmitNeonCall(F, Ops, NameHint);
2808 }
2809 case NEON::BI__builtin_neon_vclz_v:
2810 case NEON::BI__builtin_neon_vclzq_v:
2811 // We generate a target-independent intrinsic, which needs a second argument
2812 // for whether or not clz of zero is undefined; on ARM it isn't.
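// E.g. (illustrative IR, assuming a <2 x i32> operand):
//   %vclz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)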
2813 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
2814 break;
2815 case NEON::BI__builtin_neon_vcvt_f32_v:
2816 case NEON::BI__builtin_neon_vcvtq_f32_v:
2817 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2818 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
2819 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
2820 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
2821 case NEON::BI__builtin_neon_vcvt_n_f32_v:
2822 case NEON::BI__builtin_neon_vcvt_n_f64_v:
2823 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
2824 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
2825 bool Double =
2826 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2827 llvm::Type *FloatTy =
2828 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2829 : NeonTypeFlags::Float32,
2830 false, Quad));
2831 llvm::Type *Tys[2] = { FloatTy, Ty };
2832 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
2833 Function *F = CGM.getIntrinsic(Int, Tys);
2834 return EmitNeonCall(F, Ops, "vcvt_n");
2835 }
2836 case NEON::BI__builtin_neon_vcvt_n_s32_v:
2837 case NEON::BI__builtin_neon_vcvt_n_u32_v:
2838 case NEON::BI__builtin_neon_vcvt_n_s64_v:
2839 case NEON::BI__builtin_neon_vcvt_n_u64_v:
2840 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
2841 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
2842 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
2843 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
2844 bool Double =
2845 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2846 llvm::Type *FloatTy =
2847 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2848 : NeonTypeFlags::Float32,
2849 false, Quad));
2850 llvm::Type *Tys[2] = { Ty, FloatTy };
2851 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
2852 return EmitNeonCall(F, Ops, "vcvt_n");
2853 }
2854 case NEON::BI__builtin_neon_vcvt_s32_v:
2855 case NEON::BI__builtin_neon_vcvt_u32_v:
2856 case NEON::BI__builtin_neon_vcvt_s64_v:
2857 case NEON::BI__builtin_neon_vcvt_u64_v:
2858 case NEON::BI__builtin_neon_vcvtq_s32_v:
2859 case NEON::BI__builtin_neon_vcvtq_u32_v:
2860 case NEON::BI__builtin_neon_vcvtq_s64_v:
2861 case NEON::BI__builtin_neon_vcvtq_u64_v: {
2862 bool Double =
2863 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2864 llvm::Type *FloatTy =
2865 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2866 : NeonTypeFlags::Float32,
2867 false, Quad));
2868 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
2869 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
2870 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
2871 }
2872 case NEON::BI__builtin_neon_vcvta_s32_v:
2873 case NEON::BI__builtin_neon_vcvta_s64_v:
2874 case NEON::BI__builtin_neon_vcvta_u32_v:
2875 case NEON::BI__builtin_neon_vcvta_u64_v:
2876 case NEON::BI__builtin_neon_vcvtaq_s32_v:
2877 case NEON::BI__builtin_neon_vcvtaq_s64_v:
2878 case NEON::BI__builtin_neon_vcvtaq_u32_v:
2879 case NEON::BI__builtin_neon_vcvtaq_u64_v:
2880 case NEON::BI__builtin_neon_vcvtn_s32_v:
2881 case NEON::BI__builtin_neon_vcvtn_s64_v:
2882 case NEON::BI__builtin_neon_vcvtn_u32_v:
2883 case NEON::BI__builtin_neon_vcvtn_u64_v:
2884 case NEON::BI__builtin_neon_vcvtnq_s32_v:
2885 case NEON::BI__builtin_neon_vcvtnq_s64_v:
2886 case NEON::BI__builtin_neon_vcvtnq_u32_v:
2887 case NEON::BI__builtin_neon_vcvtnq_u64_v:
2888 case NEON::BI__builtin_neon_vcvtp_s32_v:
2889 case NEON::BI__builtin_neon_vcvtp_s64_v:
2890 case NEON::BI__builtin_neon_vcvtp_u32_v:
2891 case NEON::BI__builtin_neon_vcvtp_u64_v:
2892 case NEON::BI__builtin_neon_vcvtpq_s32_v:
2893 case NEON::BI__builtin_neon_vcvtpq_s64_v:
2894 case NEON::BI__builtin_neon_vcvtpq_u32_v:
2895 case NEON::BI__builtin_neon_vcvtpq_u64_v:
2896 case NEON::BI__builtin_neon_vcvtm_s32_v:
2897 case NEON::BI__builtin_neon_vcvtm_s64_v:
2898 case NEON::BI__builtin_neon_vcvtm_u32_v:
2899 case NEON::BI__builtin_neon_vcvtm_u64_v:
2900 case NEON::BI__builtin_neon_vcvtmq_s32_v:
2901 case NEON::BI__builtin_neon_vcvtmq_s64_v:
2902 case NEON::BI__builtin_neon_vcvtmq_u32_v:
2903 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
2904 bool Double =
2905 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2906 llvm::Type *InTy =
2907 GetNeonType(this,
2908 NeonTypeFlags(Double ? NeonTypeFlags::Float64
2909 : NeonTypeFlags::Float32, false, Quad));
2910 llvm::Type *Tys[2] = { Ty, InTy };
2911 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
2912 }
2913 case NEON::BI__builtin_neon_vext_v:
2914 case NEON::BI__builtin_neon_vextq_v: {
2915 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
2916 SmallVector<Constant*, 16> Indices;
2917 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
2918 Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
2920 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2921 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2922 Value *SV = llvm::ConstantVector::get(Indices);
2923 return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
2924 }
2925 case NEON::BI__builtin_neon_vfma_v:
2926 case NEON::BI__builtin_neon_vfmaq_v: {
2927 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
2928 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2929 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2930 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2932 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
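// E.g. (sketch): vfmaq_f32(acc, a, b) becomes roughly
//   %0 = call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b,
//                                         <4 x float> %acc)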
2933 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
2934 }
2935 case NEON::BI__builtin_neon_vld1_v:
2936 case NEON::BI__builtin_neon_vld1q_v:
2937 Ops.push_back(Align);
2938 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vld1");
2939 case NEON::BI__builtin_neon_vld2_v:
2940 case NEON::BI__builtin_neon_vld2q_v:
2941 case NEON::BI__builtin_neon_vld3_v:
2942 case NEON::BI__builtin_neon_vld3q_v:
2943 case NEON::BI__builtin_neon_vld4_v:
2944 case NEON::BI__builtin_neon_vld4q_v: {
2945 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
2946 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
2947 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2948 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2949 return Builder.CreateStore(Ops[1], Ops[0]);
2950 }
2951 case NEON::BI__builtin_neon_vld1_dup_v:
2952 case NEON::BI__builtin_neon_vld1q_dup_v: {
2953 Value *V = UndefValue::get(Ty);
2954 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
2955 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2956 LoadInst *Ld = Builder.CreateLoad(Ops[0]);
2957 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
2958 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
2959 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
2960 return EmitNeonSplat(Ops[0], CI);
2961 }
2962 case NEON::BI__builtin_neon_vld2_lane_v:
2963 case NEON::BI__builtin_neon_vld2q_lane_v:
2964 case NEON::BI__builtin_neon_vld3_lane_v:
2965 case NEON::BI__builtin_neon_vld3q_lane_v:
2966 case NEON::BI__builtin_neon_vld4_lane_v:
2967 case NEON::BI__builtin_neon_vld4q_lane_v: {
2968 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
2969 for (unsigned I = 2; I < Ops.size() - 1; ++I)
2970 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
2971 Ops.push_back(Align);
2972 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
2973 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2974 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2975 return Builder.CreateStore(Ops[1], Ops[0]);
2976 }
2977 case NEON::BI__builtin_neon_vmovl_v: {
2978 llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
2979 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
2980 if (Usgn)
2981 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
2982 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
2983 }
2984 case NEON::BI__builtin_neon_vmovn_v: {
2985 llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
2986 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
2987 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
2988 }
2989 case NEON::BI__builtin_neon_vmull_v:
2990 // FIXME: the integer vmull operations could be emitted in terms of pure
2991 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
2992 // hoisting the exts outside loops. Until global ISel comes along that can
2993 // see through such movement, this leads to bad CodeGen. So we need an
2994 // intrinsic for now.
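// The pure-IR form would be roughly (a sketch for a signed vmull on <4 x i16>):
//   %lhs.ext = sext <4 x i16> %lhs to <4 x i32>
//   %rhs.ext = sext <4 x i16> %rhs to <4 x i32>
//   %prod    = mul <4 x i32> %lhs.ext, %rhs.ext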
2995 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
2996 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
2997 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
2998 case NEON::BI__builtin_neon_vpadal_v:
2999 case NEON::BI__builtin_neon_vpadalq_v: {
3000 // The source operand type has twice as many elements of half the size.
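// E.g. a <4 x i32> result is accumulated from an <8 x i16> source operand.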
3001 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
3002 llvm::Type *EltTy =
3003 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
3004 llvm::Type *NarrowTy =
3005 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
3006 llvm::Type *Tys[2] = { Ty, NarrowTy };
3007 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
3008 }
3009 case NEON::BI__builtin_neon_vpaddl_v:
3010 case NEON::BI__builtin_neon_vpaddlq_v: {
3011 // The source operand type has twice as many elements of half the size.
3012 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
3013 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
3014 llvm::Type *NarrowTy =
3015 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
3016 llvm::Type *Tys[2] = { Ty, NarrowTy };
3017 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
3018 }
3019 case NEON::BI__builtin_neon_vqdmlal_v:
3020 case NEON::BI__builtin_neon_vqdmlsl_v: {
3021 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
3022 Value *Mul = EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty),
3023 MulOps, "vqdmlal");
3025 SmallVector<Value *, 2> AccumOps;
3026 AccumOps.push_back(Ops[0]);
3027 AccumOps.push_back(Mul);
3028 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty),
3029 AccumOps, NameHint);
3030 }
3031 case NEON::BI__builtin_neon_vqshl_n_v:
3032 case NEON::BI__builtin_neon_vqshlq_n_v:
3033 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
3034 1, false);
3035 case NEON::BI__builtin_neon_vqshlu_n_v:
3036 case NEON::BI__builtin_neon_vqshluq_n_v:
3037 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
3038 1, false);
3039 case NEON::BI__builtin_neon_vrecpe_v:
3040 case NEON::BI__builtin_neon_vrecpeq_v:
3041 case NEON::BI__builtin_neon_vrsqrte_v:
3042 case NEON::BI__builtin_neon_vrsqrteq_v:
3043 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
3044 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
3046 case NEON::BI__builtin_neon_vrshr_n_v:
3047 case NEON::BI__builtin_neon_vrshrq_n_v:
3048 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
3049 1, true);
3050 case NEON::BI__builtin_neon_vshl_n_v:
3051 case NEON::BI__builtin_neon_vshlq_n_v:
3052 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
3053 return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
3054 "vshl_n");
3055 case NEON::BI__builtin_neon_vshll_n_v: {
3056 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
3057 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
3058 if (Usgn)
3059 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
3060 else
3061 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
3062 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
3063 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
3064 }
3065 case NEON::BI__builtin_neon_vshrn_n_v: {
3066 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
3067 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
3068 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
3069 if (Usgn)
3070 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
3071 else
3072 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
3073 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
3074 }
3075 case NEON::BI__builtin_neon_vshr_n_v:
3076 case NEON::BI__builtin_neon_vshrq_n_v:
3077 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
3078 case NEON::BI__builtin_neon_vst1_v:
3079 case NEON::BI__builtin_neon_vst1q_v:
3080 case NEON::BI__builtin_neon_vst2_v:
3081 case NEON::BI__builtin_neon_vst2q_v:
3082 case NEON::BI__builtin_neon_vst3_v:
3083 case NEON::BI__builtin_neon_vst3q_v:
3084 case NEON::BI__builtin_neon_vst4_v:
3085 case NEON::BI__builtin_neon_vst4q_v:
3086 case NEON::BI__builtin_neon_vst2_lane_v:
3087 case NEON::BI__builtin_neon_vst2q_lane_v:
3088 case NEON::BI__builtin_neon_vst3_lane_v:
3089 case NEON::BI__builtin_neon_vst3q_lane_v:
3090 case NEON::BI__builtin_neon_vst4_lane_v:
3091 case NEON::BI__builtin_neon_vst4q_lane_v:
3092 Ops.push_back(Align);
3093 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
3094 case NEON::BI__builtin_neon_vsubhn_v: {
3095 llvm::VectorType *SrcTy =
3096 llvm::VectorType::getExtendedElementVectorType(VTy);
3098 // %sum = add <4 x i32> %lhs, %rhs
3099 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
3100 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
3101 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
3103 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
3104 Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
3105 SrcTy->getScalarSizeInBits() / 2);
3106 ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
3107 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
3109 // %res = trunc <4 x i32> %high to <4 x i16>
3110 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
3111 }
3112 case NEON::BI__builtin_neon_vtrn_v:
3113 case NEON::BI__builtin_neon_vtrnq_v: {
3114 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
3115 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3116 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
3117 Value *SV = nullptr;
3119 for (unsigned vi = 0; vi != 2; ++vi) {
3120 SmallVector<Constant*, 16> Indices;
3121 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
3122 Indices.push_back(Builder.getInt32(i+vi));
3123 Indices.push_back(Builder.getInt32(i+e+vi));
3124 }
3125 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
3126 SV = llvm::ConstantVector::get(Indices);
3127 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
3128 SV = Builder.CreateStore(SV, Addr);
3129 }
3130 return SV;
3131 }
3132 case NEON::BI__builtin_neon_vtst_v:
3133 case NEON::BI__builtin_neon_vtstq_v: {
3134 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3135 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3136 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
3137 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
3138 ConstantAggregateZero::get(Ty));
3139 return Builder.CreateSExt(Ops[0], Ty, "vtst");
3141 case NEON::BI__builtin_neon_vuzp_v:
3142 case NEON::BI__builtin_neon_vuzpq_v: {
3143 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
3144 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3145 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
3146 Value *SV = nullptr;
3148 for (unsigned vi = 0; vi != 2; ++vi) {
3149 SmallVector<Constant*, 16> Indices;
3150 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
3151 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
3153 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
3154 SV = llvm::ConstantVector::get(Indices);
3155 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
3156 SV = Builder.CreateStore(SV, Addr);
3157 }
3158 return SV;
3159 }
3160 case NEON::BI__builtin_neon_vzip_v:
3161 case NEON::BI__builtin_neon_vzipq_v: {
3162 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
3163 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3164 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
3165 Value *SV = nullptr;
3167 for (unsigned vi = 0; vi != 2; ++vi) {
3168 SmallVector<Constant*, 16> Indices;
3169 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
3170 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
3171 Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
3172 }
3173 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
3174 SV = llvm::ConstantVector::get(Indices);
3175 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
3176 SV = Builder.CreateStore(SV, Addr);
3177 }
3178 return SV;
3179 }
3180 }
3182 assert(Int && "Expected valid intrinsic number");
3184 // Determine the type(s) of this overloaded AArch64 intrinsic.
3185 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
3187 Value *Result = EmitNeonCall(F, Ops, NameHint);
3188 llvm::Type *ResultType = ConvertType(E->getType());
3189 // Some AArch64 intrinsics return a one-element vector; bitcast it to the
3190 // scalar type expected by the builtin.
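// E.g. (sketch): an intrinsic that yields <1 x i64> is bitcast to plain i64
// when the builtin is declared to return a scalar.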
3191 return Builder.CreateBitCast(Result, ResultType, NameHint);
3192 }
3194 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
3195 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
3196 const CmpInst::Predicate Ip, const Twine &Name) {
3197 llvm::Type *OTy = Op->getType();
3199 // FIXME: this is utterly horrific. We should not be looking at previous
3200 // codegen context to find out what needs doing. Unfortunately TableGen
3201 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32.
3203 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
3204 OTy = BI->getOperand(0)->getType();
3206 Op = Builder.CreateBitCast(Op, OTy);
3207 if (OTy->getScalarType()->isFloatingPointTy()) {
3208 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
3209 } else {
3210 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
3211 }
3212 return Builder.CreateSExt(Op, Ty, Name);
3213 }
3215 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
3216 Value *ExtOp, Value *IndexOp,
3217 llvm::Type *ResTy, unsigned IntID,
3218 const char *Name) {
3219 SmallVector<Value *, 2> TblOps;
3220 if (ExtOp)
3221 TblOps.push_back(ExtOp);
3223 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
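// These are shuffle indices that concatenate two 64-bit table halves into one
// 128-bit register, e.g. two <8 x i8> values into a single <16 x i8> value.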
3224 SmallVector<Constant*, 16> Indices;
3225 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
3226 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
3227 Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
3228 Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
3229 }
3230 Value *SV = llvm::ConstantVector::get(Indices);
3232 int PairPos = 0, End = Ops.size() - 1;
3233 while (PairPos < End) {
3234 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
3235 Ops[PairPos+1], SV, Name));
3236 PairPos += 2;
3237 }
3239 // If there's an odd number of 64-bit lookup-table vectors, fill the high
3240 // 64 bits of the last 128-bit lookup-table register with zero.
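// E.g. (sketch): vtbl3 supplies three <8 x i8> tables; the third is paired
// with an all-zero <8 x i8> so the hardware tbl instruction sees two full
// 128-bit table registers.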
3241 if (PairPos == End) {
3242 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
3243 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
3244 ZeroTbl, SV, Name));
3245 }
3247 Function *TblF;
3248 TblOps.push_back(IndexOp);
3249 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
3251 return CGF.EmitNeonCall(TblF, TblOps, Name);
3252 }
3254 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
3255 switch (BuiltinID) {
3256 default:
3257 return nullptr;
3258 case ARM::BI__builtin_arm_nop:
3259 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3260 llvm::ConstantInt::get(Int32Ty, 0));
3261 case ARM::BI__builtin_arm_yield:
3262 case ARM::BI__yield:
3263 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3264 llvm::ConstantInt::get(Int32Ty, 1));
3265 case ARM::BI__builtin_arm_wfe:
3266 case ARM::BI__wfe:
3267 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3268 llvm::ConstantInt::get(Int32Ty, 2));
3269 case ARM::BI__builtin_arm_wfi:
3270 case ARM::BI__wfi:
3271 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3272 llvm::ConstantInt::get(Int32Ty, 3));
3273 case ARM::BI__builtin_arm_sev:
3274 case ARM::BI__sev:
3275 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3276 llvm::ConstantInt::get(Int32Ty, 4));
3277 case ARM::BI__builtin_arm_sevl:
3278 case ARM::BI__sevl:
3279 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3280 llvm::ConstantInt::get(Int32Ty, 5));
3281 }
3282 }
3284 // Generates the IR for the read/write special register builtin.
3285 // ValueType is the type of the value that is to be written or read, and
3286 // RegisterType is the type of the register being written to or read from.
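// E.g. a 32-bit read is emitted roughly as (an illustrative IR sketch; the
// metadata string names the register):
//   %0 = call i32 @llvm.read_register.i32(metadata !1)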
3287 static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
3288 const CallExpr *E,
3289 llvm::Type *RegisterType,
3290 llvm::Type *ValueType, bool IsRead) {
3291 // The read and write register intrinsics only support 32- and 64-bit operations.
3292 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
3293 && "Unsupported size for register.");
3295 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3296 CodeGen::CodeGenModule &CGM = CGF.CGM;
3297 LLVMContext &Context = CGM.getLLVMContext();
3299 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
3300 StringRef SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();
3302 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
3303 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
3304 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
3306 llvm::Type *Types[] = { RegisterType };
3308 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
3309 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
3310 && "Can't fit 64-bit value in 32-bit register");
3312 if (IsRead) {
3313 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
3314 llvm::Value *Call = Builder.CreateCall(F, Metadata);
3316 if (MixedTypes)
3317 // Read into the 64-bit register and then truncate the result to 32 bits.
3318 return Builder.CreateTrunc(Call, ValueType);
3320 if (ValueType->isPointerTy())
3321 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
3322 return Builder.CreateIntToPtr(Call, ValueType);
3327 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
3328 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
3329 if (MixedTypes) {
3330 // Extend the 32-bit write value to 64 bits to pass to the write intrinsic.
3331 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
3332 return Builder.CreateCall(F, { Metadata, ArgValue });
3333 }
3335 if (ValueType->isPointerTy()) {
3336 // Have VoidPtrTy ArgValue but want to return an i32/i64.
3337 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
3338 return Builder.CreateCall(F, { Metadata, ArgValue });
3339 }
3341 return Builder.CreateCall(F, { Metadata, ArgValue });
3342 }
3344 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
3345 /// argument that specifies the vector type.
3346 static bool HasExtraNeonArgument(unsigned BuiltinID) {
3347 switch (BuiltinID) {
3348 default: break;
3349 case NEON::BI__builtin_neon_vget_lane_i8:
3350 case NEON::BI__builtin_neon_vget_lane_i16:
3351 case NEON::BI__builtin_neon_vget_lane_i32:
3352 case NEON::BI__builtin_neon_vget_lane_i64:
3353 case NEON::BI__builtin_neon_vget_lane_f32:
3354 case NEON::BI__builtin_neon_vgetq_lane_i8:
3355 case NEON::BI__builtin_neon_vgetq_lane_i16:
3356 case NEON::BI__builtin_neon_vgetq_lane_i32:
3357 case NEON::BI__builtin_neon_vgetq_lane_i64:
3358 case NEON::BI__builtin_neon_vgetq_lane_f32:
3359 case NEON::BI__builtin_neon_vset_lane_i8:
3360 case NEON::BI__builtin_neon_vset_lane_i16:
3361 case NEON::BI__builtin_neon_vset_lane_i32:
3362 case NEON::BI__builtin_neon_vset_lane_i64:
3363 case NEON::BI__builtin_neon_vset_lane_f32:
3364 case NEON::BI__builtin_neon_vsetq_lane_i8:
3365 case NEON::BI__builtin_neon_vsetq_lane_i16:
3366 case NEON::BI__builtin_neon_vsetq_lane_i32:
3367 case NEON::BI__builtin_neon_vsetq_lane_i64:
3368 case NEON::BI__builtin_neon_vsetq_lane_f32:
3369 case NEON::BI__builtin_neon_vsha1h_u32:
3370 case NEON::BI__builtin_neon_vsha1cq_u32:
3371 case NEON::BI__builtin_neon_vsha1pq_u32:
3372 case NEON::BI__builtin_neon_vsha1mq_u32:
3373 case ARM::BI_MoveToCoprocessor:
3374 case ARM::BI_MoveToCoprocessor2:
3375 return false;
3376 }
3377 return true;
3378 }
3380 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
3381 const CallExpr *E) {
3382 if (auto Hint = GetValueForARMHint(BuiltinID))
3383 return Hint;
3385 if (BuiltinID == ARM::BI__emit) {
3386 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
3387 llvm::FunctionType *FTy =
3388 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
3390 llvm::APSInt Value;
3391 if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
3392 llvm_unreachable("Sema will ensure that the parameter is constant");
3394 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
3396 llvm::InlineAsm *Emit =
3397 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
3398 /*SideEffects=*/true)
3399 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
3400 /*SideEffects=*/true);
3402 return Builder.CreateCall(Emit);
3403 }
3405 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
3406 Value *Option = EmitScalarExpr(E->getArg(0));
3407 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
3408 }
3410 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
3411 Value *Address = EmitScalarExpr(E->getArg(0));
3412 Value *RW = EmitScalarExpr(E->getArg(1));
3413 Value *IsData = EmitScalarExpr(E->getArg(2));
3415 // Locality is not supported on the ARM target.
3416 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
3418 Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
3419 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
3420 }
3422 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
3423 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_rbit),
3424 EmitScalarExpr(E->getArg(0)),
3425 "rbit");
3426 }
3428 if (BuiltinID == ARM::BI__clear_cache) {
3429 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
3430 const FunctionDecl *FD = E->getDirectCallee();
3431 SmallVector<Value*, 2> Ops;
3432 for (unsigned i = 0; i < 2; i++)
3433 Ops.push_back(EmitScalarExpr(E->getArg(i)));
3434 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
3435 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
3436 StringRef Name = FD->getName();
3437 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
3438 }
3440 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
3441 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
3442 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
3443 getContext().getTypeSize(E->getType()) == 64) ||
3444 BuiltinID == ARM::BI__ldrexd) {
3445 Function *F;
3447 switch (BuiltinID) {
3448 default: llvm_unreachable("unexpected builtin");
3449 case ARM::BI__builtin_arm_ldaex:
3450 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
3451 break;
3452 case ARM::BI__builtin_arm_ldrexd:
3453 case ARM::BI__builtin_arm_ldrex:
3454 case ARM::BI__ldrexd:
3455 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
3456 break;
3457 }
3459 Value *LdPtr = EmitScalarExpr(E->getArg(0));
3460 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
3461 "ldrexd");
3463 Value *Val0 = Builder.CreateExtractValue(Val, 1);
3464 Value *Val1 = Builder.CreateExtractValue(Val, 0);
3465 Val0 = Builder.CreateZExt(Val0, Int64Ty);
3466 Val1 = Builder.CreateZExt(Val1, Int64Ty);
3468 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
3469 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
3470 Val = Builder.CreateOr(Val, Val1);
3471 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
3472 }
3474 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
3475 BuiltinID == ARM::BI__builtin_arm_ldaex) {
3476 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
3478 QualType Ty = E->getType();
3479 llvm::Type *RealResTy = ConvertType(Ty);
3480 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
3481 getContext().getTypeSize(Ty));
3482 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
3484 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
3485 ? Intrinsic::arm_ldaex
3486 : Intrinsic::arm_ldrex,
3487 LoadAddr->getType());
3488 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
3490 if (RealResTy->isPointerTy())
3491 return Builder.CreateIntToPtr(Val, RealResTy);
3493 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
3494 return Builder.CreateBitCast(Val, RealResTy);
3495 }
3498 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
3499 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
3500 BuiltinID == ARM::BI__builtin_arm_strex) &&
3501 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
3502 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
3503 ? Intrinsic::arm_stlexd
3504 : Intrinsic::arm_strexd);
3505 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
3507 Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
3508 Value *Val = EmitScalarExpr(E->getArg(0));
3509 Builder.CreateStore(Val, Tmp);
3511 Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
3512 Val = Builder.CreateLoad(LdPtr);
3514 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
3515 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
3516 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
3517 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
3518 }
3520 if (BuiltinID == ARM::BI__builtin_arm_strex ||
3521 BuiltinID == ARM::BI__builtin_arm_stlex) {
3522 Value *StoreVal = EmitScalarExpr(E->getArg(0));
3523 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
3525 QualType Ty = E->getArg(0)->getType();
3526 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
3527 getContext().getTypeSize(Ty));
3528 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
3530 if (StoreVal->getType()->isPointerTy())
3531 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
3532 else {
3533 StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
3534 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
3535 }
3537 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
3538 ? Intrinsic::arm_stlex
3539 : Intrinsic::arm_strex,
3540 StoreAddr->getType());
3541 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
3542 }
3544 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
3545 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
3546 return Builder.CreateCall(F);
3547 }
3550 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
3551 switch (BuiltinID) {
3552 case ARM::BI__builtin_arm_crc32b:
3553 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
3554 case ARM::BI__builtin_arm_crc32cb:
3555 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
3556 case ARM::BI__builtin_arm_crc32h:
3557 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
3558 case ARM::BI__builtin_arm_crc32ch:
3559 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
3560 case ARM::BI__builtin_arm_crc32w:
3561 case ARM::BI__builtin_arm_crc32d:
3562 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
3563 case ARM::BI__builtin_arm_crc32cw:
3564 case ARM::BI__builtin_arm_crc32cd:
3565 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
3568 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
3569 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3570 Value *Arg1 = EmitScalarExpr(E->getArg(1));
3572 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
3573 // intrinsics, hence we need different codegen for these cases.
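// E.g. (sketch): crc32d(a, b) is emitted as
//   crc32w(crc32w(a, trunc(b)), trunc(b >> 32))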
3574 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
3575 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
3576 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
3577 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
3578 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
3579 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
3581 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
3582 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
3583 return Builder.CreateCall(F, {Res, Arg1b});
3584 }
3585 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
3587 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
3588 return Builder.CreateCall(F, {Arg0, Arg1});
3589 }
3592 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
3593 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
3594 BuiltinID == ARM::BI__builtin_arm_rsrp ||
3595 BuiltinID == ARM::BI__builtin_arm_wsr ||
3596 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
3597 BuiltinID == ARM::BI__builtin_arm_wsrp) {
3599 bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
3600 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
3601 BuiltinID == ARM::BI__builtin_arm_rsrp;
3603 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
3604 BuiltinID == ARM::BI__builtin_arm_wsrp;
3606 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
3607 BuiltinID == ARM::BI__builtin_arm_wsr64;
3609 llvm::Type *ValueType;
3610 llvm::Type *RegisterType;
3611 if (IsPointerBuiltin) {
3612 ValueType = VoidPtrTy;
3613 RegisterType = Int32Ty;
3614 } else if (Is64Bit) {
3615 ValueType = RegisterType = Int64Ty;
3616 } else {
3617 ValueType = RegisterType = Int32Ty;
3618 }
3620 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
3621 }
3623 // Find out if any arguments are required to be integer constant
3624 // expressions.
3625 unsigned ICEArguments = 0;
3626 ASTContext::GetBuiltinTypeError Error;
3627 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
3628 assert(Error == ASTContext::GE_None && "Should not codegen an error");
3630 SmallVector<Value*, 4> Ops;
3631 llvm::Value *Align = nullptr;
3632 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
3633 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
3634 for (unsigned i = 0, e = NumArgs; i != e; i++) {
3635 if (i == 0) {
3636 switch (BuiltinID) {
3637 case NEON::BI__builtin_neon_vld1_v:
3638 case NEON::BI__builtin_neon_vld1q_v:
3639 case NEON::BI__builtin_neon_vld1q_lane_v:
3640 case NEON::BI__builtin_neon_vld1_lane_v:
3641 case NEON::BI__builtin_neon_vld1_dup_v:
3642 case NEON::BI__builtin_neon_vld1q_dup_v:
3643 case NEON::BI__builtin_neon_vst1_v:
3644 case NEON::BI__builtin_neon_vst1q_v:
3645 case NEON::BI__builtin_neon_vst1q_lane_v:
3646 case NEON::BI__builtin_neon_vst1_lane_v:
3647 case NEON::BI__builtin_neon_vst2_v:
3648 case NEON::BI__builtin_neon_vst2q_v:
3649 case NEON::BI__builtin_neon_vst2_lane_v:
3650 case NEON::BI__builtin_neon_vst2q_lane_v:
3651 case NEON::BI__builtin_neon_vst3_v:
3652 case NEON::BI__builtin_neon_vst3q_v:
3653 case NEON::BI__builtin_neon_vst3_lane_v:
3654 case NEON::BI__builtin_neon_vst3q_lane_v:
3655 case NEON::BI__builtin_neon_vst4_v:
3656 case NEON::BI__builtin_neon_vst4q_v:
3657 case NEON::BI__builtin_neon_vst4_lane_v:
3658 case NEON::BI__builtin_neon_vst4q_lane_v:
3659 // Get the alignment for the argument in addition to the value;
3660 // we'll use it later.
3661 std::pair<llvm::Value*, unsigned> Src =
3662 EmitPointerWithAlignment(E->getArg(0));
3663 Ops.push_back(Src.first);
3664 Align = Builder.getInt32(Src.second);
3665 continue;
3666 }
3667 }
3668 if (i == 1) {
3669 switch (BuiltinID) {
3670 case NEON::BI__builtin_neon_vld2_v:
3671 case NEON::BI__builtin_neon_vld2q_v:
3672 case NEON::BI__builtin_neon_vld3_v:
3673 case NEON::BI__builtin_neon_vld3q_v:
3674 case NEON::BI__builtin_neon_vld4_v:
3675 case NEON::BI__builtin_neon_vld4q_v:
3676 case NEON::BI__builtin_neon_vld2_lane_v:
3677 case NEON::BI__builtin_neon_vld2q_lane_v:
3678 case NEON::BI__builtin_neon_vld3_lane_v:
3679 case NEON::BI__builtin_neon_vld3q_lane_v:
3680 case NEON::BI__builtin_neon_vld4_lane_v:
3681 case NEON::BI__builtin_neon_vld4q_lane_v:
3682 case NEON::BI__builtin_neon_vld2_dup_v:
3683 case NEON::BI__builtin_neon_vld3_dup_v:
3684 case NEON::BI__builtin_neon_vld4_dup_v:
3685 // Get the alignment for the argument in addition to the value;
3686 // we'll use it later.
3687 std::pair<llvm::Value*, unsigned> Src =
3688 EmitPointerWithAlignment(E->getArg(1));
3689 Ops.push_back(Src.first);
3690 Align = Builder.getInt32(Src.second);
3691 continue;
3692 }
3693 }
3695 if ((ICEArguments & (1 << i)) == 0) {
3696 Ops.push_back(EmitScalarExpr(E->getArg(i)));
3697 } else {
3698 // If this is required to be a constant, constant fold it so that we know
3699 // that the generated intrinsic gets a ConstantInt.
3700 llvm::APSInt Result;
3701 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
3702 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
3703 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
3704 }
3705 }
3707 switch (BuiltinID) {
3708 default: break;
3710 case NEON::BI__builtin_neon_vget_lane_i8:
3711 case NEON::BI__builtin_neon_vget_lane_i16:
3712 case NEON::BI__builtin_neon_vget_lane_i32:
3713 case NEON::BI__builtin_neon_vget_lane_i64:
3714 case NEON::BI__builtin_neon_vget_lane_f32:
3715 case NEON::BI__builtin_neon_vgetq_lane_i8:
3716 case NEON::BI__builtin_neon_vgetq_lane_i16:
3717 case NEON::BI__builtin_neon_vgetq_lane_i32:
3718 case NEON::BI__builtin_neon_vgetq_lane_i64:
3719 case NEON::BI__builtin_neon_vgetq_lane_f32:
3720 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
3722 case NEON::BI__builtin_neon_vset_lane_i8:
3723 case NEON::BI__builtin_neon_vset_lane_i16:
3724 case NEON::BI__builtin_neon_vset_lane_i32:
3725 case NEON::BI__builtin_neon_vset_lane_i64:
3726 case NEON::BI__builtin_neon_vset_lane_f32:
3727 case NEON::BI__builtin_neon_vsetq_lane_i8:
3728 case NEON::BI__builtin_neon_vsetq_lane_i16:
3729 case NEON::BI__builtin_neon_vsetq_lane_i32:
3730 case NEON::BI__builtin_neon_vsetq_lane_i64:
3731 case NEON::BI__builtin_neon_vsetq_lane_f32:
3732 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
3734 case NEON::BI__builtin_neon_vsha1h_u32:
3735 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
3736 "vsha1h");
3737 case NEON::BI__builtin_neon_vsha1cq_u32:
3738 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
3739 "vsha1c");
3740 case NEON::BI__builtin_neon_vsha1pq_u32:
3741 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
3742 "vsha1p");
3743 case NEON::BI__builtin_neon_vsha1mq_u32:
3744 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
3745 "vsha1m");
3747 // The ARM _MoveToCoprocessor builtins put the input register value as
3748 // the first argument, but the LLVM intrinsic expects it as the third one.
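// E.g. (sketch): _MoveToCoprocessor(value, coproc, opc1, CRn, CRm, opc2)
// becomes a call to @llvm.arm.mcr(coproc, opc1, value, CRn, CRm, opc2).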
3749 case ARM::BI_MoveToCoprocessor:
3750 case ARM::BI_MoveToCoprocessor2: {
3751 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
3752 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
3753 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
3754 Ops[3], Ops[4], Ops[5]});
3755 }
3756 }
3758 // Get the last argument, which specifies the vector type.
3759 assert(HasExtraArg);
3760 llvm::APSInt Result;
3761 const Expr *Arg = E->getArg(E->getNumArgs()-1);
3762 if (!Arg->isIntegerConstantExpr(Result, getContext()))
3763 return nullptr;
3765 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
3766 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
3767 // Determine the overloaded type of this builtin.
3768 llvm::Type *Ty;
3769 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
3770 Ty = FloatTy;
3771 else
3772 Ty = DoubleTy;
3774 // Determine whether this is an unsigned conversion or not.
3775 bool usgn = Result.getZExtValue() == 1;
3776 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
3778 // Call the appropriate intrinsic.
3779 Function *F = CGM.getIntrinsic(Int, Ty);
3780 return Builder.CreateCall(F, Ops, "vcvtr");
3783 // Determine the type of this overloaded NEON intrinsic.
3784 NeonTypeFlags Type(Result.getZExtValue());
3785 bool usgn = Type.isUnsigned();
3786 bool rightShift = false;
3788 llvm::VectorType *VTy = GetNeonType(this, Type);
3789 llvm::Type *Ty = VTy;
3790 if (!Ty)
3791 return nullptr;
3793 // Many NEON builtins have identical semantics and uses in ARM and
3794 // AArch64. Emit these in a single function.
3795 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
3796 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
3797 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
3798 if (Builtin)
3799 return EmitCommonNeonBuiltinExpr(
3800 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
3801 Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);
3804 switch (BuiltinID) {
3805 default: return nullptr;
3806 case NEON::BI__builtin_neon_vld1q_lane_v:
3807 // Handle 64-bit integer elements as a special case. Use shuffles of
3808 // one-element vectors to avoid poor code for i64 in the backend.
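// Sketch: keep the untouched lane of the old vector, load the new element as
// a <1 x i64> via vld1, then shuffle the two one-element vectors together.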
3809 if (VTy->getElementType()->isIntegerTy(64)) {
3810 // Extract the other lane.
3811 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3812 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
3813 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
3814 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
3815 // Load the value as a one-element vector.
3816 Ty = llvm::VectorType::get(VTy->getElementType(), 1);
3817 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
3818 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
3820 SmallVector<Constant*, 2> Indices;
3821 Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
3822 Indices.push_back(ConstantInt::get(Int32Ty, Lane));
3823 SV = llvm::ConstantVector::get(Indices);
3824 return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
3827 case NEON::BI__builtin_neon_vld1_lane_v: {
3828 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3829 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
3830 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3831 LoadInst *Ld = Builder.CreateLoad(Ops[0]);
3832 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
3833 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
3834 }
3835 case NEON::BI__builtin_neon_vld2_dup_v:
3836 case NEON::BI__builtin_neon_vld3_dup_v:
3837 case NEON::BI__builtin_neon_vld4_dup_v: {
3838 // Handle 64-bit elements as a special-case. There is no "dup" needed.
3839 if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
3840 switch (BuiltinID) {
3841 case NEON::BI__builtin_neon_vld2_dup_v:
3842 Int = Intrinsic::arm_neon_vld2;
3843 break;
3844 case NEON::BI__builtin_neon_vld3_dup_v:
3845 Int = Intrinsic::arm_neon_vld3;
3846 break;
3847 case NEON::BI__builtin_neon_vld4_dup_v:
3848 Int = Intrinsic::arm_neon_vld4;
3849 break;
3850 default: llvm_unreachable("unknown vld_dup intrinsic?");
3851 }
3852 Function *F = CGM.getIntrinsic(Int, Ty);
3853 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup");
3854 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3855 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3856 return Builder.CreateStore(Ops[1], Ops[0]);
3858 switch (BuiltinID) {
3859 case NEON::BI__builtin_neon_vld2_dup_v:
3860 Int = Intrinsic::arm_neon_vld2lane;
3861 break;
3862 case NEON::BI__builtin_neon_vld3_dup_v:
3863 Int = Intrinsic::arm_neon_vld3lane;
3864 break;
3865 case NEON::BI__builtin_neon_vld4_dup_v:
3866 Int = Intrinsic::arm_neon_vld4lane;
3867 break;
3868 default: llvm_unreachable("unknown vld_dup intrinsic?");
3869 }
3870 Function *F = CGM.getIntrinsic(Int, Ty);
3871 llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
3873 SmallVector<Value*, 6> Args;
3874 Args.push_back(Ops[1]);
3875 Args.append(STy->getNumElements(), UndefValue::get(Ty));
3877 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
3878 Args.push_back(CI);
3879 Args.push_back(Align);
3881 Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
3882 // Splat lane 0 to all elements in each vector of the result.
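// E.g. (sketch, vld2_dup of <4 x i16>): each loaded vector starts out as
// {x, undef, undef, undef} and is splatted to {x, x, x, x}.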
3883 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3884 Value *Val = Builder.CreateExtractValue(Ops[1], i);
3885 Value *Elt = Builder.CreateBitCast(Val, Ty);
3886 Elt = EmitNeonSplat(Elt, CI);
3887 Elt = Builder.CreateBitCast(Elt, Val->getType());
3888 Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
3890 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3891 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3892 return Builder.CreateStore(Ops[1], Ops[0]);
3893 }
3894 case NEON::BI__builtin_neon_vqrshrn_n_v:
3895 Int =
3896 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
3897 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
3898 1, true);
3899 case NEON::BI__builtin_neon_vqrshrun_n_v:
3900 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
3901 Ops, "vqrshrun_n", 1, true);
3902 case NEON::BI__builtin_neon_vqshrn_n_v:
3903 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
3904 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
3905 1, true);
3906 case NEON::BI__builtin_neon_vqshrun_n_v:
3907 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
3908 Ops, "vqshrun_n", 1, true);
3909 case NEON::BI__builtin_neon_vrecpe_v:
3910 case NEON::BI__builtin_neon_vrecpeq_v:
3911 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
3912 Ops, "vrecpe");
3913 case NEON::BI__builtin_neon_vrshrn_n_v:
3914 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
3915 Ops, "vrshrn_n", 1, true);
3916 case NEON::BI__builtin_neon_vrsra_n_v:
3917 case NEON::BI__builtin_neon_vrsraq_n_v:
3918 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3919 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3920 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
3921 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
3922 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
3923 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
3924 case NEON::BI__builtin_neon_vsri_n_v:
3925 case NEON::BI__builtin_neon_vsriq_n_v:
3926 rightShift = true;
3927 case NEON::BI__builtin_neon_vsli_n_v:
3928 case NEON::BI__builtin_neon_vsliq_n_v:
3929 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
3930 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
3931 Ops, "vsli_n");
3932 case NEON::BI__builtin_neon_vsra_n_v:
3933 case NEON::BI__builtin_neon_vsraq_n_v:
3934 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3935 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
3936 return Builder.CreateAdd(Ops[0], Ops[1]);
3937 case NEON::BI__builtin_neon_vst1q_lane_v:
3938 // Handle 64-bit integer elements as a special case. Use a shuffle to get
3939 // a one-element vector and avoid poor code for i64 in the backend.
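// Sketch: shuffle the value down to a <1 x i64> one-element vector and store
// it with the vst1 intrinsic rather than extracting the i64 lane directly.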
3940 if (VTy->getElementType()->isIntegerTy(64)) {
3941 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3942 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
3943 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
3944 Ops[2] = Align;
3945 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
3946 Ops[1]->getType()), Ops);
3947 }
3948 // fall through
3949 case NEON::BI__builtin_neon_vst1_lane_v: {
3950 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3951 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
3952 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3953 StoreInst *St = Builder.CreateStore(Ops[1],
3954 Builder.CreateBitCast(Ops[0], Ty));
3955 St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
3956 return St;
3957 }
3958 case NEON::BI__builtin_neon_vtbl1_v:
3959 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
3960 Ops, "vtbl1");
3961 case NEON::BI__builtin_neon_vtbl2_v:
3962 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
3963 Ops, "vtbl2");
3964 case NEON::BI__builtin_neon_vtbl3_v:
3965 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
3966 Ops, "vtbl3");
3967 case NEON::BI__builtin_neon_vtbl4_v:
3968 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
3969 Ops, "vtbl4");
3970 case NEON::BI__builtin_neon_vtbx1_v:
3971 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
3972 Ops, "vtbx1");
3973 case NEON::BI__builtin_neon_vtbx2_v:
3974 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
3975 Ops, "vtbx2");
3976 case NEON::BI__builtin_neon_vtbx3_v:
3977 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
3978 Ops, "vtbx3");
3979 case NEON::BI__builtin_neon_vtbx4_v:
3980 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
3981 Ops, "vtbx4");
3982 }
3983 }
3985 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
3987 SmallVectorImpl<Value *> &Ops) {
3988 unsigned int Int = 0;
3989 const char *s = nullptr;
3991 switch (BuiltinID) {
3992 default:
3993 return nullptr;
3994 case NEON::BI__builtin_neon_vtbl1_v:
3995 case NEON::BI__builtin_neon_vqtbl1_v:
3996 case NEON::BI__builtin_neon_vqtbl1q_v:
3997 case NEON::BI__builtin_neon_vtbl2_v:
3998 case NEON::BI__builtin_neon_vqtbl2_v:
3999 case NEON::BI__builtin_neon_vqtbl2q_v:
4000 case NEON::BI__builtin_neon_vtbl3_v:
4001 case NEON::BI__builtin_neon_vqtbl3_v:
4002 case NEON::BI__builtin_neon_vqtbl3q_v:
4003 case NEON::BI__builtin_neon_vtbl4_v:
4004 case NEON::BI__builtin_neon_vqtbl4_v:
4005 case NEON::BI__builtin_neon_vqtbl4q_v:
4006 break;
4007 case NEON::BI__builtin_neon_vtbx1_v:
4008 case NEON::BI__builtin_neon_vqtbx1_v:
4009 case NEON::BI__builtin_neon_vqtbx1q_v:
4010 case NEON::BI__builtin_neon_vtbx2_v:
4011 case NEON::BI__builtin_neon_vqtbx2_v:
4012 case NEON::BI__builtin_neon_vqtbx2q_v:
4013 case NEON::BI__builtin_neon_vtbx3_v:
4014 case NEON::BI__builtin_neon_vqtbx3_v:
4015 case NEON::BI__builtin_neon_vqtbx3q_v:
4016 case NEON::BI__builtin_neon_vtbx4_v:
4017 case NEON::BI__builtin_neon_vqtbx4_v:
4018 case NEON::BI__builtin_neon_vqtbx4q_v:
4019 break;
4020 }
4022 assert(E->getNumArgs() >= 3);
4024 // Get the last argument, which specifies the vector type.
4025 llvm::APSInt Result;
4026 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
4027 if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
4028 return nullptr;
4030 // Determine the type of this overloaded NEON intrinsic.
4031 NeonTypeFlags Type(Result.getZExtValue());
4032 llvm::VectorType *VTy = GetNeonType(&CGF, Type);
4033 llvm::Type *Ty = VTy;
4034 if (!Ty)
4035 return nullptr;
4037 unsigned nElts = VTy->getNumElements();
4039 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4041 // AArch64 scalar builtins are not overloaded: they do not have an extra
4042 // argument that specifies the vector type, so we need to handle each case.
4043 SmallVector<Value *, 2> TblOps;
4044 switch (BuiltinID) {
4045 case NEON::BI__builtin_neon_vtbl1_v: {
4046 TblOps.push_back(Ops[0]);
4047 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
4048 Intrinsic::aarch64_neon_tbl1, "vtbl1");
4050 case NEON::BI__builtin_neon_vtbl2_v: {
4051 TblOps.push_back(Ops[0]);
4052 TblOps.push_back(Ops[1]);
4053 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
4054 Intrinsic::aarch64_neon_tbl1, "vtbl1");
4056 case NEON::BI__builtin_neon_vtbl3_v: {
4057 TblOps.push_back(Ops[0]);
4058 TblOps.push_back(Ops[1]);
4059 TblOps.push_back(Ops[2]);
4060 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
4061 Intrinsic::aarch64_neon_tbl2, "vtbl2");
4063 case NEON::BI__builtin_neon_vtbl4_v: {
4064 TblOps.push_back(Ops[0]);
4065 TblOps.push_back(Ops[1]);
4066 TblOps.push_back(Ops[2]);
4067 TblOps.push_back(Ops[3]);
4068 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
4069 Intrinsic::aarch64_neon_tbl2, "vtbl2");
4071 case NEON::BI__builtin_neon_vtbx1_v: {
4072 TblOps.push_back(Ops[1]);
4073 Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
4074 Intrinsic::aarch64_neon_tbl1, "vtbl1");
4076 llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
4077 Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
4078 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
4079 CmpRes = Builder.CreateSExt(CmpRes, Ty);
4081 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
4082 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
4083 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
4084 }
4085 case NEON::BI__builtin_neon_vtbx2_v: {
4086 TblOps.push_back(Ops[1]);
4087 TblOps.push_back(Ops[2]);
4088 return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
4089 Intrinsic::aarch64_neon_tbx1, "vtbx1");
4091 case NEON::BI__builtin_neon_vtbx3_v: {
4092 TblOps.push_back(Ops[1]);
4093 TblOps.push_back(Ops[2]);
4094 TblOps.push_back(Ops[3]);
4095 Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
4096 Intrinsic::aarch64_neon_tbl2, "vtbl2");
4098 llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
4099 Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
4100 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
4101 TwentyFourV);
4102 CmpRes = Builder.CreateSExt(CmpRes, Ty);
4104 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
4105 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
4106 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
4107 }
4108 case NEON::BI__builtin_neon_vtbx4_v: {
4109 TblOps.push_back(Ops[1]);
4110 TblOps.push_back(Ops[2]);
4111 TblOps.push_back(Ops[3]);
4112 TblOps.push_back(Ops[4]);
4113 return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
4114 Intrinsic::aarch64_neon_tbx2, "vtbx2");
4116 case NEON::BI__builtin_neon_vqtbl1_v:
4117 case NEON::BI__builtin_neon_vqtbl1q_v:
4118 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
4119 case NEON::BI__builtin_neon_vqtbl2_v:
4120 case NEON::BI__builtin_neon_vqtbl2q_v:
4121 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
4122 case NEON::BI__builtin_neon_vqtbl3_v:
4123 case NEON::BI__builtin_neon_vqtbl3q_v:
4124 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
4125 case NEON::BI__builtin_neon_vqtbl4_v:
4126 case NEON::BI__builtin_neon_vqtbl4q_v:
4127 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
4128 case NEON::BI__builtin_neon_vqtbx1_v:
4129 case NEON::BI__builtin_neon_vqtbx1q_v:
4130 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
4131 case NEON::BI__builtin_neon_vqtbx2_v:
4132 case NEON::BI__builtin_neon_vqtbx2q_v:
4133 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
4134 case NEON::BI__builtin_neon_vqtbx3_v:
4135 case NEON::BI__builtin_neon_vqtbx3q_v:
4136 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
4137 case NEON::BI__builtin_neon_vqtbx4_v:
4138 case NEON::BI__builtin_neon_vqtbx4q_v:
4139 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
4146 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
4147 return CGF.EmitNeonCall(F, Ops, s);
4148 }
4150 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
4151 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
4152 Op = Builder.CreateBitCast(Op, Int16Ty);
4153 Value *V = UndefValue::get(VTy);
4154 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
4155 Op = Builder.CreateInsertElement(V, Op, CI);
4156 return Op;
4157 }
4159 Value *CodeGenFunction::vectorWrapScalar8(Value *Op) {
4160 llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
4161 Op = Builder.CreateBitCast(Op, Int8Ty);
4162 Value *V = UndefValue::get(VTy);
4163 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
4164 Op = Builder.CreateInsertElement(V, Op, CI);
4165 return Op;
4166 }
4168 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
4169 const CallExpr *E) {
4170 unsigned HintID = static_cast<unsigned>(-1);
4171 switch (BuiltinID) {
4172 default: break;
4173 case AArch64::BI__builtin_arm_nop: HintID = 0; break;
4176 case AArch64::BI__builtin_arm_yield: HintID = 1; break;
4179 case AArch64::BI__builtin_arm_wfe: HintID = 2; break;
4182 case AArch64::BI__builtin_arm_wfi: HintID = 3; break;
4185 case AArch64::BI__builtin_arm_sev: HintID = 4; break;
4188 case AArch64::BI__builtin_arm_sevl: HintID = 5; break;
4191 }
4193 if (HintID != static_cast<unsigned>(-1)) {
4194 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
4195 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
4196 }
4198 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
4199 Value *Address = EmitScalarExpr(E->getArg(0));
4200 Value *RW = EmitScalarExpr(E->getArg(1));
4201 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
4202 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
4203 Value *IsData = EmitScalarExpr(E->getArg(4));
4205 Value *Locality = nullptr;
4206 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
4207 // Temporal fetch, needs to convert cache level to locality.
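// E.g. (a sketch of the mapping below): Locality = 3 - CacheLevel, so cache
// level 0 maps to llvm.prefetch locality 3 and cache level 2 to locality 1.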
4208 Locality = llvm::ConstantInt::get(Int32Ty,
4209 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
4210 } else {
4211 // Streaming fetch.
4212 Locality = llvm::ConstantInt::get(Int32Ty, 0);
4213 }
4215 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
4216 // PLDL3STRM or PLDL2STRM.
4217 Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
4218 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
4219 }

  if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
    assert((getContext().getTypeSize(E->getType()) == 32) &&
           "rbit of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
  }
  if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
    assert((getContext().getTypeSize(E->getType()) == 64) &&
           "rbit of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
  }

  if (BuiltinID == AArch64::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    SmallVector<Value*, 2> Ops;
    for (unsigned i = 0; i < 2; i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }

  if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
       BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
      getContext().getTypeSize(E->getType()) == 128) {
    Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
                                       ? Intrinsic::aarch64_ldaxp
                                       : Intrinsic::aarch64_ldxp);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
                                    "ldxp");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
    Val0 = Builder.CreateZExt(Val0, Int128Ty);
    Val1 = Builder.CreateZExt(Val1, Int128Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
             BuiltinID == AArch64::BI__builtin_arm_ldaex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
                                                  getContext().getTypeSize(Ty));
    LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());

    Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
                                       ? Intrinsic::aarch64_ldaxr
                                       : Intrinsic::aarch64_ldxr,
                                   LoadAddr->getType());
    Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);

    Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
    return Builder.CreateBitCast(Val, RealResTy);
  }
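
  // The 128-bit stxp/stlxp path below performs the reverse split: the i128
  // store value is spilled to a temporary and reloaded as an {i64, i64}
  // pair, because the aarch64_stxp/stlxp intrinsics take the two halves as
  // separate i64 operands.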
  if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
       BuiltinID == AArch64::BI__builtin_arm_stlex) &&
      getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
    Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
                                       ? Intrinsic::aarch64_stlxp
                                       : Intrinsic::aarch64_stxp);
    llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);

    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
                                      One);
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
                                         Int8PtrTy);
    return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
  }

  if (BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
                                                 getContext().getTypeSize(Ty));
    StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
    else {
      StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
    }

    Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
                                       ? Intrinsic::aarch64_stlxr
                                       : Intrinsic::aarch64_stxr,
                                   StoreAddr->getType());
    return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
  }

  if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
    return Builder.CreateCall(F);
  }

  // CRC32
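  // Each __builtin_arm_crc32* maps 1-1 onto an AArch64 intrinsic; the only
  // fixup needed is zero-extending the data operand to the width the
  // intrinsic expects, which happens after the switch.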
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case AArch64::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
  case AArch64::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
  case AArch64::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
  case AArch64::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
  case AArch64::BI__builtin_arm_crc32w:
    CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
  case AArch64::BI__builtin_arm_crc32cw:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
  case AArch64::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
  case AArch64::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
  }

  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));
    Function *F = CGM.getIntrinsic(CRCIntrinsicID);

    llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
    Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);

    return Builder.CreateCall(F, {Arg0, Arg1});
  }
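
  // The rsr/wsr builtins read or write a system register. AArch64 system
  // register transfers are always 64 bits wide, so RegisterType stays
  // Int64Ty and only the value type varies with the builtin flavour.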
  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp) {

    bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
                  BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                  BuiltinID == AArch64::BI__builtin_arm_rsrp;

    bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                            BuiltinID == AArch64::BI__builtin_arm_wsrp;

    bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
                   BuiltinID != AArch64::BI__builtin_arm_wsr;

    llvm::Type *ValueType;
    llvm::Type *RegisterType = Int64Ty;
    if (IsPointerBuiltin) {
      ValueType = VoidPtrTy;
    } else if (Is64Bit) {
      ValueType = Int64Ty;
    } else {
      ValueType = Int32Ty;
    }

    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
  }

  // Find out if any arguments are required to be integer constant
  // expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  llvm::SmallVector<Value*, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    } else {
      // If this is required to be a constant, constant fold it so that we know
      // that the generated intrinsic gets a ConstantInt.
      llvm::APSInt Result;
      bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
      assert(IsConst && "Constant arg isn't actually constant?");
      (void)IsConst;
      Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
    }
  }
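
  // Note that Ops holds every argument except the last one: for overloaded
  // NEON builtins that final operand encodes the NeonTypeFlags and is
  // emitted or constant-folded on demand below.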
  auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
  const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
      SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);

  if (Builtin) {
    Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
    Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
    assert(Result && "SISD intrinsic should have been handled");
    return Result;
  }

  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  NeonTypeFlags Type(0);
  if (Arg->isIntegerConstantExpr(Result, getContext()))
    // Determine the type of this overloaded NEON intrinsic.
    Type = NeonTypeFlags(Result.getZExtValue());

  bool usgn = Type.isUnsigned();
  bool quad = Type.isQuad();

  // Handle non-overloaded intrinsics first.
  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_vldrq_p128: {
    llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
    Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
    return Builder.CreateLoad(Ptr);
  }
  case NEON::BI__builtin_neon_vstrq_p128: {
    llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
    Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
    return Builder.CreateStore(EmitScalarExpr(E->getArg(1)), Ptr);
  }
  case NEON::BI__builtin_neon_vcvts_u32_f32:
  case NEON::BI__builtin_neon_vcvtd_u64_f64:
    usgn = true;
    // FALL THROUGH
  case NEON::BI__builtin_neon_vcvts_s32_f32:
  case NEON::BI__builtin_neon_vcvtd_s64_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
    llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
    llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
    Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
    if (usgn)
      return Builder.CreateFPToUI(Ops[0], InTy);
    return Builder.CreateFPToSI(Ops[0], InTy);
  }
  case NEON::BI__builtin_neon_vcvts_f32_u32:
  case NEON::BI__builtin_neon_vcvtd_f64_u64:
    usgn = true;
    // FALL THROUGH
  case NEON::BI__builtin_neon_vcvts_f32_s32:
  case NEON::BI__builtin_neon_vcvtd_f64_s64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
    llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
    llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
    Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
    if (usgn)
      return Builder.CreateUIToFP(Ops[0], FTy);
    return Builder.CreateSIToFP(Ops[0], FTy);
  }
  case NEON::BI__builtin_neon_vpaddd_s64: {
    llvm::Type *Ty =
        llvm::VectorType::get(llvm::Type::getInt64Ty(getLLVMContext()), 2);
    Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2i64, so make sure it's bitcast to that.
    Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
    llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
    llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2i64 into a scalar i64.
    return Builder.CreateAdd(Op0, Op1, "vpaddd");
  }
  case NEON::BI__builtin_neon_vpaddd_f64: {
    llvm::Type *Ty =
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2);
    Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2f64, so make sure it's bitcast to that.
    Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
    llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
    llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2f64 into a scalar f64.
    return Builder.CreateFAdd(Op0, Op1, "vpaddd");
  }
  case NEON::BI__builtin_neon_vpadds_f32: {
    llvm::Type *Ty =
        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2);
    Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2f32, so make sure it's bitcast to that.
    Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
    llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
    llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2f32 into a scalar f32.
    return Builder.CreateFAdd(Op0, Op1, "vpaddd");
  }
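  // The scalar compare builtins below share one pattern: emit a scalar
  // fcmp/icmp against the second operand (or against zero), then sign-extend
  // the i1 result so callers see the all-ones/all-zeros mask the ACLE
  // intrinsics specify.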
  case NEON::BI__builtin_neon_vceqzd_s64:
  case NEON::BI__builtin_neon_vceqzd_f64:
  case NEON::BI__builtin_neon_vceqzs_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
  case NEON::BI__builtin_neon_vcgezd_s64:
  case NEON::BI__builtin_neon_vcgezd_f64:
  case NEON::BI__builtin_neon_vcgezs_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
  case NEON::BI__builtin_neon_vclezd_s64:
  case NEON::BI__builtin_neon_vclezd_f64:
  case NEON::BI__builtin_neon_vclezs_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
  case NEON::BI__builtin_neon_vcgtzd_s64:
  case NEON::BI__builtin_neon_vcgtzd_f64:
  case NEON::BI__builtin_neon_vcgtzs_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
  case NEON::BI__builtin_neon_vcltzd_s64:
  case NEON::BI__builtin_neon_vcltzd_f64:
  case NEON::BI__builtin_neon_vcltzs_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");

  case NEON::BI__builtin_neon_vceqzd_u64: {
    llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateICmp(llvm::ICmpInst::ICMP_EQ, Ops[0],
                                llvm::Constant::getNullValue(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vceqzd");
  }
  case NEON::BI__builtin_neon_vceqd_f64:
  case NEON::BI__builtin_neon_vcled_f64:
  case NEON::BI__builtin_neon_vcltd_f64:
  case NEON::BI__builtin_neon_vcged_f64:
  case NEON::BI__builtin_neon_vcgtd_f64: {
    llvm::CmpInst::Predicate P;
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
    case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
    case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
    case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
    case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
    }
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
    Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
  }
  case NEON::BI__builtin_neon_vceqs_f32:
  case NEON::BI__builtin_neon_vcles_f32:
  case NEON::BI__builtin_neon_vclts_f32:
  case NEON::BI__builtin_neon_vcges_f32:
  case NEON::BI__builtin_neon_vcgts_f32: {
    llvm::CmpInst::Predicate P;
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
    case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
    case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
    case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
    case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
    }
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
    Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
  }
  case NEON::BI__builtin_neon_vceqd_s64:
  case NEON::BI__builtin_neon_vceqd_u64:
  case NEON::BI__builtin_neon_vcgtd_s64:
  case NEON::BI__builtin_neon_vcgtd_u64:
  case NEON::BI__builtin_neon_vcltd_s64:
  case NEON::BI__builtin_neon_vcltd_u64:
  case NEON::BI__builtin_neon_vcged_u64:
  case NEON::BI__builtin_neon_vcged_s64:
  case NEON::BI__builtin_neon_vcled_u64:
  case NEON::BI__builtin_neon_vcled_s64: {
    llvm::CmpInst::Predicate P;
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqd_s64:
    case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ;  break;
    case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
    case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
    case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
    case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
    case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
    case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
    case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
    case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
    }
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
    Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
  }
  case NEON::BI__builtin_neon_vtstd_s64:
  case NEON::BI__builtin_neon_vtstd_u64: {
    llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                llvm::Constant::getNullValue(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtstd");
  }
  case NEON::BI__builtin_neon_vset_lane_i8:
  case NEON::BI__builtin_neon_vset_lane_i16:
  case NEON::BI__builtin_neon_vset_lane_i32:
  case NEON::BI__builtin_neon_vset_lane_i64:
  case NEON::BI__builtin_neon_vset_lane_f32:
  case NEON::BI__builtin_neon_vsetq_lane_i8:
  case NEON::BI__builtin_neon_vsetq_lane_i16:
  case NEON::BI__builtin_neon_vsetq_lane_i32:
  case NEON::BI__builtin_neon_vsetq_lane_i64:
  case NEON::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vset_lane_f64:
    // The vector type needs a cast for the v1f64 variant.
    Ops[1] = Builder.CreateBitCast(Ops[1],
                                   llvm::VectorType::get(DoubleTy, 1));
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vsetq_lane_f64:
    // The vector type needs a cast for the v2f64 variant.
    Ops[1] = Builder.CreateBitCast(Ops[1],
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");

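  // The lane-read builtins (vget_lane, vdup<b/h/s/d>_lane and their laneq
  // forms) all lower to a single extractelement; the cases differ only in
  // the vector type the operand is first bitcast to.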
  case NEON::BI__builtin_neon_vget_lane_i8:
  case NEON::BI__builtin_neon_vdupb_lane_i8:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i8:
  case NEON::BI__builtin_neon_vdupb_laneq_i8:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_i16:
  case NEON::BI__builtin_neon_vduph_lane_i16:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i16:
  case NEON::BI__builtin_neon_vduph_laneq_i16:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_i32:
  case NEON::BI__builtin_neon_vdups_lane_i32:
    Ops[0] = Builder.CreateBitCast(
        Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vdups_lane_f32:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vdups_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i32:
  case NEON::BI__builtin_neon_vdups_laneq_i32:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_i64:
  case NEON::BI__builtin_neon_vdupd_lane_i64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vdupd_lane_f64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vdupd_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i64:
  case NEON::BI__builtin_neon_vdupd_laneq_i64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_f32:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vget_lane_f64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_f32:
  case NEON::BI__builtin_neon_vdups_laneq_f32:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vgetq_lane_f64:
  case NEON::BI__builtin_neon_vdupd_laneq_f64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vaddd_s64:
  case NEON::BI__builtin_neon_vaddd_u64:
    return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
  case NEON::BI__builtin_neon_vsubd_s64:
  case NEON::BI__builtin_neon_vsubd_u64:
    return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
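  // There is no scalar 16-bit saturating-multiply intrinsic, so the
  // vqdmlal/vqdmlsl cases below widen both i16 operands into lane 0 of a
  // v4i16, multiply with sqdmull (yielding a v4i32), extract lane 0, and
  // feed that into the scalar saturating add or subtract.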
  case NEON::BI__builtin_neon_vqdmlalh_s16:
  case NEON::BI__builtin_neon_vqdmlslh_s16: {
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(vectorWrapScalar16(Ops[1]));
    ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
    llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                          ProductOps, "vqdmlXl");
    Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");

    unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
                            ? Intrinsic::aarch64_neon_sqadd
                            : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqshlud_n_s64: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
                        Ops, "vqshlu_n");
  }
  case NEON::BI__builtin_neon_vqshld_n_u64:
  case NEON::BI__builtin_neon_vqshld_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
                       ? Intrinsic::aarch64_neon_uqshl
                       : Intrinsic::aarch64_neon_sqshl;
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
  }
  case NEON::BI__builtin_neon_vrshrd_n_u64:
  case NEON::BI__builtin_neon_vrshrd_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
                       ? Intrinsic::aarch64_neon_urshl
                       : Intrinsic::aarch64_neon_srshl;
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
    Ops[1] = ConstantInt::get(Int64Ty, -SV);
    return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
  }
  case NEON::BI__builtin_neon_vrsrad_n_u64:
  case NEON::BI__builtin_neon_vrsrad_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
                       ? Intrinsic::aarch64_neon_urshl
                       : Intrinsic::aarch64_neon_srshl;
    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
    Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
    Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
                                {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
    return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
  }
  case NEON::BI__builtin_neon_vshld_n_s64:
  case NEON::BI__builtin_neon_vshld_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateShl(
        Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
  }
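  // Scalar right shifts need care at the type width: shifting an i64 right
  // by 64 is undefined in LLVM IR, so the signed forms below clamp the
  // immediate to 63 (the sign-fill result is identical) while the unsigned
  // forms fold the result to zero outright.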
  case NEON::BI__builtin_neon_vshrd_n_s64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateAShr(
        Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
                                                   Amt->getZExtValue())),
        "shrd_n");
  }
  case NEON::BI__builtin_neon_vshrd_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    uint64_t ShiftAmt = Amt->getZExtValue();
    // Right-shifting an unsigned value by its size yields 0.
    if (ShiftAmt == 64)
      return ConstantInt::get(Int64Ty, 0);
    return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
                              "shrd_n");
  }
  case NEON::BI__builtin_neon_vsrad_n_s64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
    Ops[1] = Builder.CreateAShr(
        Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
                                                   Amt->getZExtValue())),
        "ssra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  }
  case NEON::BI__builtin_neon_vsrad_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
    uint64_t ShiftAmt = Amt->getZExtValue();
    // Right-shifting an unsigned value by its size yields 0.
    // As Op + 0 = Op, return Ops[0] directly.
    if (ShiftAmt == 64)
      return Ops[0];
    Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
                                "usra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  }
  case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
  case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
  case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
  case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
    Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
                                          "lane");
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(vectorWrapScalar16(Ops[1]));
    ProductOps.push_back(vectorWrapScalar16(Ops[2]));
    llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                          ProductOps, "vqdmlXl");
    Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
    Ops.pop_back();

    unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
                       BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
                          ? Intrinsic::aarch64_neon_sqadd
                          : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqdmlals_s32:
  case NEON::BI__builtin_neon_vqdmlsls_s32: {
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(Ops[1]);
    ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                     ProductOps, "vqdmlXl");

    unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
                            ? Intrinsic::aarch64_neon_sqadd
                            : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqdmlals_lane_s32:
  case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
  case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
  case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
    Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
                                          "lane");
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(Ops[1]);
    ProductOps.push_back(Ops[2]);
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                     ProductOps, "vqdmlXl");
    Ops.pop_back();

    unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
                       BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
                          ? Intrinsic::aarch64_neon_sqadd
                          : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
  }
  }

  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  // Not all intrinsics handled by the common case work for AArch64 yet, so only
  // defer to common code if it's been added to our special map.
  Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
                                   AArch64SIMDIntrinsicsProvenSorted);

  if (Builtin)
    return EmitCommonNeonBuiltinExpr(
        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
        Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);

  if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
    return V;

  unsigned Int;
  switch (BuiltinID) {
  default: return nullptr;
  case NEON::BI__builtin_neon_vbsl_v:
  case NEON::BI__builtin_neon_vbslq_v: {
    llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
    Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
    Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");

    Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
    Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
    Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
    return Builder.CreateBitCast(Ops[0], Ty);
  }
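  // The FMA lane variants follow one recipe: move the addend from operand 0
  // (where the ARM builtins put it) to the last operand (where @llvm.fma
  // wants it), splat or extract the requested lane, and call the generic
  // fma intrinsic.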
  case NEON::BI__builtin_neon_vfma_lane_v:
  case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
    // The ARM builtins (and instructions) have the addend as the first
    // operand, but the 'fma' intrinsics have it last. Swap it around here.
    Value *Addend = Ops[0];
    Value *Multiplicand = Ops[1];
    Value *LaneSource = Ops[2];
    Ops[0] = Multiplicand;
    Ops[1] = LaneSource;
    Ops[2] = Addend;

    // Now adjust things to handle the lane access.
    llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
      llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
      VTy;
    llvm::Constant *cst = cast<Constant>(Ops[3]);
    Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
    Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
    Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");

    Ops.pop_back();
    Int = Intrinsic::fma;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
  }
  case NEON::BI__builtin_neon_vfma_laneq_v: {
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    // v1f64 fma should be mapped to Neon scalar f64 fma
    if (VTy && VTy->getElementType() == DoubleTy) {
      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
      llvm::Type *VTy = GetNeonType(this,
        NeonTypeFlags(NeonTypeFlags::Float64, false, true));
      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
      Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
      Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
      return Builder.CreateBitCast(Result, Ty);
    }
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                            VTy->getNumElements() * 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

    return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfmaq_laneq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
    return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfmas_lane_f32:
  case NEON::BI__builtin_neon_vfmas_laneq_f32:
  case NEON::BI__builtin_neon_vfmad_lane_f64:
  case NEON::BI__builtin_neon_vfmad_laneq_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(3)));
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfms_v:
  case NEON::BI__builtin_neon_vfmsq_v: { // Only used for FP types
    // FIXME: probably remove when we no longer support aarch64_simd.h
    // (arm_neon.h delegates to vfma).

    // The ARM builtins (and instructions) have the addend as the first
    // operand, but the 'fma' intrinsics have it last. Swap it around here.
    Value *Subtrahend = Ops[0];
    Value *Multiplicand = Ops[2];
    Ops[0] = Multiplicand;
    Ops[2] = Subtrahend;
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateFNeg(Ops[1]);
    Int = Intrinsic::fma;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmls");
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
    if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vmax_v:
  case NEON::BI__builtin_neon_vmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case NEON::BI__builtin_neon_vmin_v:
  case NEON::BI__builtin_neon_vminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
  case NEON::BI__builtin_neon_vabd_v:
  case NEON::BI__builtin_neon_vabdq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    unsigned ArgElts = VTy->getNumElements();
    llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
    unsigned BitWidth = EltTy->getBitWidth();
    llvm::Type *ArgTy = llvm::VectorType::get(
        llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
    llvm::Type* Tys[2] = { VTy, ArgTy };
    Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
    SmallVector<llvm::Value*, 1> TmpOps;
    TmpOps.push_back(Ops[1]);
    Function *F = CGM.getIntrinsic(Int, Tys);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
    llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
    return Builder.CreateAdd(tmp, addend);
  }
  case NEON::BI__builtin_neon_vpmin_v:
  case NEON::BI__builtin_neon_vpminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case NEON::BI__builtin_neon_vpmax_v:
  case NEON::BI__builtin_neon_vpmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case NEON::BI__builtin_neon_vminnm_v:
  case NEON::BI__builtin_neon_vminnmq_v:
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
  case NEON::BI__builtin_neon_vmaxnm_v:
  case NEON::BI__builtin_neon_vmaxnmq_v:
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vrecpss_f32: {
    llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f32Type),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vrecpsd_f64: {
    llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vqshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqrshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    Int = Intrinsic::aarch64_neon_rshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
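  // The vrnd* builtins map directly onto standard rounding intrinsics:
  // round (ties away from zero), nearbyint, floor, frintn (ties to even),
  // ceil, rint, and trunc, respectively.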
  case NEON::BI__builtin_neon_vrnda_v:
  case NEON::BI__builtin_neon_vrndaq_v: {
    Int = Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrndi_v:
  case NEON::BI__builtin_neon_vrndiq_v: {
    Int = Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
  }
  case NEON::BI__builtin_neon_vrndm_v:
  case NEON::BI__builtin_neon_vrndmq_v: {
    Int = Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndn_v:
  case NEON::BI__builtin_neon_vrndnq_v: {
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndp_v:
  case NEON::BI__builtin_neon_vrndpq_v: {
    Int = Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndx_v:
  case NEON::BI__builtin_neon_vrndxq_v: {
    Int = Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrnd_v:
  case NEON::BI__builtin_neon_vrndq_v: {
    Int = Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
  }
  case NEON::BI__builtin_neon_vceqz_v:
  case NEON::BI__builtin_neon_vceqzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
                                         ICmpInst::ICMP_EQ, "vceqz");
  case NEON::BI__builtin_neon_vcgez_v:
  case NEON::BI__builtin_neon_vcgezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
                                         ICmpInst::ICMP_SGE, "vcgez");
  case NEON::BI__builtin_neon_vclez_v:
  case NEON::BI__builtin_neon_vclezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
                                         ICmpInst::ICMP_SLE, "vclez");
  case NEON::BI__builtin_neon_vcgtz_v:
  case NEON::BI__builtin_neon_vcgtzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
                                         ICmpInst::ICMP_SGT, "vcgtz");
  case NEON::BI__builtin_neon_vcltz_v:
  case NEON::BI__builtin_neon_vcltzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
                                         ICmpInst::ICMP_SLT, "vcltz");
  case NEON::BI__builtin_neon_vcvt_f64_v:
  case NEON::BI__builtin_neon_vcvtq_f64_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f64_f32: {
    assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
           "unexpected vcvt_f64_f32 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_f32_f64: {
    assert(Type.getEltType() == NeonTypeFlags::Float32 &&
           "unexpected vcvt_f32_f64 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v: {
    bool Double =
        (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
        GetNeonType(this,
                    NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                         : NeonTypeFlags::Float32, false, quad));
    Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
    if (usgn)
      return Builder.CreateFPToUI(Ops[0], Ty);
    return Builder.CreateFPToSI(Ops[0], Ty);
  }
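  // vcvt{a,m,n,p} select the FP-to-integer rounding mode: 'a' rounds ties
  // away from zero, 'm' toward minus infinity, 'n' to nearest even, and 'p'
  // toward plus infinity. Each case picks the signed or unsigned aarch64
  // intrinsic and derives the matching source vector type.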
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
    bool Double =
        (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
        GetNeonType(this,
                    NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                         : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
  }
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
    bool Double =
        (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
        GetNeonType(this,
                    NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                         : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
  }
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
    bool Double =
        (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
        GetNeonType(this,
                    NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                         : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
  }
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
    bool Double =
        (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
        GetNeonType(this,
                    NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                         : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
  }
  case NEON::BI__builtin_neon_vmulx_v:
  case NEON::BI__builtin_neon_vmulxq_v: {
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmul_lane_v:
  case NEON::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = false;
    if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
      Quad = true;
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    llvm::Type *VTy = GetNeonType(this,
      NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Result, Ty);
  }
  case NEON::BI__builtin_neon_vnegd_s64:
    return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
  case NEON::BI__builtin_neon_vpmaxnm_v:
  case NEON::BI__builtin_neon_vpmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_fmaxnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case NEON::BI__builtin_neon_vpminnm_v:
  case NEON::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_fminnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
  case NEON::BI__builtin_neon_vsqrt_v:
  case NEON::BI__builtin_neon_vsqrtq_v: {
    Int = Intrinsic::sqrt;
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vrbit_v:
  case NEON::BI__builtin_neon_vrbitq_v: {
    Int = Intrinsic::aarch64_neon_rbit;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
  }
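  // Across-vector reductions (vaddv, vmaxv, vminv, vaddlv): the aarch64
  // reduction intrinsics return i32 regardless of element type, so results
  // narrower than i32 are truncated back to their natural width.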
  case NEON::BI__builtin_neon_vaddv_u8:
    // FIXME: These are handled by the AArch64 scalar code.
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddv_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vaddv_u16:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddv_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddvq_u8:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddvq_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vaddvq_u16:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddvq_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxv_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxv_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxvq_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxvq_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxv_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxv_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxvq_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxvq_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
5480 case NEON::BI__builtin_neon_vminv_u8: {
5481 Int = Intrinsic::aarch64_neon_uminv;
5482 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5484 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5485 llvm::Type *Tys[2] = { Ty, VTy };
5486 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5487 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5488 return Builder.CreateTrunc(Ops[0],
5489 llvm::IntegerType::get(getLLVMContext(), 8));
5491 case NEON::BI__builtin_neon_vminv_u16: {
5492 Int = Intrinsic::aarch64_neon_uminv;
5493 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5495 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5496 llvm::Type *Tys[2] = { Ty, VTy };
5497 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5498 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5499 return Builder.CreateTrunc(Ops[0],
5500 llvm::IntegerType::get(getLLVMContext(), 16));
5502 case NEON::BI__builtin_neon_vminvq_u8: {
5503 Int = Intrinsic::aarch64_neon_uminv;
5504 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5506 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5507 llvm::Type *Tys[2] = { Ty, VTy };
5508 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5509 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5510 return Builder.CreateTrunc(Ops[0],
5511 llvm::IntegerType::get(getLLVMContext(), 8));
5513 case NEON::BI__builtin_neon_vminvq_u16: {
5514 Int = Intrinsic::aarch64_neon_uminv;
5515 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5517 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5518 llvm::Type *Tys[2] = { Ty, VTy };
5519 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5520 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5521 return Builder.CreateTrunc(Ops[0],
5522 llvm::IntegerType::get(getLLVMContext(), 16));
5524 case NEON::BI__builtin_neon_vminv_s8: {
5525 Int = Intrinsic::aarch64_neon_sminv;
5526 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5527 VTy =
5528 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5529 llvm::Type *Tys[2] = { Ty, VTy };
5530 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5531 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5532 return Builder.CreateTrunc(Ops[0],
5533 llvm::IntegerType::get(getLLVMContext(), 8));
5534 }
5535 case NEON::BI__builtin_neon_vminv_s16: {
5536 Int = Intrinsic::aarch64_neon_sminv;
5537 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5538 VTy =
5539 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5540 llvm::Type *Tys[2] = { Ty, VTy };
5541 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5542 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5543 return Builder.CreateTrunc(Ops[0],
5544 llvm::IntegerType::get(getLLVMContext(), 16));
5545 }
5546 case NEON::BI__builtin_neon_vminvq_s8: {
5547 Int = Intrinsic::aarch64_neon_sminv;
5548 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5549 VTy =
5550 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5551 llvm::Type *Tys[2] = { Ty, VTy };
5552 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5553 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5554 return Builder.CreateTrunc(Ops[0],
5555 llvm::IntegerType::get(getLLVMContext(), 8));
5556 }
5557 case NEON::BI__builtin_neon_vminvq_s16: {
5558 Int = Intrinsic::aarch64_neon_sminv;
5559 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5560 VTy =
5561 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5562 llvm::Type *Tys[2] = { Ty, VTy };
5563 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5564 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5565 return Builder.CreateTrunc(Ops[0],
5566 llvm::IntegerType::get(getLLVMContext(), 16));
5567 }
5568 case NEON::BI__builtin_neon_vmul_n_f64: {
5569 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
5570 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
5571 return Builder.CreateFMul(Ops[0], RHS);
5572 }
5573 case NEON::BI__builtin_neon_vaddlv_u8: {
5574 Int = Intrinsic::aarch64_neon_uaddlv;
5575 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5576 VTy =
5577 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5578 llvm::Type *Tys[2] = { Ty, VTy };
5579 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5580 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5581 return Builder.CreateTrunc(Ops[0],
5582 llvm::IntegerType::get(getLLVMContext(), 16));
5583 }
5584 case NEON::BI__builtin_neon_vaddlv_u16: {
5585 Int = Intrinsic::aarch64_neon_uaddlv;
5586 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5587 VTy =
5588 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5589 llvm::Type *Tys[2] = { Ty, VTy };
5590 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5591 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5592 }
5593 case NEON::BI__builtin_neon_vaddlvq_u8: {
5594 Int = Intrinsic::aarch64_neon_uaddlv;
5595 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5596 VTy =
5597 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5598 llvm::Type *Tys[2] = { Ty, VTy };
5599 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5600 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5601 return Builder.CreateTrunc(Ops[0],
5602 llvm::IntegerType::get(getLLVMContext(), 16));
5603 }
5604 case NEON::BI__builtin_neon_vaddlvq_u16: {
5605 Int = Intrinsic::aarch64_neon_uaddlv;
5606 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5607 VTy =
5608 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5609 llvm::Type *Tys[2] = { Ty, VTy };
5610 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5611 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5612 }
5613 case NEON::BI__builtin_neon_vaddlv_s8: {
5614 Int = Intrinsic::aarch64_neon_saddlv;
5615 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5616 VTy =
5617 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5618 llvm::Type *Tys[2] = { Ty, VTy };
5619 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5620 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5621 return Builder.CreateTrunc(Ops[0],
5622 llvm::IntegerType::get(getLLVMContext(), 16));
5623 }
5624 case NEON::BI__builtin_neon_vaddlv_s16: {
5625 Int = Intrinsic::aarch64_neon_saddlv;
5626 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5627 VTy =
5628 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5629 llvm::Type *Tys[2] = { Ty, VTy };
5630 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5631 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5632 }
5633 case NEON::BI__builtin_neon_vaddlvq_s8: {
5634 Int = Intrinsic::aarch64_neon_saddlv;
5635 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5636 VTy =
5637 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5638 llvm::Type *Tys[2] = { Ty, VTy };
5639 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5640 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5641 return Builder.CreateTrunc(Ops[0],
5642 llvm::IntegerType::get(getLLVMContext(), 16));
5643 }
5644 case NEON::BI__builtin_neon_vaddlvq_s16: {
5645 Int = Intrinsic::aarch64_neon_saddlv;
5646 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5647 VTy =
5648 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5649 llvm::Type *Tys[2] = { Ty, VTy };
5650 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5651 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5652 }
5653 case NEON::BI__builtin_neon_vsri_n_v:
5654 case NEON::BI__builtin_neon_vsriq_n_v: {
5655 Int = Intrinsic::aarch64_neon_vsri;
5656 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
5657 return EmitNeonCall(Intrin, Ops, "vsri_n");
5658 }
5659 case NEON::BI__builtin_neon_vsli_n_v:
5660 case NEON::BI__builtin_neon_vsliq_n_v: {
5661 Int = Intrinsic::aarch64_neon_vsli;
5662 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
5663 return EmitNeonCall(Intrin, Ops, "vsli_n");
5664 }
5665 case NEON::BI__builtin_neon_vsra_n_v:
5666 case NEON::BI__builtin_neon_vsraq_n_v:
5667 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5668 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
5669 return Builder.CreateAdd(Ops[0], Ops[1]);
5670 case NEON::BI__builtin_neon_vrsra_n_v:
5671 case NEON::BI__builtin_neon_vrsraq_n_v: {
5672 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
5673 SmallVector<llvm::Value*,2> TmpOps;
5674 TmpOps.push_back(Ops[1]);
5675 TmpOps.push_back(Ops[2]);
5676 Function* F = CGM.getIntrinsic(Int, Ty);
5677 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
5678 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
5679 return Builder.CreateAdd(Ops[0], tmp);
5680 }
5681 // FIXME: Sharing loads & stores with 32-bit is complicated by the absence
5682 // of an Align parameter here.
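// Note on the lowering used below (inferred from the code): the
// aarch64.neon.ld1xN intrinsics return a struct of N vectors, so Ops[0],
// the address of the builtin's result temporary, is bitcast to a pointer
// to that struct type and the returned struct is stored through it.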
5683 case NEON::BI__builtin_neon_vld1_x2_v:
5684 case NEON::BI__builtin_neon_vld1q_x2_v:
5685 case NEON::BI__builtin_neon_vld1_x3_v:
5686 case NEON::BI__builtin_neon_vld1q_x3_v:
5687 case NEON::BI__builtin_neon_vld1_x4_v:
5688 case NEON::BI__builtin_neon_vld1q_x4_v: {
5689 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
5690 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5691 llvm::Type *Tys[2] = { VTy, PTy };
5693 switch (BuiltinID) {
5694 case NEON::BI__builtin_neon_vld1_x2_v:
5695 case NEON::BI__builtin_neon_vld1q_x2_v:
5696 Int = Intrinsic::aarch64_neon_ld1x2;
5697 break;
5698 case NEON::BI__builtin_neon_vld1_x3_v:
5699 case NEON::BI__builtin_neon_vld1q_x3_v:
5700 Int = Intrinsic::aarch64_neon_ld1x3;
5701 break;
5702 case NEON::BI__builtin_neon_vld1_x4_v:
5703 case NEON::BI__builtin_neon_vld1q_x4_v:
5704 Int = Intrinsic::aarch64_neon_ld1x4;
5705 break;
5706 }
5707 Function *F = CGM.getIntrinsic(Int, Tys);
5708 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
5709 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5710 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5711 return Builder.CreateStore(Ops[1], Ops[0]);
5712 }
5713 case NEON::BI__builtin_neon_vst1_x2_v:
5714 case NEON::BI__builtin_neon_vst1q_x2_v:
5715 case NEON::BI__builtin_neon_vst1_x3_v:
5716 case NEON::BI__builtin_neon_vst1q_x3_v:
5717 case NEON::BI__builtin_neon_vst1_x4_v:
5718 case NEON::BI__builtin_neon_vst1q_x4_v: {
5719 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
5720 llvm::Type *Tys[2] = { VTy, PTy };
5722 switch (BuiltinID) {
5723 case NEON::BI__builtin_neon_vst1_x2_v:
5724 case NEON::BI__builtin_neon_vst1q_x2_v:
5725 Int = Intrinsic::aarch64_neon_st1x2;
5726 break;
5727 case NEON::BI__builtin_neon_vst1_x3_v:
5728 case NEON::BI__builtin_neon_vst1q_x3_v:
5729 Int = Intrinsic::aarch64_neon_st1x3;
5730 break;
5731 case NEON::BI__builtin_neon_vst1_x4_v:
5732 case NEON::BI__builtin_neon_vst1q_x4_v:
5733 Int = Intrinsic::aarch64_neon_st1x4;
5734 break;
5735 }
5736 SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end());
5737 IntOps.push_back(Ops[0]);
5738 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), IntOps, "");
5739 }
5740 case NEON::BI__builtin_neon_vld1_v:
5741 case NEON::BI__builtin_neon_vld1q_v:
5742 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
5743 return Builder.CreateLoad(Ops[0]);
5744 case NEON::BI__builtin_neon_vst1_v:
5745 case NEON::BI__builtin_neon_vst1q_v:
5746 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
5747 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
5748 return Builder.CreateStore(Ops[1], Ops[0]);
5749 case NEON::BI__builtin_neon_vld1_lane_v:
5750 case NEON::BI__builtin_neon_vld1q_lane_v:
5751 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5752 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
5753 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5754 Ops[0] = Builder.CreateLoad(Ops[0]);
5755 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
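// vld1_dup: load a single scalar, insert it into lane 0 of an undef
// vector, and let EmitNeonSplat broadcast lane 0 across the result.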
5756 case NEON::BI__builtin_neon_vld1_dup_v:
5757 case NEON::BI__builtin_neon_vld1q_dup_v: {
5758 Value *V = UndefValue::get(Ty);
5759 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
5760 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5761 Ops[0] = Builder.CreateLoad(Ops[0]);
5762 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
5763 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
5764 return EmitNeonSplat(Ops[0], CI);
5765 }
5766 case NEON::BI__builtin_neon_vst1_lane_v:
5767 case NEON::BI__builtin_neon_vst1q_lane_v:
5768 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5769 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
5770 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5771 return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
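// The vldN builtins (N = 2, 3, 4) below call the corresponding
// aarch64.neon.ldN intrinsic, which yields a struct of N vectors that is
// stored back through the result pointer in Ops[0], as with vld1_xN above.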
5772 case NEON::BI__builtin_neon_vld2_v:
5773 case NEON::BI__builtin_neon_vld2q_v: {
5774 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
5775 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5776 llvm::Type *Tys[2] = { VTy, PTy };
5777 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
5778 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
5779 Ops[0] = Builder.CreateBitCast(Ops[0],
5780 llvm::PointerType::getUnqual(Ops[1]->getType()));
5781 return Builder.CreateStore(Ops[1], Ops[0]);
5782 }
5783 case NEON::BI__builtin_neon_vld3_v:
5784 case NEON::BI__builtin_neon_vld3q_v: {
5785 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
5786 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5787 llvm::Type *Tys[2] = { VTy, PTy };
5788 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
5789 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
5790 Ops[0] = Builder.CreateBitCast(Ops[0],
5791 llvm::PointerType::getUnqual(Ops[1]->getType()));
5792 return Builder.CreateStore(Ops[1], Ops[0]);
5793 }
5794 case NEON::BI__builtin_neon_vld4_v:
5795 case NEON::BI__builtin_neon_vld4q_v: {
5796 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
5797 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5798 llvm::Type *Tys[2] = { VTy, PTy };
5799 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
5800 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
5801 Ops[0] = Builder.CreateBitCast(Ops[0],
5802 llvm::PointerType::getUnqual(Ops[1]->getType()));
5803 return Builder.CreateStore(Ops[1], Ops[0]);
5804 }
5805 case NEON::BI__builtin_neon_vld2_dup_v:
5806 case NEON::BI__builtin_neon_vld2q_dup_v: {
5807 llvm::Type *PTy =
5808 llvm::PointerType::getUnqual(VTy->getElementType());
5809 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5810 llvm::Type *Tys[2] = { VTy, PTy };
5811 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
5812 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
5813 Ops[0] = Builder.CreateBitCast(Ops[0],
5814 llvm::PointerType::getUnqual(Ops[1]->getType()));
5815 return Builder.CreateStore(Ops[1], Ops[0]);
5816 }
5817 case NEON::BI__builtin_neon_vld3_dup_v:
5818 case NEON::BI__builtin_neon_vld3q_dup_v: {
5819 llvm::Type *PTy =
5820 llvm::PointerType::getUnqual(VTy->getElementType());
5821 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5822 llvm::Type *Tys[2] = { VTy, PTy };
5823 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
5824 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
5825 Ops[0] = Builder.CreateBitCast(Ops[0],
5826 llvm::PointerType::getUnqual(Ops[1]->getType()));
5827 return Builder.CreateStore(Ops[1], Ops[0]);
5828 }
5829 case NEON::BI__builtin_neon_vld4_dup_v:
5830 case NEON::BI__builtin_neon_vld4q_dup_v: {
5831 llvm::Type *PTy =
5832 llvm::PointerType::getUnqual(VTy->getElementType());
5833 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5834 llvm::Type *Tys[2] = { VTy, PTy };
5835 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
5836 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
5837 Ops[0] = Builder.CreateBitCast(Ops[0],
5838 llvm::PointerType::getUnqual(Ops[1]->getType()));
5839 return Builder.CreateStore(Ops[1], Ops[0]);
5840 }
5841 case NEON::BI__builtin_neon_vld2_lane_v:
5842 case NEON::BI__builtin_neon_vld2q_lane_v: {
5843 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
5844 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
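// Rotate the source pointer from Ops[1] to the end of the list: the
// ldNlane intrinsics take (vectors..., lane, pointer), while the builtin
// receives the pointer as its second operand.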
5845 Ops.push_back(Ops[1]);
5846 Ops.erase(Ops.begin()+1);
5847 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5848 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5849 Ops[3] = Builder.CreateZExt(Ops[3],
5850 llvm::IntegerType::get(getLLVMContext(), 64));
5851 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
5852 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5853 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5854 return Builder.CreateStore(Ops[1], Ops[0]);
5855 }
5856 case NEON::BI__builtin_neon_vld3_lane_v:
5857 case NEON::BI__builtin_neon_vld3q_lane_v: {
5858 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
5859 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
5860 Ops.push_back(Ops[1]);
5861 Ops.erase(Ops.begin()+1);
5862 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5863 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5864 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
5865 Ops[4] = Builder.CreateZExt(Ops[4],
5866 llvm::IntegerType::get(getLLVMContext(), 64));
5867 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
5868 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5869 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5870 return Builder.CreateStore(Ops[1], Ops[0]);
5871 }
5872 case NEON::BI__builtin_neon_vld4_lane_v:
5873 case NEON::BI__builtin_neon_vld4q_lane_v: {
5874 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
5875 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
5876 Ops.push_back(Ops[1]);
5877 Ops.erase(Ops.begin()+1);
5878 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5879 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5880 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
5881 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
5882 Ops[5] = Builder.CreateZExt(Ops[5],
5883 llvm::IntegerType::get(getLLVMContext(), 64));
5884 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
5885 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5886 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5887 return Builder.CreateStore(Ops[1], Ops[0]);
5888 }
5889 case NEON::BI__builtin_neon_vst2_v:
5890 case NEON::BI__builtin_neon_vst2q_v: {
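// For the vstN stores the pointer in Ops[0] is rotated to the back, since
// the aarch64.neon.stN intrinsics expect the address as their final
// operand.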
5891 Ops.push_back(Ops[0]);
5892 Ops.erase(Ops.begin());
5893 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
5894 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
5895 Ops, "");
5896 }
5897 case NEON::BI__builtin_neon_vst2_lane_v:
5898 case NEON::BI__builtin_neon_vst2q_lane_v: {
5899 Ops.push_back(Ops[0]);
5900 Ops.erase(Ops.begin());
5901 Ops[2] = Builder.CreateZExt(Ops[2],
5902 llvm::IntegerType::get(getLLVMContext(), 64));
5903 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
5904 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
5905 Ops, "");
5906 }
5907 case NEON::BI__builtin_neon_vst3_v:
5908 case NEON::BI__builtin_neon_vst3q_v: {
5909 Ops.push_back(Ops[0]);
5910 Ops.erase(Ops.begin());
5911 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
5912 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
5913 Ops, "");
5914 }
5915 case NEON::BI__builtin_neon_vst3_lane_v:
5916 case NEON::BI__builtin_neon_vst3q_lane_v: {
5917 Ops.push_back(Ops[0]);
5918 Ops.erase(Ops.begin());
5919 Ops[3] = Builder.CreateZExt(Ops[3],
5920 llvm::IntegerType::get(getLLVMContext(), 64));
5921 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
5922 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
5923 Ops, "");
5924 }
5925 case NEON::BI__builtin_neon_vst4_v:
5926 case NEON::BI__builtin_neon_vst4q_v: {
5927 Ops.push_back(Ops[0]);
5928 Ops.erase(Ops.begin());
5929 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
5930 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
5931 Ops, "");
5932 }
5933 case NEON::BI__builtin_neon_vst4_lane_v:
5934 case NEON::BI__builtin_neon_vst4q_lane_v: {
5935 Ops.push_back(Ops[0]);
5936 Ops.erase(Ops.begin());
5937 Ops[4] = Builder.CreateZExt(Ops[4],
5938 llvm::IntegerType::get(getLLVMContext(), 64));
5939 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
5940 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
5941 Ops, "");
5942 }
5943 case NEON::BI__builtin_neon_vtrn_v:
5944 case NEON::BI__builtin_neon_vtrnq_v: {
5945 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5946 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5947 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5948 Value *SV = nullptr;
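// vtrn returns two vectors through the pointer in Ops[0], one per loop
// iteration. Illustrative masks for v8i8: vi = 0 gives
// <0,8,2,10,4,12,6,14>, vi = 1 gives <1,9,3,11,5,13,7,15>.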
5950 for (unsigned vi = 0; vi != 2; ++vi) {
5951 SmallVector<Constant*, 16> Indices;
5952 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5953 Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
5954 Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
5955 }
5956 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5957 SV = llvm::ConstantVector::get(Indices);
5958 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
5959 SV = Builder.CreateStore(SV, Addr);
5960 }
5961 return SV;
5962 }
5963 case NEON::BI__builtin_neon_vuzp_v:
5964 case NEON::BI__builtin_neon_vuzpq_v: {
5965 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5966 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5967 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5968 Value *SV = nullptr;
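// vuzp de-interleaves: mask element j is 2*j+vi, so vi = 0 collects the
// even-indexed elements of the concatenated inputs and vi = 1 the odd
// ones.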
5970 for (unsigned vi = 0; vi != 2; ++vi) {
5971 SmallVector<Constant*, 16> Indices;
5972 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5973 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
5975 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5976 SV = llvm::ConstantVector::get(Indices);
5977 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
5978 SV = Builder.CreateStore(SV, Addr);
5979 }
5980 return SV;
5981 }
5982 case NEON::BI__builtin_neon_vzip_v:
5983 case NEON::BI__builtin_neon_vzipq_v: {
5984 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5985 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5986 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5987 Value *SV = nullptr;
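// vzip interleaves the low (vi = 0) or high (vi = 1) halves of the two
// inputs; for v8i8 and vi = 0 the mask is <0,8,1,9,2,10,3,11>
// (illustrative).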
5989 for (unsigned vi = 0; vi != 2; ++vi) {
5990 SmallVector<Constant*, 16> Indices;
5991 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5992 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
5993 Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
5994 }
5995 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5996 SV = llvm::ConstantVector::get(Indices);
5997 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
5998 SV = Builder.CreateStore(SV, Addr);
5999 }
6000 return SV;
6001 }
6002 case NEON::BI__builtin_neon_vqtbl1q_v: {
6003 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
6004 Ops, "vtbl1");
6005 }
6006 case NEON::BI__builtin_neon_vqtbl2q_v: {
6007 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
6008 Ops, "vtbl2");
6009 }
6010 case NEON::BI__builtin_neon_vqtbl3q_v: {
6011 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
6012 Ops, "vtbl3");
6013 }
6014 case NEON::BI__builtin_neon_vqtbl4q_v: {
6015 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
6016 Ops, "vtbl4");
6017 }
6018 case NEON::BI__builtin_neon_vqtbx1q_v: {
6019 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
6020 Ops, "vtbx1");
6021 }
6022 case NEON::BI__builtin_neon_vqtbx2q_v: {
6023 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
6024 Ops, "vtbx2");
6025 }
6026 case NEON::BI__builtin_neon_vqtbx3q_v: {
6027 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
6028 Ops, "vtbx3");
6029 }
6030 case NEON::BI__builtin_neon_vqtbx4q_v: {
6031 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
6032 Ops, "vtbx4");
6033 }
6034 case NEON::BI__builtin_neon_vsqadd_v:
6035 case NEON::BI__builtin_neon_vsqaddq_v: {
6036 Int = Intrinsic::aarch64_neon_usqadd;
6037 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
6038 }
6039 case NEON::BI__builtin_neon_vuqadd_v:
6040 case NEON::BI__builtin_neon_vuqaddq_v: {
6041 Int = Intrinsic::aarch64_neon_suqadd;
6042 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
6043 }
6044 }
6045 }
6047 llvm::Value *CodeGenFunction::
6048 BuildVector(ArrayRef<llvm::Value*> Ops) {
6049 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
6050 "Not a power-of-two sized vector!");
6051 bool AllConstants = true;
6052 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
6053 AllConstants &= isa<Constant>(Ops[i]);
6055 // If this is a constant vector, create a ConstantVector.
6056 if (AllConstants) {
6057 SmallVector<llvm::Constant*, 16> CstOps;
6058 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
6059 CstOps.push_back(cast<Constant>(Ops[i]));
6060 return llvm::ConstantVector::get(CstOps);
6061 }
6063 // Otherwise, insertelement the values to build the vector.
6064 Value *Result =
6065 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
6067 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
6068 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
6070 return Result;
6071 }
6073 Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
6074 const CallExpr *E) {
6075 SmallVector<Value*, 4> Ops;
6077 // Find out if any arguments are required to be integer constant expressions.
6078 unsigned ICEArguments = 0;
6079 ASTContext::GetBuiltinTypeError Error;
6080 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6081 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6083 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
6084 // If this is a normal argument, just emit it as a scalar.
6085 if ((ICEArguments & (1 << i)) == 0) {
6086 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6087 continue;
6088 }
6090 // If this is required to be a constant, constant fold it so that we know
6091 // that the generated intrinsic gets a ConstantInt.
6092 llvm::APSInt Result;
6093 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
6094 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
6095 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
6096 }
6098 switch (BuiltinID) {
6099 default: return nullptr;
6100 case X86::BI__builtin_cpu_supports: {
6101 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
6102 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
6104 // TODO: When/if this becomes more than x86 specific then use a TargetInfo
6105 // based mapping.
6106 // Processor features and mapping to processor feature value.
6107 enum X86Features {
6108 CMOV = 0,
6109 MMX,
6110 POPCNT,
6111 SSE,
6112 SSE2,
6113 SSE3,
6114 SSSE3,
6115 SSE4_1,
6116 SSE4_2,
6117 AVX,
6118 AVX2,
6119 SSE4_A,
6120 FMA4,
6121 XOP,
6122 FMA,
6123 AVX512F,
6124 BMI,
6125 BMI2,
6126 MAX
6127 };
6129 X86Features Feature = StringSwitch<X86Features>(FeatureStr)
6130 .Case("cmov", X86Features::CMOV)
6131 .Case("mmx", X86Features::MMX)
6132 .Case("popcnt", X86Features::POPCNT)
6133 .Case("sse", X86Features::SSE)
6134 .Case("sse2", X86Features::SSE2)
6135 .Case("sse3", X86Features::SSE3)
6136 .Case("sse4.1", X86Features::SSE4_1)
6137 .Case("sse4.2", X86Features::SSE4_2)
6138 .Case("avx", X86Features::AVX)
6139 .Case("avx2", X86Features::AVX2)
6140 .Case("sse4a", X86Features::SSE4_A)
6141 .Case("fma4", X86Features::FMA4)
6142 .Case("xop", X86Features::XOP)
6143 .Case("fma", X86Features::FMA)
6144 .Case("avx512f", X86Features::AVX512F)
6145 .Case("bmi", X86Features::BMI)
6146 .Case("bmi2", X86Features::BMI2)
6147 .Default(X86Features::MAX);
6148 assert(Feature != X86Features::MAX && "Invalid feature!");
6150 // Matching the struct layout from the compiler-rt/libgcc structure that is
6151 // filled in:
6152 // unsigned int __cpu_vendor;
6153 // unsigned int __cpu_type;
6154 // unsigned int __cpu_subtype;
6155 // unsigned int __cpu_features[1];
6156 llvm::Type *STy = llvm::StructType::get(
6157 Int32Ty, Int32Ty, Int32Ty, llvm::ArrayType::get(Int32Ty, 1), nullptr);
6159 // Grab the global __cpu_model.
6160 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
6162 // Grab the first (0th) element from the field __cpu_features off of the
6163 // global in the struct STy.
6164 Value *Idxs[] = {
6165 ConstantInt::get(Int32Ty, 0),
6166 ConstantInt::get(Int32Ty, 3),
6167 ConstantInt::get(Int32Ty, 0)
6168 };
6169 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
6170 Value *Features = Builder.CreateLoad(CpuFeatures);
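// The GEP indices {0, 3, 0} select the global itself, its fourth field
// (__cpu_features), and element 0 of that array; e.g.
// __builtin_cpu_supports("sse2") tests bit X86Features::SSE2 of that word.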
6172 // Check the value of the bit corresponding to the feature requested.
6173 Value *Bitset = Builder.CreateAnd(
6174 Features, llvm::ConstantInt::get(Int32Ty, 1 << Feature));
6175 return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
6176 }
6177 case X86::BI_mm_prefetch: {
6178 Value *Address = EmitScalarExpr(E->getArg(0));
6179 Value *RW = ConstantInt::get(Int32Ty, 0);
6180 Value *Locality = EmitScalarExpr(E->getArg(1));
6181 Value *Data = ConstantInt::get(Int32Ty, 1);
6182 Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
6183 return Builder.CreateCall(F, {Address, RW, Locality, Data});
6184 }
6185 case X86::BI__builtin_ia32_vec_init_v8qi:
6186 case X86::BI__builtin_ia32_vec_init_v4hi:
6187 case X86::BI__builtin_ia32_vec_init_v2si:
6188 return Builder.CreateBitCast(BuildVector(Ops),
6189 llvm::Type::getX86_MMXTy(getLLVMContext()));
6190 case X86::BI__builtin_ia32_vec_ext_v2si:
6191 return Builder.CreateExtractElement(Ops[0],
6192 llvm::ConstantInt::get(Ops[1]->getType(), 0));
6193 case X86::BI__builtin_ia32_ldmxcsr: {
6194 Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
6195 Builder.CreateStore(Ops[0], Tmp);
6196 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
6197 Builder.CreateBitCast(Tmp, Int8PtrTy));
6198 }
6199 case X86::BI__builtin_ia32_stmxcsr: {
6200 Value *Tmp = CreateMemTemp(E->getType());
6201 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
6202 Builder.CreateBitCast(Tmp, Int8PtrTy));
6203 return Builder.CreateLoad(Tmp, "stmxcsr");
6204 }
6205 case X86::BI__builtin_ia32_storehps:
6206 case X86::BI__builtin_ia32_storelps: {
6207 llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
6208 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
6210 // cast val v2i64
6211 Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
6213 // extract (0, 1)
6214 unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
6215 llvm::Value *Idx = llvm::ConstantInt::get(SizeTy, Index);
6216 Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
6218 // cast pointer to i64 & store
6219 Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
6220 return Builder.CreateStore(Ops[1], Ops[0]);
6221 }
6222 case X86::BI__builtin_ia32_palignr128:
6223 case X86::BI__builtin_ia32_palignr256: {
6224 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
6226 unsigned NumElts =
6227 cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
6228 assert(NumElts % 16 == 0);
6229 unsigned NumLanes = NumElts / 16;
6230 unsigned NumLaneElts = NumElts / NumLanes;
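// Worked example (illustrative): a 256-bit palignr with ShiftVal 20 is
// rewritten by the branch below as a 4-byte shift per 128-bit lane with
// zeroes (a nulled Ops[0]) shifted in.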
6232 // If palignr is shifting the pair of vectors more than the size of two
6233 // lanes, emit zero.
6234 if (ShiftVal >= (2 * NumLaneElts))
6235 return llvm::Constant::getNullValue(ConvertType(E->getType()));
6237 // If palignr is shifting the pair of input vectors more than one lane,
6238 // but less than two lanes, convert to shifting in zeroes.
6239 if (ShiftVal > NumLaneElts) {
6240 ShiftVal -= NumLaneElts;
6241 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
6242 }
6244 SmallVector<llvm::Constant*, 32> Indices;
6245 // 256-bit palignr operates on 128-bit lanes so we need to handle that
6246 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
6247 for (unsigned i = 0; i != NumLaneElts; ++i) {
6248 unsigned Idx = ShiftVal + i;
6249 if (Idx >= NumLaneElts)
6250 Idx += NumElts - NumLaneElts; // End of lane, switch operand.
6251 Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
6252 }
6253 }
6255 Value* SV = llvm::ConstantVector::get(Indices);
6256 return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
6257 }
6258 case X86::BI__builtin_ia32_pslldqi256: {
6259 // Shift value is in bits so divide by 8.
6260 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3;
6262 // If pslldq is shifting the vector more than 15 bytes, emit zero.
6263 if (shiftVal >= 16)
6264 return llvm::Constant::getNullValue(ConvertType(E->getType()));
6266 SmallVector<llvm::Constant*, 32> Indices;
6267 // 256-bit pslldq operates on 128-bit lanes so we need to handle that
6268 for (unsigned l = 0; l != 32; l += 16) {
6269 for (unsigned i = 0; i != 16; ++i) {
6270 unsigned Idx = 32 + i - shiftVal;
6271 if (Idx < 32) Idx -= 16; // end of lane, switch operand.
6272 Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
6273 }
6274 }
6276 llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32);
6277 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
6278 Value *Zero = llvm::Constant::getNullValue(VecTy);
6280 Value *SV = llvm::ConstantVector::get(Indices);
6281 SV = Builder.CreateShuffleVector(Zero, Ops[0], SV, "pslldq");
6282 llvm::Type *ResultType = ConvertType(E->getType());
6283 return Builder.CreateBitCast(SV, ResultType, "cast");
6284 }
6285 case X86::BI__builtin_ia32_psrldqi256: {
6286 // Shift value is in bits so divide by 8.
6287 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3;
6289 // If psrldq is shifting the vector more than 15 bytes, emit zero.
6290 if (shiftVal >= 16)
6291 return llvm::Constant::getNullValue(ConvertType(E->getType()));
6293 SmallVector<llvm::Constant*, 32> Indices;
6294 // 256-bit psrldq operates on 128-bit lanes so we need to handle that
6295 for (unsigned l = 0; l != 32; l += 16) {
6296 for (unsigned i = 0; i != 16; ++i) {
6297 unsigned Idx = i + shiftVal;
6298 if (Idx >= 16) Idx += 16; // end of lane, switch operand.
6299 Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
6300 }
6301 }
6303 llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32);
6304 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
6305 Value *Zero = llvm::Constant::getNullValue(VecTy);
6307 Value *SV = llvm::ConstantVector::get(Indices);
6308 SV = Builder.CreateShuffleVector(Ops[0], Zero, SV, "psrldq");
6309 llvm::Type *ResultType = ConvertType(E->getType());
6310 return Builder.CreateBitCast(SV, ResultType, "cast");
6311 }
6312 case X86::BI__builtin_ia32_movntps:
6313 case X86::BI__builtin_ia32_movntps256:
6314 case X86::BI__builtin_ia32_movntpd:
6315 case X86::BI__builtin_ia32_movntpd256:
6316 case X86::BI__builtin_ia32_movntdq:
6317 case X86::BI__builtin_ia32_movntdq256:
6318 case X86::BI__builtin_ia32_movnti:
6319 case X86::BI__builtin_ia32_movnti64: {
6320 llvm::MDNode *Node = llvm::MDNode::get(
6321 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
6323 // Convert the type of the pointer to a pointer to the stored type.
6324 Value *BC = Builder.CreateBitCast(Ops[0],
6325 llvm::PointerType::getUnqual(Ops[1]->getType()),
6326 "cast");
6327 StoreInst *SI = Builder.CreateStore(Ops[1], BC);
6328 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
6330 // If the operand is an integer, we can't assume alignment. Otherwise,
6331 // assume natural alignment.
6332 QualType ArgTy = E->getArg(1)->getType();
6333 unsigned Align;
6334 if (ArgTy->isIntegerType())
6335 Align = 1;
6336 else
6337 Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
6338 SI->setAlignment(Align);
6339 return SI;
6340 }
6342 case X86::BI__builtin_ia32_pswapdsf:
6343 case X86::BI__builtin_ia32_pswapdsi: {
6344 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
6345 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
6346 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
6347 return Builder.CreateCall(F, Ops, "pswapd");
6348 }
6349 case X86::BI__builtin_ia32_rdrand16_step:
6350 case X86::BI__builtin_ia32_rdrand32_step:
6351 case X86::BI__builtin_ia32_rdrand64_step:
6352 case X86::BI__builtin_ia32_rdseed16_step:
6353 case X86::BI__builtin_ia32_rdseed32_step:
6354 case X86::BI__builtin_ia32_rdseed64_step: {
6355 Intrinsic::ID ID;
6356 switch (BuiltinID) {
6357 default: llvm_unreachable("Unsupported intrinsic!");
6358 case X86::BI__builtin_ia32_rdrand16_step:
6359 ID = Intrinsic::x86_rdrand_16;
6360 break;
6361 case X86::BI__builtin_ia32_rdrand32_step:
6362 ID = Intrinsic::x86_rdrand_32;
6363 break;
6364 case X86::BI__builtin_ia32_rdrand64_step:
6365 ID = Intrinsic::x86_rdrand_64;
6366 break;
6367 case X86::BI__builtin_ia32_rdseed16_step:
6368 ID = Intrinsic::x86_rdseed_16;
6369 break;
6370 case X86::BI__builtin_ia32_rdseed32_step:
6371 ID = Intrinsic::x86_rdseed_32;
6372 break;
6373 case X86::BI__builtin_ia32_rdseed64_step:
6374 ID = Intrinsic::x86_rdseed_64;
6375 break;
6376 }
6378 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
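// The call above returns {value, i1 carry}: the value is stored through
// the out-parameter in Ops[0] and the carry flag becomes the builtin's
// result.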
6379 Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
6380 return Builder.CreateExtractValue(Call, 1);
6381 }
6382 // SSE comparison intrinsics
6383 case X86::BI__builtin_ia32_cmpeqps:
6384 case X86::BI__builtin_ia32_cmpltps:
6385 case X86::BI__builtin_ia32_cmpleps:
6386 case X86::BI__builtin_ia32_cmpunordps:
6387 case X86::BI__builtin_ia32_cmpneqps:
6388 case X86::BI__builtin_ia32_cmpnltps:
6389 case X86::BI__builtin_ia32_cmpnleps:
6390 case X86::BI__builtin_ia32_cmpordps:
6391 case X86::BI__builtin_ia32_cmpeqss:
6392 case X86::BI__builtin_ia32_cmpltss:
6393 case X86::BI__builtin_ia32_cmpless:
6394 case X86::BI__builtin_ia32_cmpunordss:
6395 case X86::BI__builtin_ia32_cmpneqss:
6396 case X86::BI__builtin_ia32_cmpnltss:
6397 case X86::BI__builtin_ia32_cmpnless:
6398 case X86::BI__builtin_ia32_cmpordss:
6399 case X86::BI__builtin_ia32_cmpeqpd:
6400 case X86::BI__builtin_ia32_cmpltpd:
6401 case X86::BI__builtin_ia32_cmplepd:
6402 case X86::BI__builtin_ia32_cmpunordpd:
6403 case X86::BI__builtin_ia32_cmpneqpd:
6404 case X86::BI__builtin_ia32_cmpnltpd:
6405 case X86::BI__builtin_ia32_cmpnlepd:
6406 case X86::BI__builtin_ia32_cmpordpd:
6407 case X86::BI__builtin_ia32_cmpeqsd:
6408 case X86::BI__builtin_ia32_cmpltsd:
6409 case X86::BI__builtin_ia32_cmplesd:
6410 case X86::BI__builtin_ia32_cmpunordsd:
6411 case X86::BI__builtin_ia32_cmpneqsd:
6412 case X86::BI__builtin_ia32_cmpnltsd:
6413 case X86::BI__builtin_ia32_cmpnlesd:
6414 case X86::BI__builtin_ia32_cmpordsd:
6415 // These exist so that the builtin that takes an immediate can be bounds
6416 // checked by clang to avoid passing bad immediates to the backend. Since
6417 // AVX has a larger immediate than SSE we would need separate builtins to
6418 // do the different bounds checking. Rather than create a clang specific
6419 // SSE only builtin, this implements eight separate builtins to match gcc
6420 // implementation.
6422 // Choose the immediate.
6423 unsigned Imm;
6424 switch (BuiltinID) {
6425 default: llvm_unreachable("Unsupported intrinsic!");
6426 case X86::BI__builtin_ia32_cmpeqps:
6427 case X86::BI__builtin_ia32_cmpeqss:
6428 case X86::BI__builtin_ia32_cmpeqpd:
6429 case X86::BI__builtin_ia32_cmpeqsd:
6430 Imm = 0;
6431 break;
6432 case X86::BI__builtin_ia32_cmpltps:
6433 case X86::BI__builtin_ia32_cmpltss:
6434 case X86::BI__builtin_ia32_cmpltpd:
6435 case X86::BI__builtin_ia32_cmpltsd:
6436 Imm = 1;
6437 break;
6438 case X86::BI__builtin_ia32_cmpleps:
6439 case X86::BI__builtin_ia32_cmpless:
6440 case X86::BI__builtin_ia32_cmplepd:
6441 case X86::BI__builtin_ia32_cmplesd:
6442 Imm = 2;
6443 break;
6444 case X86::BI__builtin_ia32_cmpunordps:
6445 case X86::BI__builtin_ia32_cmpunordss:
6446 case X86::BI__builtin_ia32_cmpunordpd:
6447 case X86::BI__builtin_ia32_cmpunordsd:
6448 Imm = 3;
6449 break;
6450 case X86::BI__builtin_ia32_cmpneqps:
6451 case X86::BI__builtin_ia32_cmpneqss:
6452 case X86::BI__builtin_ia32_cmpneqpd:
6453 case X86::BI__builtin_ia32_cmpneqsd:
6454 Imm = 4;
6455 break;
6456 case X86::BI__builtin_ia32_cmpnltps:
6457 case X86::BI__builtin_ia32_cmpnltss:
6458 case X86::BI__builtin_ia32_cmpnltpd:
6459 case X86::BI__builtin_ia32_cmpnltsd:
6460 Imm = 5;
6461 break;
6462 case X86::BI__builtin_ia32_cmpnleps:
6463 case X86::BI__builtin_ia32_cmpnless:
6464 case X86::BI__builtin_ia32_cmpnlepd:
6465 case X86::BI__builtin_ia32_cmpnlesd:
6466 Imm = 6;
6467 break;
6468 case X86::BI__builtin_ia32_cmpordps:
6469 case X86::BI__builtin_ia32_cmpordss:
6470 case X86::BI__builtin_ia32_cmpordpd:
6471 case X86::BI__builtin_ia32_cmpordsd:
6472 Imm = 7;
6473 break;
6474 }
6476 // Choose the intrinsic ID.
6477 const char *name;
6478 Intrinsic::ID ID;
6479 switch (BuiltinID) {
6480 default: llvm_unreachable("Unsupported intrinsic!");
6481 case X86::BI__builtin_ia32_cmpeqps:
6482 case X86::BI__builtin_ia32_cmpltps:
6483 case X86::BI__builtin_ia32_cmpleps:
6484 case X86::BI__builtin_ia32_cmpunordps:
6485 case X86::BI__builtin_ia32_cmpneqps:
6486 case X86::BI__builtin_ia32_cmpnltps:
6487 case X86::BI__builtin_ia32_cmpnleps:
6488 case X86::BI__builtin_ia32_cmpordps:
6489 name = "cmpps";
6490 ID = Intrinsic::x86_sse_cmp_ps;
6491 break;
6492 case X86::BI__builtin_ia32_cmpeqss:
6493 case X86::BI__builtin_ia32_cmpltss:
6494 case X86::BI__builtin_ia32_cmpless:
6495 case X86::BI__builtin_ia32_cmpunordss:
6496 case X86::BI__builtin_ia32_cmpneqss:
6497 case X86::BI__builtin_ia32_cmpnltss:
6498 case X86::BI__builtin_ia32_cmpnless:
6499 case X86::BI__builtin_ia32_cmpordss:
6500 name = "cmpss";
6501 ID = Intrinsic::x86_sse_cmp_ss;
6502 break;
6503 case X86::BI__builtin_ia32_cmpeqpd:
6504 case X86::BI__builtin_ia32_cmpltpd:
6505 case X86::BI__builtin_ia32_cmplepd:
6506 case X86::BI__builtin_ia32_cmpunordpd:
6507 case X86::BI__builtin_ia32_cmpneqpd:
6508 case X86::BI__builtin_ia32_cmpnltpd:
6509 case X86::BI__builtin_ia32_cmpnlepd:
6510 case X86::BI__builtin_ia32_cmpordpd:
6511 name = "cmppd";
6512 ID = Intrinsic::x86_sse2_cmp_pd;
6513 break;
6514 case X86::BI__builtin_ia32_cmpeqsd:
6515 case X86::BI__builtin_ia32_cmpltsd:
6516 case X86::BI__builtin_ia32_cmplesd:
6517 case X86::BI__builtin_ia32_cmpunordsd:
6518 case X86::BI__builtin_ia32_cmpneqsd:
6519 case X86::BI__builtin_ia32_cmpnltsd:
6520 case X86::BI__builtin_ia32_cmpnlesd:
6521 case X86::BI__builtin_ia32_cmpordsd:
6522 name = "cmpsd";
6523 ID = Intrinsic::x86_sse2_cmp_sd;
6524 break;
6525 }
6527 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
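// The predicate is now encoded as the trailing i8 immediate; for example,
// __builtin_ia32_cmpltps lowers to llvm.x86.sse.cmp.ps(a, b, i8 1).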
6528 llvm::Function *F = CGM.getIntrinsic(ID);
6529 return Builder.CreateCall(F, Ops, name);
6530 }
6531 }
6534 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
6535 const CallExpr *E) {
6536 SmallVector<Value*, 4> Ops;
6538 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
6539 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6541 Intrinsic::ID ID = Intrinsic::not_intrinsic;
6543 switch (BuiltinID) {
6544 default: return nullptr;
6546 // vec_ld, vec_lvsl, vec_lvsr
6547 case PPC::BI__builtin_altivec_lvx:
6548 case PPC::BI__builtin_altivec_lvxl:
6549 case PPC::BI__builtin_altivec_lvebx:
6550 case PPC::BI__builtin_altivec_lvehx:
6551 case PPC::BI__builtin_altivec_lvewx:
6552 case PPC::BI__builtin_altivec_lvsl:
6553 case PPC::BI__builtin_altivec_lvsr:
6554 case PPC::BI__builtin_vsx_lxvd2x:
6555 case PPC::BI__builtin_vsx_lxvw4x:
6556 {
6557 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
6559 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
6560 Ops.pop_back();
6562 switch (BuiltinID) {
6563 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
6564 case PPC::BI__builtin_altivec_lvx:
6565 ID = Intrinsic::ppc_altivec_lvx;
6566 break;
6567 case PPC::BI__builtin_altivec_lvxl:
6568 ID = Intrinsic::ppc_altivec_lvxl;
6569 break;
6570 case PPC::BI__builtin_altivec_lvebx:
6571 ID = Intrinsic::ppc_altivec_lvebx;
6572 break;
6573 case PPC::BI__builtin_altivec_lvehx:
6574 ID = Intrinsic::ppc_altivec_lvehx;
6575 break;
6576 case PPC::BI__builtin_altivec_lvewx:
6577 ID = Intrinsic::ppc_altivec_lvewx;
6578 break;
6579 case PPC::BI__builtin_altivec_lvsl:
6580 ID = Intrinsic::ppc_altivec_lvsl;
6581 break;
6582 case PPC::BI__builtin_altivec_lvsr:
6583 ID = Intrinsic::ppc_altivec_lvsr;
6584 break;
6585 case PPC::BI__builtin_vsx_lxvd2x:
6586 ID = Intrinsic::ppc_vsx_lxvd2x;
6587 break;
6588 case PPC::BI__builtin_vsx_lxvw4x:
6589 ID = Intrinsic::ppc_vsx_lxvw4x;
6590 break;
6591 }
6592 llvm::Function *F = CGM.getIntrinsic(ID);
6593 return Builder.CreateCall(F, Ops, "");
6594 }
6596 // vec_st
6597 case PPC::BI__builtin_altivec_stvx:
6598 case PPC::BI__builtin_altivec_stvxl:
6599 case PPC::BI__builtin_altivec_stvebx:
6600 case PPC::BI__builtin_altivec_stvehx:
6601 case PPC::BI__builtin_altivec_stvewx:
6602 case PPC::BI__builtin_vsx_stxvd2x:
6603 case PPC::BI__builtin_vsx_stxvw4x:
6604 {
6605 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
6606 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
6607 Ops.pop_back();
6609 switch (BuiltinID) {
6610 default: llvm_unreachable("Unsupported st intrinsic!");
6611 case PPC::BI__builtin_altivec_stvx:
6612 ID = Intrinsic::ppc_altivec_stvx;
6613 break;
6614 case PPC::BI__builtin_altivec_stvxl:
6615 ID = Intrinsic::ppc_altivec_stvxl;
6616 break;
6617 case PPC::BI__builtin_altivec_stvebx:
6618 ID = Intrinsic::ppc_altivec_stvebx;
6619 break;
6620 case PPC::BI__builtin_altivec_stvehx:
6621 ID = Intrinsic::ppc_altivec_stvehx;
6622 break;
6623 case PPC::BI__builtin_altivec_stvewx:
6624 ID = Intrinsic::ppc_altivec_stvewx;
6625 break;
6626 case PPC::BI__builtin_vsx_stxvd2x:
6627 ID = Intrinsic::ppc_vsx_stxvd2x;
6628 break;
6629 case PPC::BI__builtin_vsx_stxvw4x:
6630 ID = Intrinsic::ppc_vsx_stxvw4x;
6631 break;
6632 }
6633 llvm::Function *F = CGM.getIntrinsic(ID);
6634 return Builder.CreateCall(F, Ops, "");
6635 }
6636 // Square root
6637 case PPC::BI__builtin_vsx_xvsqrtsp:
6638 case PPC::BI__builtin_vsx_xvsqrtdp: {
6639 llvm::Type *ResultType = ConvertType(E->getType());
6640 Value *X = EmitScalarExpr(E->getArg(0));
6641 ID = Intrinsic::sqrt;
6642 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
6643 return Builder.CreateCall(F, X);
6644 }
6645 // Count leading zeros
6646 case PPC::BI__builtin_altivec_vclzb:
6647 case PPC::BI__builtin_altivec_vclzh:
6648 case PPC::BI__builtin_altivec_vclzw:
6649 case PPC::BI__builtin_altivec_vclzd: {
6650 llvm::Type *ResultType = ConvertType(E->getType());
6651 Value *X = EmitScalarExpr(E->getArg(0));
6652 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
6653 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
6654 return Builder.CreateCall(F, {X, Undef});
6655 }
6656 // Copy sign
6657 case PPC::BI__builtin_vsx_xvcpsgnsp:
6658 case PPC::BI__builtin_vsx_xvcpsgndp: {
6659 llvm::Type *ResultType = ConvertType(E->getType());
6660 Value *X = EmitScalarExpr(E->getArg(0));
6661 Value *Y = EmitScalarExpr(E->getArg(1));
6662 ID = Intrinsic::copysign;
6663 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
6664 return Builder.CreateCall(F, {X, Y});
6665 }
6666 // Rounding/truncation
6667 case PPC::BI__builtin_vsx_xvrspip:
6668 case PPC::BI__builtin_vsx_xvrdpip:
6669 case PPC::BI__builtin_vsx_xvrdpim:
6670 case PPC::BI__builtin_vsx_xvrspim:
6671 case PPC::BI__builtin_vsx_xvrdpi:
6672 case PPC::BI__builtin_vsx_xvrspi:
6673 case PPC::BI__builtin_vsx_xvrdpic:
6674 case PPC::BI__builtin_vsx_xvrspic:
6675 case PPC::BI__builtin_vsx_xvrdpiz:
6676 case PPC::BI__builtin_vsx_xvrspiz: {
6677 llvm::Type *ResultType = ConvertType(E->getType());
6678 Value *X = EmitScalarExpr(E->getArg(0));
6679 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
6680 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
6681 ID = Intrinsic::floor;
6682 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
6683 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
6684 ID = Intrinsic::round;
6685 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
6686 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
6687 ID = Intrinsic::nearbyint;
6688 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
6689 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
6690 ID = Intrinsic::ceil;
6691 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
6692 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
6693 ID = Intrinsic::trunc;
6694 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
6695 return Builder.CreateCall(F, X);
6696 }
6697 // FMA variations
6698 case PPC::BI__builtin_vsx_xvmaddadp:
6699 case PPC::BI__builtin_vsx_xvmaddasp:
6700 case PPC::BI__builtin_vsx_xvnmaddadp:
6701 case PPC::BI__builtin_vsx_xvnmaddasp:
6702 case PPC::BI__builtin_vsx_xvmsubadp:
6703 case PPC::BI__builtin_vsx_xvmsubasp:
6704 case PPC::BI__builtin_vsx_xvnmsubadp:
6705 case PPC::BI__builtin_vsx_xvnmsubasp: {
6706 llvm::Type *ResultType = ConvertType(E->getType());
6707 Value *X = EmitScalarExpr(E->getArg(0));
6708 Value *Y = EmitScalarExpr(E->getArg(1));
6709 Value *Z = EmitScalarExpr(E->getArg(2));
6710 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
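// The negated forms are built from llvm.fma plus a subtraction from Zero
// (the zero value used for FP negation): nmadd negates the fma result,
// msub negates the addend Z, and nmsub negates both.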
6711 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
6712 switch (BuiltinID) {
6713 case PPC::BI__builtin_vsx_xvmaddadp:
6714 case PPC::BI__builtin_vsx_xvmaddasp:
6715 return Builder.CreateCall(F, {X, Y, Z});
6716 case PPC::BI__builtin_vsx_xvnmaddadp:
6717 case PPC::BI__builtin_vsx_xvnmaddasp:
6718 return Builder.CreateFSub(Zero,
6719 Builder.CreateCall(F, {X, Y, Z}), "sub");
6720 case PPC::BI__builtin_vsx_xvmsubadp:
6721 case PPC::BI__builtin_vsx_xvmsubasp:
6722 return Builder.CreateCall(F,
6723 {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
6724 case PPC::BI__builtin_vsx_xvnmsubadp:
6725 case PPC::BI__builtin_vsx_xvnmsubasp:
6726 Value *FsubRes =
6727 Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
6728 return Builder.CreateFSub(Zero, FsubRes, "sub");
6729 }
6730 llvm_unreachable("Unknown FMA operation");
6731 return nullptr; // Suppress no-return warning
6732 }
6733 }
6734 }
6736 // Emit an intrinsic that has 1 float or double.
6737 static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF,
6738 const CallExpr *E,
6739 unsigned IntrinsicID) {
6740 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
6742 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
6743 return CGF.Builder.CreateCall(F, Src0);
6744 }
6746 // Emit an intrinsic that has 3 float or double operands.
6747 static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF,
6748 const CallExpr *E,
6749 unsigned IntrinsicID) {
6750 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
6751 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
6752 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
6754 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
6755 return CGF.Builder.CreateCall(F, {Src0, Src1, Src2});
6756 }
6758 // Emit an intrinsic that has 1 float or double operand, and 1 integer.
6759 static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
6760 const CallExpr *E,
6761 unsigned IntrinsicID) {
6762 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
6763 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
6765 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
6766 return CGF.Builder.CreateCall(F, {Src0, Src1});
6767 }
6769 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
6770 const CallExpr *E) {
6771 switch (BuiltinID) {
6772 case AMDGPU::BI__builtin_amdgpu_div_scale:
6773 case AMDGPU::BI__builtin_amdgpu_div_scalef: {
6774 // Translate from the intrinsic's struct return to the builtin's out
6775 // argument.
6777 std::pair<llvm::Value *, unsigned> FlagOutPtr
6778 = EmitPointerWithAlignment(E->getArg(3));
6780 llvm::Value *X = EmitScalarExpr(E->getArg(0));
6781 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
6782 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
6784 llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::AMDGPU_div_scale,
6785 X->getType());
6787 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
6789 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
6790 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
6792 llvm::Type *RealFlagType
6793 = FlagOutPtr.first->getType()->getPointerElementType();
6795 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
6796 llvm::StoreInst *FlagStore = Builder.CreateStore(FlagExt, FlagOutPtr.first);
6797 FlagStore->setAlignment(FlagOutPtr.second);
6798 return Result;
6799 }
6800 case AMDGPU::BI__builtin_amdgpu_div_fmas:
6801 case AMDGPU::BI__builtin_amdgpu_div_fmasf: {
6802 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
6803 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
6804 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
6805 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
6807 llvm::Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_div_fmas,
6808 Src0->getType());
6809 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
6810 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
6811 }
6812 case AMDGPU::BI__builtin_amdgpu_div_fixup:
6813 case AMDGPU::BI__builtin_amdgpu_div_fixupf:
6814 return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fixup);
6815 case AMDGPU::BI__builtin_amdgpu_trig_preop:
6816 case AMDGPU::BI__builtin_amdgpu_trig_preopf:
6817 return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_trig_preop);
6818 case AMDGPU::BI__builtin_amdgpu_rcp:
6819 case AMDGPU::BI__builtin_amdgpu_rcpf:
6820 return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp);
6821 case AMDGPU::BI__builtin_amdgpu_rsq:
6822 case AMDGPU::BI__builtin_amdgpu_rsqf:
6823 return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq);
6824 case AMDGPU::BI__builtin_amdgpu_rsq_clamped:
6825 case AMDGPU::BI__builtin_amdgpu_rsq_clampedf:
6826 return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq_clamped);
6827 case AMDGPU::BI__builtin_amdgpu_ldexp:
6828 case AMDGPU::BI__builtin_amdgpu_ldexpf:
6829 return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_ldexp);
6830 case AMDGPU::BI__builtin_amdgpu_class:
6831 case AMDGPU::BI__builtin_amdgpu_classf:
6832 return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_class);
6833 default:
6834 return nullptr;
6835 }
6836 }
6838 /// Handle a SystemZ function in which the final argument is a pointer
6839 /// to an int that receives the post-instruction CC value. At the LLVM level
6840 /// this is represented as a function that returns a {result, cc} pair.
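// For example (illustrative), __builtin_s390_vceqbs(a, b, &cc) becomes a
// call to llvm.s390.vceqbs; element 1 of the returned pair is stored to
// *cc and element 0 is the builtin's value.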
6841 static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
6842 unsigned IntrinsicID,
6843 const CallExpr *E) {
6844 unsigned NumArgs = E->getNumArgs() - 1;
6845 SmallVector<Value *, 8> Args(NumArgs);
6846 for (unsigned I = 0; I < NumArgs; ++I)
6847 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
6848 Value *CCPtr = CGF.EmitScalarExpr(E->getArg(NumArgs));
6849 Value *F = CGF.CGM.getIntrinsic(IntrinsicID);
6850 Value *Call = CGF.Builder.CreateCall(F, Args);
6851 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
6852 CGF.Builder.CreateStore(CC, CCPtr);
6853 return CGF.Builder.CreateExtractValue(Call, 0);
6854 }
6856 Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
6857 const CallExpr *E) {
6858 switch (BuiltinID) {
6859 case SystemZ::BI__builtin_tbegin: {
6860 Value *TDB = EmitScalarExpr(E->getArg(0));
6861 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
6862 Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
6863 return Builder.CreateCall(F, {TDB, Control});
6864 }
6865 case SystemZ::BI__builtin_tbegin_nofloat: {
6866 Value *TDB = EmitScalarExpr(E->getArg(0));
6867 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
6868 Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
6869 return Builder.CreateCall(F, {TDB, Control});
6870 }
6871 case SystemZ::BI__builtin_tbeginc: {
6872 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
6873 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
6874 Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
6875 return Builder.CreateCall(F, {TDB, Control});
6876 }
6877 case SystemZ::BI__builtin_tabort: {
6878 Value *Data = EmitScalarExpr(E->getArg(0));
6879 Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
6880 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
6881 }
6882 case SystemZ::BI__builtin_non_tx_store: {
6883 Value *Address = EmitScalarExpr(E->getArg(0));
6884 Value *Data = EmitScalarExpr(E->getArg(1));
6885 Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
6886 return Builder.CreateCall(F, {Data, Address});
6887 }
6889 // Vector builtins. Note that most vector builtins are mapped automatically
6890 // to target-specific LLVM intrinsics. The ones handled specially here can
6891 // be represented via standard LLVM IR, which is preferable to enable common
6892 // LLVM optimizations.
6894 case SystemZ::BI__builtin_s390_vpopctb:
6895 case SystemZ::BI__builtin_s390_vpopcth:
6896 case SystemZ::BI__builtin_s390_vpopctf:
6897 case SystemZ::BI__builtin_s390_vpopctg: {
6898 llvm::Type *ResultType = ConvertType(E->getType());
6899 Value *X = EmitScalarExpr(E->getArg(0));
6900 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
6901 return Builder.CreateCall(F, X);
6902 }
6904 case SystemZ::BI__builtin_s390_vclzb:
6905 case SystemZ::BI__builtin_s390_vclzh:
6906 case SystemZ::BI__builtin_s390_vclzf:
6907 case SystemZ::BI__builtin_s390_vclzg: {
6908 llvm::Type *ResultType = ConvertType(E->getType());
6909 Value *X = EmitScalarExpr(E->getArg(0));
6910 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
6911 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
6912 return Builder.CreateCall(F, {X, Undef});
6913 }
6915 case SystemZ::BI__builtin_s390_vctzb:
6916 case SystemZ::BI__builtin_s390_vctzh:
6917 case SystemZ::BI__builtin_s390_vctzf:
6918 case SystemZ::BI__builtin_s390_vctzg: {
6919 llvm::Type *ResultType = ConvertType(E->getType());
6920 Value *X = EmitScalarExpr(E->getArg(0));
6921 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
6922 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
6923 return Builder.CreateCall(F, {X, Undef});
6924 }
6926 case SystemZ::BI__builtin_s390_vfsqdb: {
6927 llvm::Type *ResultType = ConvertType(E->getType());
6928 Value *X = EmitScalarExpr(E->getArg(0));
6929 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
6930 return Builder.CreateCall(F, X);
6931 }
6932 case SystemZ::BI__builtin_s390_vfmadb: {
6933 llvm::Type *ResultType = ConvertType(E->getType());
6934 Value *X = EmitScalarExpr(E->getArg(0));
6935 Value *Y = EmitScalarExpr(E->getArg(1));
6936 Value *Z = EmitScalarExpr(E->getArg(2));
6937 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
6938 return Builder.CreateCall(F, {X, Y, Z});
6939 }
6940 case SystemZ::BI__builtin_s390_vfmsdb: {
6941 llvm::Type *ResultType = ConvertType(E->getType());
6942 Value *X = EmitScalarExpr(E->getArg(0));
6943 Value *Y = EmitScalarExpr(E->getArg(1));
6944 Value *Z = EmitScalarExpr(E->getArg(2));
6945 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
6946 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
6947 return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
6948 }
6949 case SystemZ::BI__builtin_s390_vflpdb: {
6950 llvm::Type *ResultType = ConvertType(E->getType());
6951 Value *X = EmitScalarExpr(E->getArg(0));
6952 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
6953 return Builder.CreateCall(F, X);
6954 }
6955 case SystemZ::BI__builtin_s390_vflndb: {
6956 llvm::Type *ResultType = ConvertType(E->getType());
6957 Value *X = EmitScalarExpr(E->getArg(0));
6958 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
6959 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
6960 return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub");
6961 }
6962 case SystemZ::BI__builtin_s390_vfidb: {
6963 llvm::Type *ResultType = ConvertType(E->getType());
6964 Value *X = EmitScalarExpr(E->getArg(0));
6965 // Constant-fold the M4 and M5 mask arguments.
6966 llvm::APSInt M4, M5;
6967 bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
6968 bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
6969 assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
6970 (void)IsConstM4; (void)IsConstM5;
6971 // Check whether this instance of vfidb can be represented via a LLVM
6972 // standard intrinsic. We only support some combinations of M4 and M5.
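// Supported (M4, M5) pairs: (0,0) -> rint, (4,0) -> nearbyint,
// (4,1) -> round, (4,5) -> trunc, (4,6) -> ceil, (4,7) -> floor; anything
// else falls back to the target-specific s390.vfidb intrinsic below.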
6973 Intrinsic::ID ID = Intrinsic::not_intrinsic;
6974 switch (M4.getZExtValue()) {
6975 default: break;
6976 case 0: // IEEE-inexact exception allowed
6977 switch (M5.getZExtValue()) {
6978 default: break;
6979 case 0: ID = Intrinsic::rint; break;
6980 }
6981 break;
6982 case 4: // IEEE-inexact exception suppressed
6983 switch (M5.getZExtValue()) {
6984 default: break;
6985 case 0: ID = Intrinsic::nearbyint; break;
6986 case 1: ID = Intrinsic::round; break;
6987 case 5: ID = Intrinsic::trunc; break;
6988 case 6: ID = Intrinsic::ceil; break;
6989 case 7: ID = Intrinsic::floor; break;
6990 }
6991 break;
6992 }
6993 if (ID != Intrinsic::not_intrinsic) {
6994 Function *F = CGM.getIntrinsic(ID, ResultType);
6995 return Builder.CreateCall(F, X);
6996 }
6997 Function *F = CGM.getIntrinsic(Intrinsic::s390_vfidb);
6998 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
6999 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
7000 return Builder.CreateCall(F, {X, M4Value, M5Value});
7001 }
7003 // Vector intrinsics that output the post-instruction CC value.
7005 #define INTRINSIC_WITH_CC(NAME) \
7006 case SystemZ::BI__builtin_##NAME: \
7007 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
  INTRINSIC_WITH_CC(s390_vpkshs);
  INTRINSIC_WITH_CC(s390_vpksfs);
  INTRINSIC_WITH_CC(s390_vpksgs);

  INTRINSIC_WITH_CC(s390_vpklshs);
  INTRINSIC_WITH_CC(s390_vpklsfs);
  INTRINSIC_WITH_CC(s390_vpklsgs);

  INTRINSIC_WITH_CC(s390_vceqbs);
  INTRINSIC_WITH_CC(s390_vceqhs);
  INTRINSIC_WITH_CC(s390_vceqfs);
  INTRINSIC_WITH_CC(s390_vceqgs);

  INTRINSIC_WITH_CC(s390_vchbs);
  INTRINSIC_WITH_CC(s390_vchhs);
  INTRINSIC_WITH_CC(s390_vchfs);
  INTRINSIC_WITH_CC(s390_vchgs);

  INTRINSIC_WITH_CC(s390_vchlbs);
  INTRINSIC_WITH_CC(s390_vchlhs);
  INTRINSIC_WITH_CC(s390_vchlfs);
  INTRINSIC_WITH_CC(s390_vchlgs);

  INTRINSIC_WITH_CC(s390_vfaebs);
  INTRINSIC_WITH_CC(s390_vfaehs);
  INTRINSIC_WITH_CC(s390_vfaefs);

  INTRINSIC_WITH_CC(s390_vfaezbs);
  INTRINSIC_WITH_CC(s390_vfaezhs);
  INTRINSIC_WITH_CC(s390_vfaezfs);

  INTRINSIC_WITH_CC(s390_vfeebs);
  INTRINSIC_WITH_CC(s390_vfeehs);
  INTRINSIC_WITH_CC(s390_vfeefs);

  INTRINSIC_WITH_CC(s390_vfeezbs);
  INTRINSIC_WITH_CC(s390_vfeezhs);
  INTRINSIC_WITH_CC(s390_vfeezfs);

  INTRINSIC_WITH_CC(s390_vfenebs);
  INTRINSIC_WITH_CC(s390_vfenehs);
  INTRINSIC_WITH_CC(s390_vfenefs);

  INTRINSIC_WITH_CC(s390_vfenezbs);
  INTRINSIC_WITH_CC(s390_vfenezhs);
  INTRINSIC_WITH_CC(s390_vfenezfs);

  INTRINSIC_WITH_CC(s390_vistrbs);
  INTRINSIC_WITH_CC(s390_vistrhs);
  INTRINSIC_WITH_CC(s390_vistrfs);

  INTRINSIC_WITH_CC(s390_vstrcbs);
  INTRINSIC_WITH_CC(s390_vstrchs);
  INTRINSIC_WITH_CC(s390_vstrcfs);

  INTRINSIC_WITH_CC(s390_vstrczbs);
  INTRINSIC_WITH_CC(s390_vstrczhs);
  INTRINSIC_WITH_CC(s390_vstrczfs);

  INTRINSIC_WITH_CC(s390_vfcedbs);
  INTRINSIC_WITH_CC(s390_vfchdbs);
  INTRINSIC_WITH_CC(s390_vfchedbs);

  INTRINSIC_WITH_CC(s390_vftcidb);
#undef INTRINSIC_WITH_CC

  default:
    return nullptr;
  }
}

Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
                                             const CallExpr *E) {
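  // The generic-address-space integer atomics below each lower to a single
  // LLVM atomicrmw instruction; MakeBinaryAtomicValue emits the atomicrmw
  // together with the required operand conversions.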
  switch (BuiltinID) {
  case NVPTX::BI__nvvm_atom_add_gen_i:
  case NVPTX::BI__nvvm_atom_add_gen_l:
  case NVPTX::BI__nvvm_atom_add_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);

  case NVPTX::BI__nvvm_atom_sub_gen_i:
  case NVPTX::BI__nvvm_atom_sub_gen_l:
  case NVPTX::BI__nvvm_atom_sub_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);

  case NVPTX::BI__nvvm_atom_and_gen_i:
  case NVPTX::BI__nvvm_atom_and_gen_l:
  case NVPTX::BI__nvvm_atom_and_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);

  case NVPTX::BI__nvvm_atom_or_gen_i:
  case NVPTX::BI__nvvm_atom_or_gen_l:
  case NVPTX::BI__nvvm_atom_or_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);

  case NVPTX::BI__nvvm_atom_xor_gen_i:
  case NVPTX::BI__nvvm_atom_xor_gen_l:
  case NVPTX::BI__nvvm_atom_xor_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);

  case NVPTX::BI__nvvm_atom_xchg_gen_i:
  case NVPTX::BI__nvvm_atom_xchg_gen_l:
  case NVPTX::BI__nvvm_atom_xchg_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
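
  // Max and min must respect signedness: the _ui/_ul/_ull variants operate
  // on unsigned values, so they take the unsigned atomicrmw flavors
  // (umax/umin) rather than the signed ones.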
  case NVPTX::BI__nvvm_atom_max_gen_i:
  case NVPTX::BI__nvvm_atom_max_gen_l:
  case NVPTX::BI__nvvm_atom_max_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
  case NVPTX::BI__nvvm_atom_max_gen_ui:
  case NVPTX::BI__nvvm_atom_max_gen_ul:
  case NVPTX::BI__nvvm_atom_max_gen_ull:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);

  case NVPTX::BI__nvvm_atom_min_gen_i:
  case NVPTX::BI__nvvm_atom_min_gen_l:
  case NVPTX::BI__nvvm_atom_min_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
  case NVPTX::BI__nvvm_atom_min_gen_ui:
  case NVPTX::BI__nvvm_atom_min_gen_ul:
  case NVPTX::BI__nvvm_atom_min_gen_ull:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
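
  // __nvvm_atom_cas_gen_* returns the old value at the location (matching
  // CUDA's atomicCAS), not the cmpxchg success flag, so ReturnBool must be
  // false here.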
  case NVPTX::BI__nvvm_atom_cas_gen_i:
  case NVPTX::BI__nvvm_atom_cas_gen_l:
  case NVPTX::BI__nvvm_atom_cas_gen_ll:
    return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);

  case NVPTX::BI__nvvm_atom_add_gen_f: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    // atomicrmw only deals with integer arguments, so we need to use LLVM's
    // nvvm_atomic_load_add_f32 intrinsic for the float case.
    Value *FnALAF32 =
        CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType());
    return Builder.CreateCall(FnALAF32, {Ptr, Val});
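    // Illustrative only: for a float* in the generic address space, the call
    // above produces roughly
    //   %old = call float @llvm.nvvm.atomic.load.add.f32.p0f32(float* %ptr,
    //                                                          float %val)
    // (the exact mangled intrinsic name depends on the pointer's address
    // space).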