//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10; // Skip "__builtin_".

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
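// Illustrative effect (a sketch, not emitted code): for the id of
// "__builtin_fabsf" this returns a declaration of "fabsf" with the builtin's
// own type, so a call like
//   float r = __builtin_fabsf(x);
// is emitted as a plain call to fabsf.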
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
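// As an illustration, __sync_fetch_and_add(p, v) on an 'int' comes through
// here with Kind == AtomicRMWInst::Add and yields IR along the lines of
// (names illustrative):
//
//   %old = atomicrmw add i32* %p, i32 %v seq_cst
//
// with %old converted back to the expression's original type.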
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: llvm_unreachable("Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
}
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E->getLocStart(),
                      ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}
/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y);
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
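// For example, with IntrinsicID == llvm::Intrinsic::sadd_with_overflow and
// i32 operands this emits roughly (names illustrative):
//
//   %pair  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum   = extractvalue { i32, i1 } %pair, 0   ; returned
//   %carry = extractvalue { i32, i1 } %pair, 1   ; written to Carry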
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = (BuiltinID == Builtin::BI__va_start)
                          ? EmitScalarExpr(E->getArg(0))
                          : EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
        Builder.CreateICmpSGE(ArgValue,
                              llvm::Constant::getNullValue(ArgValue->getType()),
                              "abscond");
    Value *Result =
        Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
        Imag->getType()->isFPOrFPVectorTy()
            ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
            : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
                                                       Builder.getTrue()),
                                   llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
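// Worked example: ffs(8) computes cttz(8) + 1 == 3 + 1 == 4 (bit positions
// are 1-based), and the select maps the x == 0 case to 0. Passing true for
// cttz's is-zero-undef flag is safe here because the select never uses the
// cttz result when the input is zero.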
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
    // FIXME: Get right address space.
    llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
    return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)), CI));
  }
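// The emitted call looks roughly like (names illustrative):
//
//   %size = call i64 @llvm.objectsize.i64.p0i8(i8* %obj, i1 %min)
//
// where the i1 flag is bit 1 of the type argument: true for types 2/3
// (return the minimum size) and false for types 0/1 (return the maximum).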
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin___clear_cache: {
    Value *Begin = EmitScalarExpr(E->getArg(0));
    Value *End = EmitScalarExpr(E->getArg(1));
    Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
    return RValue::get(Builder.CreateCall2(F, Begin, End));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__debugbreak: {
    Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts->Unreachable) {
      SanitizerScope SanScope(this);
      EmitCheck(Builder.getFalse(), "builtin_unreachable",
                EmitCheckSourceLocation(E->getExprLoc()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    } else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),
                              "isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
        Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),
                              "isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
        Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),
                              "isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
        Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                          "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
        Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                              "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
    Value *NormalResult =
        Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                             EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }
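// Sketch of the control flow emitted above (block names illustrative):
//
//   begin:    V == 0.0        ? end(FP_ZERO literal) : not_zero
//   not_zero: V != V          ? end(FP_NAN literal)  : not_nan
//   not_nan:  |V| == infinity ? end(FP_INF literal)  : not_inf
//   not_inf:  select |V| >= min_normal, FP_NORMAL, FP_SUBNORMAL --> end
//
// with the PHI in 'end' collecting the incoming literal from each block.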
  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend?  Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                    ? Intrinsic::eh_return_i32
                                    : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms.  Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
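// Buffer layout assumed here, matching llvm.eh.sjlj.setjmp: slot 0 holds
// the frame address and slot 2 the saved stack pointer; the remaining slot
// is managed by the intrinsic itself.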
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent,
                                                llvm::SequentiallyConsistent);
    Result = Builder.CreateExtractValue(Result, 0);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Pair = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                              llvm::SequentiallyConsistent,
                                              llvm::SequentiallyConsistent);
    Value *Result = Builder.CreateExtractValue(Pair, 1);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }
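// For the 4-byte variant this comes out roughly as (names illustrative):
//
//   %pair = cmpxchg i32* %p, i32 %expected, i32 %desired seq_cst seq_cst
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   %res  = zext i1 %ok to i32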
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(nullptr);
  }
  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(nullptr);
  }
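// In other words, __sync_synchronize() is emitted simply as a
// sequentially-consistent, cross-thread fence:
//
//   fence seq_cst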
  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                               FunctionType::ExtInfo(),
                                               RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
  }
  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = nullptr;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Monotonic);
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Acquire);
        break;
      case 3:  // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Release);
        break;
      case 4:  // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::AcquireRelease);
        break;
      case 5:  // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
      llvm::Monotonic, llvm::Acquire, llvm::Release,
      llvm::AcquireRelease, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }
  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::SequentiallyConsistent);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }
  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SynchronizationScope Scope;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      Scope = llvm::SingleThread;
    else
      Scope = llvm::CrossThread;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::Acquire, Scope);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::Release, Scope);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AcquireRelease, Scope);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }
  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
    // in finite- or unsafe-math mode (the intrinsic has different semantics
    // for handling negative numbers compared to the library function, so
    // -fmath-errno=0 is not enough).
    if (!FD->hasAttr<ConstAttr>())
      break;
    if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
          CGM.getCodeGenOpts().NoNaNsFPMath))
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
    return RValue::get(Builder.CreateCall(F, Arg0));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Transform a call to pow* into a @llvm.pow.* intrinsic call.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }
  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(Builder.CreateCall3(F, FirstArg,
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
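// E.g. for a 64-bit 'double' argument this reduces to a sign-bit test on
// the bit pattern (names illustrative):
//
//   %bits = bitcast double %x to i64
//   %sign = icmp slt i64 %bits, 0    ; true iff the sign bit is set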
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially casted, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {

    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    std::pair<llvm::Value*, unsigned> CarryOutPtr =
      EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                         CarryOutPtr.first);
    CarryOutStore->setAlignment(CarryOutPtr.second);
    return RValue::get(Sum2);
  }
1439 case Builtin::BI__builtin_uadd_overflow:
1440 case Builtin::BI__builtin_uaddl_overflow:
1441 case Builtin::BI__builtin_uaddll_overflow:
1442 case Builtin::BI__builtin_usub_overflow:
1443 case Builtin::BI__builtin_usubl_overflow:
1444 case Builtin::BI__builtin_usubll_overflow:
1445 case Builtin::BI__builtin_umul_overflow:
1446 case Builtin::BI__builtin_umull_overflow:
1447 case Builtin::BI__builtin_umulll_overflow:
1448 case Builtin::BI__builtin_sadd_overflow:
1449 case Builtin::BI__builtin_saddl_overflow:
1450 case Builtin::BI__builtin_saddll_overflow:
1451 case Builtin::BI__builtin_ssub_overflow:
1452 case Builtin::BI__builtin_ssubl_overflow:
1453 case Builtin::BI__builtin_ssubll_overflow:
1454 case Builtin::BI__builtin_smul_overflow:
1455 case Builtin::BI__builtin_smull_overflow:
1456 case Builtin::BI__builtin_smulll_overflow: {
1458 // We translate all of these builtins directly to the relevant llvm IR node.
1460 // Scalarize our inputs.
1461 llvm::Value *X = EmitScalarExpr(E->getArg(0));
1462 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
1463 std::pair<llvm::Value *, unsigned> SumOutPtr =
1464 EmitPointerWithAlignment(E->getArg(2));
1466 // Decide which of the overflow intrinsics we are lowering to:
1467 llvm::Intrinsic::ID IntrinsicId;
1468 switch (BuiltinID) {
1469 default: llvm_unreachable("Unknown security overflow builtin id.");
1470 case Builtin::BI__builtin_uadd_overflow:
1471 case Builtin::BI__builtin_uaddl_overflow:
1472 case Builtin::BI__builtin_uaddll_overflow:
1473 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
1475 case Builtin::BI__builtin_usub_overflow:
1476 case Builtin::BI__builtin_usubl_overflow:
1477 case Builtin::BI__builtin_usubll_overflow:
1478 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
1480 case Builtin::BI__builtin_umul_overflow:
1481 case Builtin::BI__builtin_umull_overflow:
1482 case Builtin::BI__builtin_umulll_overflow:
1483 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
1485 case Builtin::BI__builtin_sadd_overflow:
1486 case Builtin::BI__builtin_saddl_overflow:
1487 case Builtin::BI__builtin_saddll_overflow:
1488 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
1490 case Builtin::BI__builtin_ssub_overflow:
1491 case Builtin::BI__builtin_ssubl_overflow:
1492 case Builtin::BI__builtin_ssubll_overflow:
1493 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
1495 case Builtin::BI__builtin_smul_overflow:
1496 case Builtin::BI__builtin_smull_overflow:
1497 case Builtin::BI__builtin_smulll_overflow:
1498 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
1504 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
1505 llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
1506 SumOutStore->setAlignment(SumOutPtr.second);
1508 return RValue::get(Carry);
1509 }
1510 case Builtin::BI__builtin_addressof:
1511 return RValue::get(EmitLValue(E->getArg(0)).getAddress());
1512 case Builtin::BI__builtin_operator_new:
1513 return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
1514 E->getArg(0), false);
1515 case Builtin::BI__builtin_operator_delete:
1516 return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
1517 E->getArg(0), true);
1518 case Builtin::BI__noop:
1519 // __noop always evaluates to an integer literal zero.
1520 return RValue::get(ConstantInt::get(IntTy, 0));
1521 case Builtin::BI__assume:
1522 // Until LLVM supports assumptions at the IR level, this becomes nothing.
1523 return RValue::get(nullptr);
1524 case Builtin::BI_InterlockedExchange:
1525 case Builtin::BI_InterlockedExchangePointer:
1526 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
1527 case Builtin::BI_InterlockedCompareExchangePointer: {
1528 llvm::Type *RTy;
1529 llvm::IntegerType *IntType =
1530 IntegerType::get(getLLVMContext(),
1531 getContext().getTypeSize(E->getType()));
1532 llvm::Type *IntPtrType = IntType->getPointerTo();
1534 llvm::Value *Destination =
1535 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
1537 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
1538 RTy = Exchange->getType();
1539 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
1541 llvm::Value *Comparand =
1542 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
1544 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
1545 SequentiallyConsistent,
1546 SequentiallyConsistent);
1547 Result->setVolatile(true);
1549 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
1550 0),
1551 RTy));
1552 }
1553 case Builtin::BI_InterlockedCompareExchange: {
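// _InterlockedCompareExchange takes (Destination, Exchange, Comparand),
// while CreateAtomicCmpXchg expects (Ptr, Cmp, New); hence the argument
// order 0, 2, 1 below. Extracting member 0 of the result yields the value
// originally at Destination, matching the MSVC return value.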
1554 AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
1555 EmitScalarExpr(E->getArg(0)),
1556 EmitScalarExpr(E->getArg(2)),
1557 EmitScalarExpr(E->getArg(1)),
1558 SequentiallyConsistent,
1559 SequentiallyConsistent);
1560 CXI->setVolatile(true);
1561 return RValue::get(Builder.CreateExtractValue(CXI, 0));
1562 }
1563 case Builtin::BI_InterlockedIncrement: {
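// LLVM has no atomic increment: emit a fetch-and-add of 1 (which returns
// the old value) and add 1 again to produce the new value that
// _InterlockedIncrement is specified to return.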
1564 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
1565 llvm::AtomicRMWInst::Add,
1566 EmitScalarExpr(E->getArg(0)),
1567 ConstantInt::get(Int32Ty, 1),
1568 llvm::SequentiallyConsistent);
1569 RMWI->setVolatile(true);
1570 return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
1571 }
1572 case Builtin::BI_InterlockedDecrement: {
1573 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
1574 llvm::AtomicRMWInst::Sub,
1575 EmitScalarExpr(E->getArg(0)),
1576 ConstantInt::get(Int32Ty, 1),
1577 llvm::SequentiallyConsistent);
1578 RMWI->setVolatile(true);
1579 return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
1580 }
1581 case Builtin::BI_InterlockedExchangeAdd: {
1582 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
1583 llvm::AtomicRMWInst::Add,
1584 EmitScalarExpr(E->getArg(0)),
1585 EmitScalarExpr(E->getArg(1)),
1586 llvm::SequentiallyConsistent);
1587 RMWI->setVolatile(true);
1588 return RValue::get(RMWI);
1589 }
1590 }
1592 // If this is an alias for a lib function (e.g. __builtin_sin), emit
1593 // the call using the normal call path, but using the unmangled
1594 // version of the function name.
1595 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
1596 return emitLibraryCall(*this, FD, E,
1597 CGM.getBuiltinLibFunction(FD, BuiltinID));
1599 // If this is a predefined lib function (e.g. malloc), emit the call
1600 // using exactly the normal call path.
1601 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
1602 return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
1604 // See if we have a target specific intrinsic.
1605 const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
1606 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
1607 if (const char *Prefix =
1608 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
1609 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
1610 // NOTE: we don't need to perform a compatibility flag check here, since
1611 // these intrinsics are declared in Builtins*.def via LANGBUILTIN, which
1612 // restricts the MS builtins to ALL_MS_LANGUAGES, so they were filtered earlier.
1613 if (IntrinsicID == Intrinsic::not_intrinsic)
1614 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name);
1615 }
1617 if (IntrinsicID != Intrinsic::not_intrinsic) {
1618 SmallVector<Value*, 16> Args;
1620 // Find out if any arguments are required to be integer constant
1621 // expressions.
1622 unsigned ICEArguments = 0;
1623 ASTContext::GetBuiltinTypeError Error;
1624 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
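// Each set bit in ICEArguments marks an argument that must be an integer
// constant expression; those are constant-folded below instead of being
// emitted as ordinary scalars.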
1625 assert(Error == ASTContext::GE_None && "Should not codegen an error");
1627 Function *F = CGM.getIntrinsic(IntrinsicID);
1628 llvm::FunctionType *FTy = F->getFunctionType();
1630 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
1631 Value *ArgValue;
1632 // If this is a normal argument, just emit it as a scalar.
1633 if ((ICEArguments & (1 << i)) == 0) {
1634 ArgValue = EmitScalarExpr(E->getArg(i));
1635 } else {
1636 // If this is required to be a constant, constant fold it so that we
1637 // know that the generated intrinsic gets a ConstantInt.
1638 llvm::APSInt Result;
1639 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
1640 assert(IsConst && "Constant arg isn't actually constant?");
1641 (void)IsConst;
1642 ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
1643 }
1645 // If the intrinsic arg type is different from the builtin arg type
1646 // we need to do a bit cast.
1647 llvm::Type *PTy = FTy->getParamType(i);
1648 if (PTy != ArgValue->getType()) {
1649 assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
1650 "Must be able to losslessly bit cast to param");
1651 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
1652 }
1654 Args.push_back(ArgValue);
1655 }
1657 Value *V = Builder.CreateCall(F, Args);
1658 QualType BuiltinRetType = E->getType();
1660 llvm::Type *RetTy = VoidTy;
1661 if (!BuiltinRetType->isVoidType())
1662 RetTy = ConvertType(BuiltinRetType);
1664 if (RetTy != V->getType()) {
1665 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
1666 "Must be able to losslessly bit cast result type");
1667 V = Builder.CreateBitCast(V, RetTy);
1668 }
1670 return RValue::get(V);
1671 }
1673 // See if we have a target specific builtin that needs to be lowered.
1674 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
1675 return RValue::get(V);
1677 ErrorUnsupported(E, "builtin function");
1679 // Unknown builtin, for now just dump it out and return undef.
1680 return GetUndefRValue(E->getType());
1681 }
1683 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
1684 const CallExpr *E) {
1685 switch (getTarget().getTriple().getArch()) {
1686 case llvm::Triple::arm:
1687 case llvm::Triple::armeb:
1688 case llvm::Triple::thumb:
1689 case llvm::Triple::thumbeb:
1690 return EmitARMBuiltinExpr(BuiltinID, E);
1691 case llvm::Triple::aarch64:
1692 case llvm::Triple::aarch64_be:
1693 case llvm::Triple::arm64:
1694 case llvm::Triple::arm64_be:
1695 return EmitAArch64BuiltinExpr(BuiltinID, E);
1696 case llvm::Triple::x86:
1697 case llvm::Triple::x86_64:
1698 return EmitX86BuiltinExpr(BuiltinID, E);
1699 case llvm::Triple::ppc:
1700 case llvm::Triple::ppc64:
1701 case llvm::Triple::ppc64le:
1702 return EmitPPCBuiltinExpr(BuiltinID, E);
1703 case llvm::Triple::r600:
1704 return EmitR600BuiltinExpr(BuiltinID, E);
1705 default:
1706 return nullptr;
1707 }
1708 }
1710 static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
1711 NeonTypeFlags TypeFlags,
1712 bool V1Ty=false) {
1713 int IsQuad = TypeFlags.isQuad();
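// Lane counts follow from the total vector width: e.g. (8 << IsQuad) is
// 8 x i8 for a 64-bit vector and 16 x i8 for a 128-bit one. V1Ty forces a
// single-lane vector, which some scalar (SISD) intrinsics use.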
1714 switch (TypeFlags.getEltType()) {
1715 case NeonTypeFlags::Int8:
1716 case NeonTypeFlags::Poly8:
1717 return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
1718 case NeonTypeFlags::Int16:
1719 case NeonTypeFlags::Poly16:
1720 case NeonTypeFlags::Float16:
1721 return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
1722 case NeonTypeFlags::Int32:
1723 return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
1724 case NeonTypeFlags::Int64:
1725 case NeonTypeFlags::Poly64:
1726 return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
1727 case NeonTypeFlags::Poly128:
1728 // FIXME: i128 and f128 are not fully supported in Clang and LLVM;
1729 // a lot of the i128 and f128 API is missing,
1730 // so we use v16i8 to represent poly128 and let it get pattern matched.
1731 return llvm::VectorType::get(CGF->Int8Ty, 16);
1732 case NeonTypeFlags::Float32:
1733 return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
1734 case NeonTypeFlags::Float64:
1735 return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
1737 llvm_unreachable("Unknown vector element type!");
1738 }
1740 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
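// Splat lane C of V across all lanes: a shufflevector whose mask repeats
// the constant index C in every position selects that one lane everywhere.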
1741 unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
1742 Value* SV = llvm::ConstantVector::getSplat(nElts, C);
1743 return Builder.CreateShuffleVector(V, V, SV, "lane");
1744 }
1746 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
1747 const char *name,
1748 unsigned shift, bool rightshift) {
1749 unsigned j = 0;
1750 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
1751 ai != ae; ++ai, ++j)
1752 if (shift > 0 && shift == j)
1753 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
1754 else
1755 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
1757 return Builder.CreateCall(F, Ops, name);
1758 }
1760 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
1761 bool neg) {
1762 int SV = cast<ConstantInt>(V)->getSExtValue();
1764 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
1765 llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
1766 return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
1767 }
1769 /// \brief Right-shift a vector by a constant.
1770 Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
1771 llvm::Type *Ty, bool usgn,
1772 const char *name) {
1773 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
1775 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
1776 int EltSize = VTy->getScalarSizeInBits();
1778 Vec = Builder.CreateBitCast(Vec, Ty);
1780 // lshr/ashr are undefined when the shift amount is equal to the vector
1781 // element size.
1782 if (ShiftAmt == EltSize) {
1783 if (usgn) {
1784 // Right-shifting an unsigned value by its size yields 0.
1785 llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
1786 return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
1787 } else {
1788 // Right-shifting a signed value by its size is equivalent
1789 // to a shift of size-1.
1790 --ShiftAmt;
1791 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
1792 }
1793 }
1795 Shift = EmitNeonShiftVector(Shift, Ty, false);
1796 if (usgn)
1797 return Builder.CreateLShr(Vec, Shift, name);
1798 else
1799 return Builder.CreateAShr(Vec, Shift, name);
1800 }
1802 /// EmitPointerWithAlignment - Given an expression with a pointer type, find
1803 /// the alignment of the type referenced by the pointer. Skip over implicit
1804 /// casts.
1805 std::pair<llvm::Value*, unsigned>
1806 CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
1807 assert(Addr->getType()->isPointerType());
1808 Addr = Addr->IgnoreParens();
1809 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
1810 if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
1811 ICE->getSubExpr()->getType()->isPointerType()) {
1812 std::pair<llvm::Value*, unsigned> Ptr =
1813 EmitPointerWithAlignment(ICE->getSubExpr());
1814 Ptr.first = Builder.CreateBitCast(Ptr.first,
1815 ConvertType(Addr->getType()));
1816 return Ptr;
1817 } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1818 LValue LV = EmitLValue(ICE->getSubExpr());
1819 unsigned Align = LV.getAlignment().getQuantity();
1821 // FIXME: Once LValues are fixed to always set alignment,
1822 // zap this code.
1823 QualType PtTy = ICE->getSubExpr()->getType();
1824 if (!PtTy->isIncompleteType())
1825 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1826 else
1827 Align = 1;
1829 return std::make_pair(LV.getAddress(), Align);
1830 }
1831 }
1832 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
1833 if (UO->getOpcode() == UO_AddrOf) {
1834 LValue LV = EmitLValue(UO->getSubExpr());
1835 unsigned Align = LV.getAlignment().getQuantity();
1837 // FIXME: Once LValues are fixed to always set alignment,
1838 // zap this code.
1839 QualType PtTy = UO->getSubExpr()->getType();
1840 if (!PtTy->isIncompleteType())
1841 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1842 else
1843 Align = 1;
1845 return std::make_pair(LV.getAddress(), Align);
1846 }
1847 }
1849 unsigned Align = 1;
1850 QualType PtTy = Addr->getType()->getPointeeType();
1851 if (!PtTy->isIncompleteType())
1852 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1854 return std::make_pair(EmitScalarExpr(Addr), Align);
1855 }
1857 enum {
1858 AddRetType = (1 << 0),
1859 Add1ArgType = (1 << 1),
1860 Add2ArgTypes = (1 << 2),
1862 VectorizeRetType = (1 << 3),
1863 VectorizeArgTypes = (1 << 4),
1865 InventFloatType = (1 << 5),
1866 UnsignedAlts = (1 << 6),
1868 Use64BitVectors = (1 << 7),
1869 Use128BitVectors = (1 << 8),
1871 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
1872 VectorRet = AddRetType | VectorizeRetType,
1873 VectorRetGetArgs01 =
1874 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
1875 FpCmpzModifiers =
1876 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
1877 };
1879 struct NeonIntrinsicInfo {
1880 unsigned BuiltinID;
1881 unsigned LLVMIntrinsic;
1882 unsigned AltLLVMIntrinsic;
1883 const char *NameHint;
1884 unsigned TypeModifier;
1886 bool operator<(unsigned RHSBuiltinID) const {
1887 return BuiltinID < RHSBuiltinID;
1888 }
1889 };
1891 #define NEONMAP0(NameBase) \
1892 { NEON::BI__builtin_neon_ ## NameBase, 0, 0, #NameBase, 0 }
1894 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
1895 { NEON::BI__builtin_neon_ ## NameBase, \
1896 Intrinsic::LLVMIntrinsic, 0, #NameBase, TypeModifier }
1898 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
1899 { NEON::BI__builtin_neon_ ## NameBase, \
1900 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
1901 #NameBase, TypeModifier }
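// The intrinsic maps below must be kept sorted by BuiltinID;
// findNeonIntrinsicInMap binary-searches them with std::lower_bound and
// (in debug builds) asserts sortedness once per map.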
1903 static NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
1904 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
1905 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
1906 NEONMAP1(vabs_v, arm_neon_vabs, 0),
1907 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
1908 NEONMAP0(vaddhn_v),
1909 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
1910 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
1911 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
1912 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
1913 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
1914 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
1915 NEONMAP1(vcage_v, arm_neon_vacge, 0),
1916 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
1917 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
1918 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
1919 NEONMAP1(vcale_v, arm_neon_vacge, 0),
1920 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
1921 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
1922 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
1923 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
1924 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
1925 NEONMAP1(vclz_v, ctlz, Add1ArgType),
1926 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
1927 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
1928 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
1929 NEONMAP1(vcvt_f16_v, arm_neon_vcvtfp2hf, 0),
1930 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
1931 NEONMAP0(vcvt_f32_v),
1932 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
1933 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
1934 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
1935 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
1936 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
1937 NEONMAP0(vcvt_s32_v),
1938 NEONMAP0(vcvt_s64_v),
1939 NEONMAP0(vcvt_u32_v),
1940 NEONMAP0(vcvt_u64_v),
1941 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
1942 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
1943 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
1944 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
1945 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
1946 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
1947 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
1948 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
1949 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
1950 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
1951 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
1952 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
1953 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
1954 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
1955 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
1956 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
1957 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
1958 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
1959 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
1960 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
1961 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
1962 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
1963 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
1964 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
1965 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
1966 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
1967 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
1968 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
1969 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
1970 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
1971 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
1972 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
1973 NEONMAP0(vcvtq_f32_v),
1974 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
1975 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
1976 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
1977 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
1978 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
1979 NEONMAP0(vcvtq_s32_v),
1980 NEONMAP0(vcvtq_s64_v),
1981 NEONMAP0(vcvtq_u32_v),
1982 NEONMAP0(vcvtq_u64_v),
1983 NEONMAP0(vext_v),
1984 NEONMAP0(vextq_v),
1985 NEONMAP0(vfma_v),
1986 NEONMAP0(vfmaq_v),
1987 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
1988 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
1989 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
1990 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
1991 NEONMAP0(vld1_dup_v),
1992 NEONMAP1(vld1_v, arm_neon_vld1, 0),
1993 NEONMAP0(vld1q_dup_v),
1994 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
1995 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
1996 NEONMAP1(vld2_v, arm_neon_vld2, 0),
1997 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
1998 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
1999 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
2000 NEONMAP1(vld3_v, arm_neon_vld3, 0),
2001 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
2002 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
2003 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
2004 NEONMAP1(vld4_v, arm_neon_vld4, 0),
2005 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
2006 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
2007 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
2008 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
2009 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
2010 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
2011 NEONMAP0(vmovl_v),
2012 NEONMAP0(vmovn_v),
2013 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
2014 NEONMAP0(vmull_v),
2015 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
2016 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
2017 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
2018 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
2019 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
2020 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
2021 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
2022 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
2023 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
2024 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
2025 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
2026 NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
2027 NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
2028 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
2029 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
2030 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
2031 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
2032 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
2033 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
2034 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
2035 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
2036 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
2037 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
2038 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
2039 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
2040 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
2041 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
2042 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
2043 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
2044 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
2045 NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
2046 NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
2047 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
2048 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
2049 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
2050 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
2051 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
2052 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
2053 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
2054 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
2055 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
2056 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
2057 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
2058 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
2059 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
2060 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
2061 NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
2062 NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
2063 NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
2064 NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
2065 NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
2066 NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
2067 NEONMAP0(vshl_n_v),
2068 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
2069 NEONMAP0(vshll_n_v),
2070 NEONMAP0(vshlq_n_v),
2071 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
2072 NEONMAP0(vshr_n_v),
2073 NEONMAP0(vshrn_n_v),
2074 NEONMAP0(vshrq_n_v),
2075 NEONMAP1(vst1_v, arm_neon_vst1, 0),
2076 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
2077 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
2078 NEONMAP1(vst2_v, arm_neon_vst2, 0),
2079 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
2080 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
2081 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
2082 NEONMAP1(vst3_v, arm_neon_vst3, 0),
2083 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
2084 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
2085 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
2086 NEONMAP1(vst4_v, arm_neon_vst4, 0),
2087 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
2088 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
2089 NEONMAP0(vsubhn_v),
2090 NEONMAP0(vtrn_v),
2091 NEONMAP0(vtrnq_v),
2092 NEONMAP0(vtst_v),
2093 NEONMAP0(vtstq_v),
2094 NEONMAP0(vuzp_v),
2095 NEONMAP0(vuzpq_v),
2096 NEONMAP0(vzip_v),
2097 NEONMAP0(vzipq_v)
2098 };
2100 static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
2101 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
2102 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
2103 NEONMAP0(vaddhn_v),
2104 NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
2105 NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
2106 NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
2107 NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
2108 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
2109 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
2110 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
2111 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
2112 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
2113 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
2114 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
2115 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
2116 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
2117 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
2118 NEONMAP1(vclz_v, ctlz, Add1ArgType),
2119 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
2120 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
2121 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
2122 NEONMAP1(vcvt_f16_v, aarch64_neon_vcvtfp2hf, 0),
2123 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
2124 NEONMAP0(vcvt_f32_v),
2125 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
2126 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
2127 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
2128 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
2129 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
2130 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
2131 NEONMAP0(vcvtq_f32_v),
2132 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
2133 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
2134 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
2135 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
2136 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
2137 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
2138 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
2139 NEONMAP0(vext_v),
2140 NEONMAP0(vextq_v),
2141 NEONMAP0(vfma_v),
2142 NEONMAP0(vfmaq_v),
2143 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
2144 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
2145 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
2146 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
2147 NEONMAP0(vmovl_v),
2148 NEONMAP0(vmovn_v),
2149 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
2150 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
2151 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
2152 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
2153 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
2154 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
2155 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
2156 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
2157 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
2158 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
2159 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
2160 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
2161 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
2162 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
2163 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
2164 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
2165 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
2166 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
2167 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
2168 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
2169 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
2170 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
2171 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
2172 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
2173 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
2174 NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
2175 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
2176 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
2177 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
2178 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
2179 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
2180 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
2181 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
2182 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
2183 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
2184 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
2185 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
2186 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
2187 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
2188 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
2189 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
2190 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
2191 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
2192 NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
2193 NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
2194 NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
2195 NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
2196 NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
2197 NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
2198 NEONMAP0(vshl_n_v),
2199 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
2200 NEONMAP0(vshll_n_v),
2201 NEONMAP0(vshlq_n_v),
2202 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
2203 NEONMAP0(vshr_n_v),
2204 NEONMAP0(vshrn_n_v),
2205 NEONMAP0(vshrq_n_v),
2206 NEONMAP0(vsubhn_v),
2207 NEONMAP0(vtst_v),
2208 NEONMAP0(vtstq_v),
2209 };
2211 static NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
2212 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
2213 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
2214 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
2215 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
2216 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
2217 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
2218 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
2219 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
2220 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
2221 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
2222 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
2223 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
2224 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
2225 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
2226 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
2227 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
2228 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
2229 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
2230 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
2231 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
2232 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
2233 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
2234 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
2235 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
2236 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
2237 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
2238 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
2239 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
2240 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
2241 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
2242 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
2243 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
2244 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
2245 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
2246 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
2247 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
2248 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
2249 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
2250 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
2251 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
2252 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
2253 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
2254 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
2255 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
2256 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
2257 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
2258 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
2259 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
2260 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
2261 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
2262 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
2263 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
2264 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
2265 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
2266 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
2267 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
2268 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
2269 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
2270 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
2271 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
2272 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
2273 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
2274 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
2275 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
2276 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
2277 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
2278 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
2279 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
2280 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
2281 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
2282 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
2283 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
2284 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
2285 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
2286 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
2287 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
2288 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
2289 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
2290 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
2291 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
2292 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
2293 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
2294 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
2295 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
2296 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
2297 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
2298 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
2299 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
2300 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
2301 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
2302 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
2303 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
2304 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
2305 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
2306 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
2307 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
2308 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
2309 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
2310 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
2311 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
2312 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
2313 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
2314 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
2315 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
2316 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
2317 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
2318 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
2319 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
2320 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
2321 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
2322 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
2323 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
2324 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
2325 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
2326 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
2327 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
2328 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
2329 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
2330 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
2331 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
2332 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
2333 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
2334 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
2335 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
2336 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
2337 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
2338 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
2339 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
2340 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
2341 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
2342 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
2343 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
2344 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
2345 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
2346 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
2347 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
2348 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
2349 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
2350 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
2351 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
2352 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
2353 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
2354 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
2355 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
2356 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
2357 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
2358 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
2359 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
2360 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
2361 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
2362 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
2363 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
2364 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
2365 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
2366 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
2367 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
2368 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
2369 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
2370 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
2371 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
2372 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
2373 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
2374 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
2375 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
2376 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
2377 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
2378 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
2379 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
2380 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
2381 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
2382 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
2383 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
2384 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
2385 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
2386 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
2387 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
2388 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
2389 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
2390 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
2391 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
2392 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
2393 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
2394 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
2395 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
2396 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
2397 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
2398 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
2399 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
2400 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
2401 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
2402 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
2403 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
2404 };
2406 #undef NEONMAP0
2407 #undef NEONMAP1
2408 #undef NEONMAP2
2410 static bool NEONSIMDIntrinsicsProvenSorted = false;
2412 static bool AArch64SIMDIntrinsicsProvenSorted = false;
2413 static bool AArch64SISDIntrinsicsProvenSorted = false;
2416 static const NeonIntrinsicInfo *
2417 findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
2418 unsigned BuiltinID, bool &MapProvenSorted) {
2420 #ifndef NDEBUG
2421 if (!MapProvenSorted) {
2422 // FIXME: use std::is_sorted once C++11 is allowed
2423 for (unsigned i = 0; i < IntrinsicMap.size() - 1; ++i)
2424 assert(IntrinsicMap[i].BuiltinID <= IntrinsicMap[i + 1].BuiltinID);
2425 MapProvenSorted = true;
2426 }
2427 #endif
2429 const NeonIntrinsicInfo *Builtin =
2430 std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);
2432 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
2433 return Builtin;
2435 return nullptr;
2436 }
2438 Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
2439 unsigned Modifier,
2440 llvm::Type *ArgType,
2441 const CallExpr *E) {
2442 int VectorSize = 0;
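// Build the overloaded-type list for CGM.getIntrinsic from the Modifier
// flags: optionally the (possibly vectorized) return type, then one or two
// copies of the argument type, then an extra float type if requested.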
2443 if (Modifier & Use64BitVectors)
2444 VectorSize = 64;
2445 else if (Modifier & Use128BitVectors)
2446 VectorSize = 128;
2449 SmallVector<llvm::Type *, 3> Tys;
2450 if (Modifier & AddRetType) {
2451 llvm::Type *Ty = ConvertType(E->getCallReturnType());
2452 if (Modifier & VectorizeRetType)
2453 Ty = llvm::VectorType::get(
2454 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
2456 Tys.push_back(Ty);
2457 }
2460 if (Modifier & VectorizeArgTypes) {
2461 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
2462 ArgType = llvm::VectorType::get(ArgType, Elts);
2463 }
2465 if (Modifier & (Add1ArgType | Add2ArgTypes))
2466 Tys.push_back(ArgType);
2468 if (Modifier & Add2ArgTypes)
2469 Tys.push_back(ArgType);
2471 if (Modifier & InventFloatType)
2472 Tys.push_back(FloatTy);
2474 return CGM.getIntrinsic(IntrinsicID, Tys);
2475 }
2477 static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
2478 const NeonIntrinsicInfo &SISDInfo,
2479 SmallVectorImpl<Value *> &Ops,
2480 const CallExpr *E) {
2481 unsigned BuiltinID = SISDInfo.BuiltinID;
2482 unsigned int Int = SISDInfo.LLVMIntrinsic;
2483 unsigned Modifier = SISDInfo.TypeModifier;
2484 const char *s = SISDInfo.NameHint;
2486 switch (BuiltinID) {
2487 case NEON::BI__builtin_neon_vcled_s64:
2488 case NEON::BI__builtin_neon_vcled_u64:
2489 case NEON::BI__builtin_neon_vcles_f32:
2490 case NEON::BI__builtin_neon_vcled_f64:
2491 case NEON::BI__builtin_neon_vcltd_s64:
2492 case NEON::BI__builtin_neon_vcltd_u64:
2493 case NEON::BI__builtin_neon_vclts_f32:
2494 case NEON::BI__builtin_neon_vcltd_f64:
2495 case NEON::BI__builtin_neon_vcales_f32:
2496 case NEON::BI__builtin_neon_vcaled_f64:
2497 case NEON::BI__builtin_neon_vcalts_f32:
2498 case NEON::BI__builtin_neon_vcaltd_f64:
2499 // Only one direction of comparisons actually exists: cmle is actually a
2500 // cmge with swapped operands. The table gives us the right intrinsic, but
2501 // we still need to do the swap.
2502 std::swap(Ops[0], Ops[1]);
2503 break;
2504 }
2506 assert(Int && "Generic code assumes a valid intrinsic");
2508 // Determine the type(s) of this overloaded AArch64 intrinsic.
2509 const Expr *Arg = E->getArg(0);
2510 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
2511 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
2513 int j = 0;
2514 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
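// Widen any scalar operand the intrinsic expects as a vector: each
// mismatched operand is inserted into lane 0 of an undef vector of the
// parameter's type.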
2515 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
2516 ai != ae; ++ai, ++j) {
2517 llvm::Type *ArgTy = ai->getType();
2518 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
2519 ArgTy->getPrimitiveSizeInBits())
2520 continue;
2522 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
2523 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
2524 // it before inserting.
2525 Ops[j] =
2526 CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
2527 Ops[j] =
2528 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
2529 }
2531 Value *Result = CGF.EmitNeonCall(F, Ops, s);
2532 llvm::Type *ResultType = CGF.ConvertType(E->getType());
2533 if (ResultType->getPrimitiveSizeInBits() <
2534 Result->getType()->getPrimitiveSizeInBits())
2535 return CGF.Builder.CreateExtractElement(Result, C0);
2537 return CGF.Builder.CreateBitCast(Result, ResultType, s);
2538 }
2540 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
2541 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
2542 const char *NameHint, unsigned Modifier, const CallExpr *E,
2543 SmallVectorImpl<llvm::Value *> &Ops, llvm::Value *Align) {
2544 // Get the last argument, which specifies the vector type.
2545 llvm::APSInt NeonTypeConst;
2546 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
2547 if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
2548 return nullptr;
2550 // Determine the type of this overloaded NEON intrinsic.
2551 NeonTypeFlags Type(NeonTypeConst.getZExtValue());
2552 bool Usgn = Type.isUnsigned();
2553 bool Quad = Type.isQuad();
2555 llvm::VectorType *VTy = GetNeonType(this, Type);
2556 llvm::Type *Ty = VTy;
2557 if (!Ty)
2558 return nullptr;
2560 unsigned Int = LLVMIntrinsic;
2561 if ((Modifier & UnsignedAlts) && !Usgn)
2562 Int = AltLLVMIntrinsic;
2564 switch (BuiltinID) {
2565 default: break;
2566 case NEON::BI__builtin_neon_vabs_v:
2567 case NEON::BI__builtin_neon_vabsq_v:
2568 if (VTy->getElementType()->isFloatingPointTy())
2569 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
2570 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
2571 case NEON::BI__builtin_neon_vaddhn_v: {
2572 llvm::VectorType *SrcTy =
2573 llvm::VectorType::getExtendedElementVectorType(VTy);
2575 // %sum = add <4 x i32> %lhs, %rhs
2576 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
2577 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
2578 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
2580 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
2581 Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
2582 SrcTy->getScalarSizeInBits() / 2);
2583 ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
2584 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
2586 // %res = trunc <4 x i32> %high to <4 x i16>
2587 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
2588 }
2589 case NEON::BI__builtin_neon_vcale_v:
2590 case NEON::BI__builtin_neon_vcaleq_v:
2591 case NEON::BI__builtin_neon_vcalt_v:
2592 case NEON::BI__builtin_neon_vcaltq_v:
2593 std::swap(Ops[0], Ops[1]);
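// Fall through: the "absolute compare" intrinsics exist in only one
// direction; swapping the operands above turns le/lt into ge/gt.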
2594 case NEON::BI__builtin_neon_vcage_v:
2595 case NEON::BI__builtin_neon_vcageq_v:
2596 case NEON::BI__builtin_neon_vcagt_v:
2597 case NEON::BI__builtin_neon_vcagtq_v: {
2598 llvm::Type *VecFlt = llvm::VectorType::get(
2599 VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
2600 VTy->getNumElements());
2601 llvm::Type *Tys[] = { VTy, VecFlt };
2602 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
2603 return EmitNeonCall(F, Ops, NameHint);
2604 }
2605 case NEON::BI__builtin_neon_vclz_v:
2606 case NEON::BI__builtin_neon_vclzq_v:
2607 // We generate a target-independent intrinsic, which needs a second argument
2608 // for whether or not clz of zero is undefined; on ARM it isn't.
2609 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
2610 break;
2611 case NEON::BI__builtin_neon_vcvt_f32_v:
2612 case NEON::BI__builtin_neon_vcvtq_f32_v:
2613 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2614 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
2615 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
2616 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
2617 case NEON::BI__builtin_neon_vcvt_n_f32_v:
2618 case NEON::BI__builtin_neon_vcvt_n_f64_v:
2619 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
2620 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
2621 bool Double =
2622 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2623 llvm::Type *FloatTy =
2624 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2625 : NeonTypeFlags::Float32,
2626 false, Quad));
2627 llvm::Type *Tys[2] = { FloatTy, Ty };
2628 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
2629 Function *F = CGM.getIntrinsic(Int, Tys);
2630 return EmitNeonCall(F, Ops, "vcvt_n");
2631 }
2632 case NEON::BI__builtin_neon_vcvt_n_s32_v:
2633 case NEON::BI__builtin_neon_vcvt_n_u32_v:
2634 case NEON::BI__builtin_neon_vcvt_n_s64_v:
2635 case NEON::BI__builtin_neon_vcvt_n_u64_v:
2636 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
2637 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
2638 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
2639 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
2640 bool Double =
2641 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2642 llvm::Type *FloatTy =
2643 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2644 : NeonTypeFlags::Float32,
2645 false, Quad));
2646 llvm::Type *Tys[2] = { Ty, FloatTy };
2647 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
2648 return EmitNeonCall(F, Ops, "vcvt_n");
2649 }
2650 case NEON::BI__builtin_neon_vcvt_s32_v:
2651 case NEON::BI__builtin_neon_vcvt_u32_v:
2652 case NEON::BI__builtin_neon_vcvt_s64_v:
2653 case NEON::BI__builtin_neon_vcvt_u64_v:
2654 case NEON::BI__builtin_neon_vcvtq_s32_v:
2655 case NEON::BI__builtin_neon_vcvtq_u32_v:
2656 case NEON::BI__builtin_neon_vcvtq_s64_v:
2657 case NEON::BI__builtin_neon_vcvtq_u64_v: {
2658 bool Double =
2659 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2660 llvm::Type *FloatTy =
2661 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2662 : NeonTypeFlags::Float32,
2663 false, Quad));
2664 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
2665 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
2666 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
2667 }
2668 case NEON::BI__builtin_neon_vcvta_s32_v:
2669 case NEON::BI__builtin_neon_vcvta_s64_v:
2670 case NEON::BI__builtin_neon_vcvta_u32_v:
2671 case NEON::BI__builtin_neon_vcvta_u64_v:
2672 case NEON::BI__builtin_neon_vcvtaq_s32_v:
2673 case NEON::BI__builtin_neon_vcvtaq_s64_v:
2674 case NEON::BI__builtin_neon_vcvtaq_u32_v:
2675 case NEON::BI__builtin_neon_vcvtaq_u64_v:
2676 case NEON::BI__builtin_neon_vcvtn_s32_v:
2677 case NEON::BI__builtin_neon_vcvtn_s64_v:
2678 case NEON::BI__builtin_neon_vcvtn_u32_v:
2679 case NEON::BI__builtin_neon_vcvtn_u64_v:
2680 case NEON::BI__builtin_neon_vcvtnq_s32_v:
2681 case NEON::BI__builtin_neon_vcvtnq_s64_v:
2682 case NEON::BI__builtin_neon_vcvtnq_u32_v:
2683 case NEON::BI__builtin_neon_vcvtnq_u64_v:
2684 case NEON::BI__builtin_neon_vcvtp_s32_v:
2685 case NEON::BI__builtin_neon_vcvtp_s64_v:
2686 case NEON::BI__builtin_neon_vcvtp_u32_v:
2687 case NEON::BI__builtin_neon_vcvtp_u64_v:
2688 case NEON::BI__builtin_neon_vcvtpq_s32_v:
2689 case NEON::BI__builtin_neon_vcvtpq_s64_v:
2690 case NEON::BI__builtin_neon_vcvtpq_u32_v:
2691 case NEON::BI__builtin_neon_vcvtpq_u64_v:
2692 case NEON::BI__builtin_neon_vcvtm_s32_v:
2693 case NEON::BI__builtin_neon_vcvtm_s64_v:
2694 case NEON::BI__builtin_neon_vcvtm_u32_v:
2695 case NEON::BI__builtin_neon_vcvtm_u64_v:
2696 case NEON::BI__builtin_neon_vcvtmq_s32_v:
2697 case NEON::BI__builtin_neon_vcvtmq_s64_v:
2698 case NEON::BI__builtin_neon_vcvtmq_u32_v:
2699 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
2700 bool Double =
2701 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2702 llvm::Type *InTy =
2703 GetNeonType(this,
2704 NeonTypeFlags(Double ? NeonTypeFlags::Float64
2705 : NeonTypeFlags::Float32, false, Quad));
2706 llvm::Type *Tys[2] = { Ty, InTy };
2707 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
2708 }
2709 case NEON::BI__builtin_neon_vext_v:
2710 case NEON::BI__builtin_neon_vextq_v: {
2711 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
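// vext produces lanes CV..CV+N-1 of the two inputs' concatenation, so it
// maps directly onto a shufflevector with indices CV, CV+1, ...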
2712 SmallVector<Constant*, 16> Indices;
2713 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
2714 Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
2716 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2717 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2718 Value *SV = llvm::ConstantVector::get(Indices);
2719 return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
2720 }
2721 case NEON::BI__builtin_neon_vfma_v:
2722 case NEON::BI__builtin_neon_vfmaq_v: {
2723 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
2724 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2725 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2726 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2728 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
2729 return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
2730 }
2731 case NEON::BI__builtin_neon_vld1_v:
2732 case NEON::BI__builtin_neon_vld1q_v:
2733 Ops.push_back(Align);
2734 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vld1");
2735 case NEON::BI__builtin_neon_vld2_v:
2736 case NEON::BI__builtin_neon_vld2q_v:
2737 case NEON::BI__builtin_neon_vld3_v:
2738 case NEON::BI__builtin_neon_vld3q_v:
2739 case NEON::BI__builtin_neon_vld4_v:
2740 case NEON::BI__builtin_neon_vld4q_v: {
2741 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
2742 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, NameHint);
2743 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2744 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2745 return Builder.CreateStore(Ops[1], Ops[0]);
2746 }
2747 case NEON::BI__builtin_neon_vld1_dup_v:
2748 case NEON::BI__builtin_neon_vld1q_dup_v: {
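// Load a single element, insert it into lane 0 of an undef vector, then
// let EmitNeonSplat broadcast it to every lane.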
2749 Value *V = UndefValue::get(Ty);
2750 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
2751 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2752 LoadInst *Ld = Builder.CreateLoad(Ops[0]);
2753 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
2754 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
2755 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
2756 return EmitNeonSplat(Ops[0], CI);
2758 case NEON::BI__builtin_neon_vld2_lane_v:
2759 case NEON::BI__builtin_neon_vld2q_lane_v:
2760 case NEON::BI__builtin_neon_vld3_lane_v:
2761 case NEON::BI__builtin_neon_vld3q_lane_v:
2762 case NEON::BI__builtin_neon_vld4_lane_v:
2763 case NEON::BI__builtin_neon_vld4q_lane_v: {
2764 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
2765 for (unsigned I = 2; I < Ops.size() - 1; ++I)
2766 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
2767 Ops.push_back(Align);
2768 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
2769 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2770 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2771 return Builder.CreateStore(Ops[1], Ops[0]);
2773 case NEON::BI__builtin_neon_vmovl_v: {
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (Usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
2780 case NEON::BI__builtin_neon_vmovn_v: {
2781 llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
2782 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
2783 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
2785 case NEON::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until global ISel comes along and can
    // see through such movement, this leads to bad CodeGen. So we need an
    // intrinsic for now.
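    // The pure-IR form referred to above would be, for the signed case:
    //   %lhs.ext = sext <8 x i8> %lhs to <8 x i16>
    //   %rhs.ext = sext <8 x i8> %rhs to <8 x i16>
    //   %prod    = mul <8 x i16> %lhs.ext, %rhs.ext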
2791 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
2792 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
2793 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
2794 case NEON::BI__builtin_neon_vpadal_v:
2795 case NEON::BI__builtin_neon_vpadalq_v: {
2796 // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
  }
2805 case NEON::BI__builtin_neon_vpaddl_v:
2806 case NEON::BI__builtin_neon_vpaddlq_v: {
2807 // The source operand type has twice as many elements of half the size.
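    // E.g. vpaddl_s8 maps <8 x i8> to <4 x i16>; each result lane is the sum
    // of one adjacent pair of source lanes. vpadal above is the same but also
    // accumulates into the destination operand.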
2808 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
2809 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
2810 llvm::Type *NarrowTy =
2811 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
2812 llvm::Type *Tys[2] = { Ty, NarrowTy };
2813 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
2815 case NEON::BI__builtin_neon_vqdmlal_v:
2816 case NEON::BI__builtin_neon_vqdmlsl_v: {
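    // vqdmlal/vqdmlsl decompose into the saturating doubling multiply
    // followed by a saturating accumulate, e.g.
    //   vqdmlal(a, b, c) == vqadd(a, vqdmull(b, c))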
2817 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty),
                              MulOps, "vqdmlal");

    SmallVector<Value *, 2> AccumOps;
    AccumOps.push_back(Ops[0]);
    AccumOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty),
                        AccumOps, NameHint);
  }
2827 case NEON::BI__builtin_neon_vqshl_n_v:
2828 case NEON::BI__builtin_neon_vqshlq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
                        1, false);
2831 case NEON::BI__builtin_neon_vrecpe_v:
2832 case NEON::BI__builtin_neon_vrecpeq_v:
2833 case NEON::BI__builtin_neon_vrsqrte_v:
2834 case NEON::BI__builtin_neon_vrsqrteq_v:
2835 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
2836 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
2838 case NEON::BI__builtin_neon_vshl_n_v:
2839 case NEON::BI__builtin_neon_vshlq_n_v:
2840 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
2843 case NEON::BI__builtin_neon_vshll_n_v: {
2844 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    if (Usgn)
      Ops[0] = Builder.CreateZExt(Ops[0], VTy);
    else
      Ops[0] = Builder.CreateSExt(Ops[0], VTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
    return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
  }
2853 case NEON::BI__builtin_neon_vshrn_n_v: {
2854 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
2855 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
    if (Usgn)
      Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
    return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
  }
2863 case NEON::BI__builtin_neon_vshr_n_v:
2864 case NEON::BI__builtin_neon_vshrq_n_v:
2865 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
2866 case NEON::BI__builtin_neon_vst1_v:
2867 case NEON::BI__builtin_neon_vst1q_v:
2868 case NEON::BI__builtin_neon_vst2_v:
2869 case NEON::BI__builtin_neon_vst2q_v:
2870 case NEON::BI__builtin_neon_vst3_v:
2871 case NEON::BI__builtin_neon_vst3q_v:
2872 case NEON::BI__builtin_neon_vst4_v:
2873 case NEON::BI__builtin_neon_vst4q_v:
2874 case NEON::BI__builtin_neon_vst2_lane_v:
2875 case NEON::BI__builtin_neon_vst2q_lane_v:
2876 case NEON::BI__builtin_neon_vst3_lane_v:
2877 case NEON::BI__builtin_neon_vst3q_lane_v:
2878 case NEON::BI__builtin_neon_vst4_lane_v:
2879 case NEON::BI__builtin_neon_vst4q_lane_v:
2880 Ops.push_back(Align);
2881 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
2882 case NEON::BI__builtin_neon_vsubhn_v: {
2883 llvm::VectorType *SrcTy =
2884 llvm::VectorType::getExtendedElementVectorType(VTy);
    // %diff = sub <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");

    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
                                          SrcTy->getScalarSizeInBits() / 2);
    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  }
2900 case NEON::BI__builtin_neon_vtrn_v:
2901 case NEON::BI__builtin_neon_vtrnq_v: {
2902 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
2903 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2904 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2905 Value *SV = nullptr;
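    // vtrn transposes 2x2 blocks of lanes, e.g. for <4 x i16> inputs a and b
    // the two results use lanes (a0,b0,a2,b2) and (a1,b1,a3,b3).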
2907 for (unsigned vi = 0; vi != 2; ++vi) {
2908 SmallVector<Constant*, 16> Indices;
2909 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
2910 Indices.push_back(Builder.getInt32(i+vi));
        Indices.push_back(Builder.getInt32(i+e+vi));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
2920 case NEON::BI__builtin_neon_vtst_v:
2921 case NEON::BI__builtin_neon_vtstq_v: {
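    // vtst: a lane becomes all-ones when (a & b) is non-zero, zero otherwise.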
2922 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2923 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2924 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
2925 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
2926 ConstantAggregateZero::get(Ty));
2927 return Builder.CreateSExt(Ops[0], Ty, "vtst");
2929 case NEON::BI__builtin_neon_vuzp_v:
2930 case NEON::BI__builtin_neon_vuzpq_v: {
2931 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
2932 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2933 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2934 Value *SV = nullptr;
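    // vuzp de-interleaves: the first result gathers the even lanes of the
    // concatenated inputs, the second the odd lanes.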
2936 for (unsigned vi = 0; vi != 2; ++vi) {
2937 SmallVector<Constant*, 16> Indices;
2938 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
2939 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
2941 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
2942 SV = llvm::ConstantVector::get(Indices);
2943 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
2948 case NEON::BI__builtin_neon_vzip_v:
2949 case NEON::BI__builtin_neon_vzipq_v: {
2950 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
2951 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2952 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2953 Value *SV = nullptr;
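    // vzip interleaves the inputs lane by lane, e.g. for <4 x i16> inputs a
    // and b the two results are (a0,b0,a1,b1) and (a2,b2,a3,b3).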
2955 for (unsigned vi = 0; vi != 2; ++vi) {
2956 SmallVector<Constant*, 16> Indices;
2957 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
2958 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
        Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  }
2970 assert(Int && "Expected valid intrinsic number");
  // Determine the type(s) of this overloaded NEON intrinsic.
2973 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
2975 Value *Result = EmitNeonCall(F, Ops, NameHint);
2976 llvm::Type *ResultType = ConvertType(E->getType());
  // AArch64 intrinsics that return a one-element vector must be bitcast
  // to the scalar type the builtin expects.
  return Builder.CreateBitCast(Result, ResultType, NameHint);
}
2982 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
2983 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
2984 const CmpInst::Predicate Ip, const Twine &Name) {
2985 llvm::Type *OTy = Op->getType();
2987 // FIXME: this is utterly horrific. We should not be looking at previous
2988 // codegen context to find out what needs doing. Unfortunately TableGen
  // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
  // (etc).
  if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
    OTy = BI->getOperand(0)->getType();

  Op = Builder.CreateBitCast(Op, OTy);
  if (OTy->getScalarType()->isFloatingPointTy()) {
    Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
  } else {
    Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
  }
  return Builder.CreateSExt(Op, Ty, Name);
}
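// E.g. for vceqzs_f32 this emits roughly:
//   %cmp = fcmp oeq float %x, 0.000000e+00
//   %res = sext i1 %cmp to i32   ; all-ones on true, zero on false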
3003 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
3004 Value *ExtOp, Value *IndexOp,
                                 llvm::Type *ResTy, unsigned IntID,
                                 const char *Name) {
  SmallVector<Value *, 2> TblOps;
  if (ExtOp)
    TblOps.push_back(ExtOp);
  // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
3012 SmallVector<Constant*, 16> Indices;
3013 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
3014 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
3015 Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
    Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
  }
  Value *SV = llvm::ConstantVector::get(Indices);
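  // Shuffling a pair of 64-bit tables with these indices concatenates them
  // into the single 128-bit register that the AArch64 TBL/TBX instructions
  // consume.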
3020 int PairPos = 0, End = Ops.size() - 1;
  while (PairPos < End) {
    TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
                                                     Ops[PairPos+1], SV, Name));
    PairPos += 2;
  }

  // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
  // of the last 128-bit lookup table with zero.
  if (PairPos == End) {
    Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
    TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
                                                     ZeroTbl, SV, Name));
  }

  Function *TblF;
  TblOps.push_back(IndexOp);
  TblF = CGF.CGM.getIntrinsic(IntID, ResTy);

  return CGF.EmitNeonCall(TblF, TblOps, Name);
}
3042 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
3043 const CallExpr *E) {
3044 unsigned HintID = static_cast<unsigned>(-1);
  switch (BuiltinID) {
  default: break;
  case ARM::BI__builtin_arm_nop:
    HintID = 0;
    break;
  case ARM::BI__builtin_arm_yield:
  case ARM::BI__yield:
    HintID = 1;
    break;
  case ARM::BI__builtin_arm_wfe:
  case ARM::BI__wfe:
    HintID = 2;
    break;
  case ARM::BI__builtin_arm_wfi:
  case ARM::BI__wfi:
    HintID = 3;
    break;
  case ARM::BI__builtin_arm_sev:
  case ARM::BI__sev:
    HintID = 4;
    break;
  case ARM::BI__builtin_arm_sevl:
  case ARM::BI__sevl:
    HintID = 5;
    break;
  }

  if (HintID != static_cast<unsigned>(-1)) {
3073 Function *F = CGM.getIntrinsic(Intrinsic::arm_hint);
3074 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
3077 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_rbit),
                              EmitScalarExpr(E->getArg(0)),
                              "rbit");
  }
3083 if (BuiltinID == ARM::BI__clear_cache) {
3084 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
3085 const FunctionDecl *FD = E->getDirectCallee();
3086 SmallVector<Value*, 2> Ops;
3087 for (unsigned i = 0; i < 2; i++)
3088 Ops.push_back(EmitScalarExpr(E->getArg(i)));
3089 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
3090 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
3091 StringRef Name = FD->getName();
3092 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
3095 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
3096 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
3097 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
3098 getContext().getTypeSize(E->getType()) == 64) ||
3099 BuiltinID == ARM::BI__ldrexd) {
    Function *F;

    switch (BuiltinID) {
3103 default: llvm_unreachable("unexpected builtin");
3104 case ARM::BI__builtin_arm_ldaex:
      F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
      break;
    case ARM::BI__builtin_arm_ldrexd:
    case ARM::BI__builtin_arm_ldrex:
    case ARM::BI__ldrexd:
      F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
      break;
    }
3114 Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
                                    "ldrexd");
3118 Value *Val0 = Builder.CreateExtractValue(Val, 1);
3119 Value *Val1 = Builder.CreateExtractValue(Val, 0);
3120 Val0 = Builder.CreateZExt(Val0, Int64Ty);
3121 Val1 = Builder.CreateZExt(Val1, Int64Ty);
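    // Combine the two 32-bit halves into one i64: result = (hi << 32) | lo,
    // where hi is field 1 and lo is field 0 of the { i32, i32 } result.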
3123 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
3124 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
3125 Val = Builder.CreateOr(Val, Val1);
3126 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
3129 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
3130 BuiltinID == ARM::BI__builtin_arm_ldaex) {
3131 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
3133 QualType Ty = E->getType();
3134 llvm::Type *RealResTy = ConvertType(Ty);
3135 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
3136 getContext().getTypeSize(Ty));
3137 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
3139 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
3140 ? Intrinsic::arm_ldaex
3141 : Intrinsic::arm_ldrex,
3142 LoadAddr->getType());
3143 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
3145 if (RealResTy->isPointerTy())
3146 return Builder.CreateIntToPtr(Val, RealResTy);
3148 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
3149 return Builder.CreateBitCast(Val, RealResTy);
3153 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
3154 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
3155 BuiltinID == ARM::BI__builtin_arm_strex) &&
3156 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
3157 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
3158 ? Intrinsic::arm_stlexd
3159 : Intrinsic::arm_strexd);
3160 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
3162 Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
3163 Value *Val = EmitScalarExpr(E->getArg(0));
3164 Builder.CreateStore(Val, Tmp);
3166 Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
3167 Val = Builder.CreateLoad(LdPtr);
3169 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
3170 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
3171 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
3172 return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
3175 if (BuiltinID == ARM::BI__builtin_arm_strex ||
3176 BuiltinID == ARM::BI__builtin_arm_stlex) {
3177 Value *StoreVal = EmitScalarExpr(E->getArg(0));
3178 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
3180 QualType Ty = E->getArg(0)->getType();
3181 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
3182 getContext().getTypeSize(Ty));
3183 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
3185 if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
    else {
      StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
    }
3192 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
3193 ? Intrinsic::arm_stlex
3194 : Intrinsic::arm_strex,
3195 StoreAddr->getType());
3196 return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
3199 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
3200 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
3201 return Builder.CreateCall(F);
3205 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
3206 switch (BuiltinID) {
3207 case ARM::BI__builtin_arm_crc32b:
3208 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
3209 case ARM::BI__builtin_arm_crc32cb:
3210 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
3211 case ARM::BI__builtin_arm_crc32h:
3212 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
3213 case ARM::BI__builtin_arm_crc32ch:
3214 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
3215 case ARM::BI__builtin_arm_crc32w:
3216 case ARM::BI__builtin_arm_crc32d:
3217 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
3218 case ARM::BI__builtin_arm_crc32cw:
3219 case ARM::BI__builtin_arm_crc32cd:
3220 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
3223 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
3224 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3225 Value *Arg1 = EmitScalarExpr(E->getArg(1));
    // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
    // intrinsics, hence we need different codegen for these cases.
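    // I.e. the 64-bit data operand is split into its 32-bit halves:
    //   crc32d(crc, x) == crc32w(crc32w(crc, lo32(x)), hi32(x))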
3229 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
3230 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
3231 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
3232 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
3233 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
3234 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
3236 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
3237 Value *Res = Builder.CreateCall2(F, Arg0, Arg1a);
3238 return Builder.CreateCall2(F, Res, Arg1b);
3240 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
3242 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
3243 return Builder.CreateCall2(F, Arg0, Arg1);
3247 SmallVector<Value*, 4> Ops;
3248 llvm::Value *Align = nullptr;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
3252 case NEON::BI__builtin_neon_vld1_v:
3253 case NEON::BI__builtin_neon_vld1q_v:
3254 case NEON::BI__builtin_neon_vld1q_lane_v:
3255 case NEON::BI__builtin_neon_vld1_lane_v:
3256 case NEON::BI__builtin_neon_vld1_dup_v:
3257 case NEON::BI__builtin_neon_vld1q_dup_v:
3258 case NEON::BI__builtin_neon_vst1_v:
3259 case NEON::BI__builtin_neon_vst1q_v:
3260 case NEON::BI__builtin_neon_vst1q_lane_v:
3261 case NEON::BI__builtin_neon_vst1_lane_v:
3262 case NEON::BI__builtin_neon_vst2_v:
3263 case NEON::BI__builtin_neon_vst2q_v:
3264 case NEON::BI__builtin_neon_vst2_lane_v:
3265 case NEON::BI__builtin_neon_vst2q_lane_v:
3266 case NEON::BI__builtin_neon_vst3_v:
3267 case NEON::BI__builtin_neon_vst3q_v:
3268 case NEON::BI__builtin_neon_vst3_lane_v:
3269 case NEON::BI__builtin_neon_vst3q_lane_v:
3270 case NEON::BI__builtin_neon_vst4_v:
3271 case NEON::BI__builtin_neon_vst4q_v:
3272 case NEON::BI__builtin_neon_vst4_lane_v:
3273 case NEON::BI__builtin_neon_vst4q_lane_v:
3274 // Get the alignment for the argument in addition to the value;
3275 // we'll use it later.
3276 std::pair<llvm::Value*, unsigned> Src =
3277 EmitPointerWithAlignment(E->getArg(0));
3278 Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    if (i == 1) {
3284 switch (BuiltinID) {
3285 case NEON::BI__builtin_neon_vld2_v:
3286 case NEON::BI__builtin_neon_vld2q_v:
3287 case NEON::BI__builtin_neon_vld3_v:
3288 case NEON::BI__builtin_neon_vld3q_v:
3289 case NEON::BI__builtin_neon_vld4_v:
3290 case NEON::BI__builtin_neon_vld4q_v:
3291 case NEON::BI__builtin_neon_vld2_lane_v:
3292 case NEON::BI__builtin_neon_vld2q_lane_v:
3293 case NEON::BI__builtin_neon_vld3_lane_v:
3294 case NEON::BI__builtin_neon_vld3q_lane_v:
3295 case NEON::BI__builtin_neon_vld4_lane_v:
3296 case NEON::BI__builtin_neon_vld4q_lane_v:
3297 case NEON::BI__builtin_neon_vld2_dup_v:
3298 case NEON::BI__builtin_neon_vld3_dup_v:
3299 case NEON::BI__builtin_neon_vld4_dup_v:
3300 // Get the alignment for the argument in addition to the value;
3301 // we'll use it later.
3302 std::pair<llvm::Value*, unsigned> Src =
3303 EmitPointerWithAlignment(E->getArg(1));
3304 Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    Ops.push_back(EmitScalarExpr(E->getArg(i)));
  }
  switch (BuiltinID) {
  default: break;
3314 // vget_lane and vset_lane are not overloaded and do not have an extra
3315 // argument that specifies the vector type.
3316 case NEON::BI__builtin_neon_vget_lane_i8:
3317 case NEON::BI__builtin_neon_vget_lane_i16:
3318 case NEON::BI__builtin_neon_vget_lane_i32:
3319 case NEON::BI__builtin_neon_vget_lane_i64:
3320 case NEON::BI__builtin_neon_vget_lane_f32:
3321 case NEON::BI__builtin_neon_vgetq_lane_i8:
3322 case NEON::BI__builtin_neon_vgetq_lane_i16:
3323 case NEON::BI__builtin_neon_vgetq_lane_i32:
3324 case NEON::BI__builtin_neon_vgetq_lane_i64:
3325 case NEON::BI__builtin_neon_vgetq_lane_f32:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
3328 case NEON::BI__builtin_neon_vset_lane_i8:
3329 case NEON::BI__builtin_neon_vset_lane_i16:
3330 case NEON::BI__builtin_neon_vset_lane_i32:
3331 case NEON::BI__builtin_neon_vset_lane_i64:
3332 case NEON::BI__builtin_neon_vset_lane_f32:
3333 case NEON::BI__builtin_neon_vsetq_lane_i8:
3334 case NEON::BI__builtin_neon_vsetq_lane_i16:
3335 case NEON::BI__builtin_neon_vsetq_lane_i32:
3336 case NEON::BI__builtin_neon_vsetq_lane_i64:
3337 case NEON::BI__builtin_neon_vsetq_lane_f32:
3338 Ops.push_back(EmitScalarExpr(E->getArg(2)));
3339 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  // Non-polymorphic crypto instructions are also not overloaded.
3342 case NEON::BI__builtin_neon_vsha1h_u32:
3343 Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
                        "vsha1h");
3346 case NEON::BI__builtin_neon_vsha1cq_u32:
3347 Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
                        "vsha1c");
3350 case NEON::BI__builtin_neon_vsha1pq_u32:
3351 Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
                        "vsha1p");
3354 case NEON::BI__builtin_neon_vsha1mq_u32:
3355 Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
                        "vsha1m");
  }
3360 // Get the last argument, which specifies the vector type.
3361 llvm::APSInt Result;
3362 const Expr *Arg = E->getArg(E->getNumArgs()-1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return nullptr;
3366 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
3367 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = FloatTy;
    else
      Ty = DoubleTy;
3375 // Determine whether this is an unsigned conversion or not.
3376 bool usgn = Result.getZExtValue() == 1;
3377 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
3379 // Call the appropriate intrinsic.
3380 Function *F = CGM.getIntrinsic(Int, Ty);
3381 return Builder.CreateCall(F, Ops, "vcvtr");
3384 // Determine the type of this overloaded NEON intrinsic.
3385 NeonTypeFlags Type(Result.getZExtValue());
3386 bool usgn = Type.isUnsigned();
3387 bool rightShift = false;
3389 llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;
3394 // Many NEON builtins have identical semantics and uses in ARM and
3395 // AArch64. Emit these in a single function.
3396 ArrayRef<NeonIntrinsicInfo> IntrinsicMap(ARMSIMDIntrinsicMap);
3397 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
      IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
  if (Builtin)
    return EmitCommonNeonBuiltinExpr(
        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
        Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);

  unsigned Int;
3405 switch (BuiltinID) {
3406 default: return nullptr;
3407 case NEON::BI__builtin_neon_vld1q_lane_v:
3408 // Handle 64-bit integer elements as a special case. Use shuffles of
3409 // one-element vectors to avoid poor code for i64 in the backend.
3410 if (VTy->getElementType()->isIntegerTy(64)) {
3411 // Extract the other lane.
3412 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3413 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
3414 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
3415 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
3416 // Load the value as a one-element vector.
3417 Ty = llvm::VectorType::get(VTy->getElementType(), 1);
3418 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
3419 Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
3421 SmallVector<Constant*, 2> Indices;
3422 Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
3423 Indices.push_back(ConstantInt::get(Int32Ty, Lane));
3424 SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
    }
    // fall through
3428 case NEON::BI__builtin_neon_vld1_lane_v: {
3429 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3430 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
3431 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3432 LoadInst *Ld = Builder.CreateLoad(Ops[0]);
3433 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
3434 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
3436 case NEON::BI__builtin_neon_vld2_dup_v:
3437 case NEON::BI__builtin_neon_vld3_dup_v:
3438 case NEON::BI__builtin_neon_vld4_dup_v: {
3439 // Handle 64-bit elements as a special-case. There is no "dup" needed.
3440 if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
3441 switch (BuiltinID) {
3442 case NEON::BI__builtin_neon_vld2_dup_v:
        Int = Intrinsic::arm_neon_vld2;
        break;
      case NEON::BI__builtin_neon_vld3_dup_v:
        Int = Intrinsic::arm_neon_vld3;
        break;
      case NEON::BI__builtin_neon_vld4_dup_v:
        Int = Intrinsic::arm_neon_vld4;
        break;
      default: llvm_unreachable("unknown vld_dup intrinsic?");
      }
      Function *F = CGM.getIntrinsic(Int, Ty);
3454 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
3455 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3456 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
      return Builder.CreateStore(Ops[1], Ops[0]);
    }
3459 switch (BuiltinID) {
3460 case NEON::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case NEON::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case NEON::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: llvm_unreachable("unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, Ty);
3472 llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
3474 SmallVector<Value*, 6> Args;
3475 Args.push_back(Ops[1]);
3476 Args.append(STy->getNumElements(), UndefValue::get(Ty));
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(Align);
3482 Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
3483 // splat lane 0 to all elts in each vector of the result.
3484 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3485 Value *Val = Builder.CreateExtractValue(Ops[1], i);
3486 Value *Elt = Builder.CreateBitCast(Val, Ty);
3487 Elt = EmitNeonSplat(Elt, CI);
3488 Elt = Builder.CreateBitCast(Elt, Val->getType());
3489 Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
3491 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3492 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3493 return Builder.CreateStore(Ops[1], Ops[0]);
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int =
      usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
3500 case NEON::BI__builtin_neon_vqrshrun_n_v:
3501 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
3502 Ops, "vqrshrun_n", 1, true);
3503 case NEON::BI__builtin_neon_vqshlu_n_v:
3504 case NEON::BI__builtin_neon_vqshluq_n_v:
3505 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
3506 Ops, "vqshlu", 1, false);
3507 case NEON::BI__builtin_neon_vqshrn_n_v:
3508 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
3511 case NEON::BI__builtin_neon_vqshrun_n_v:
3512 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
3513 Ops, "vqshrun_n", 1, true);
3514 case NEON::BI__builtin_neon_vrecpe_v:
3515 case NEON::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
3518 case NEON::BI__builtin_neon_vrshrn_n_v:
3519 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
3520 Ops, "vrshrn_n", 1, true);
3521 case NEON::BI__builtin_neon_vrshr_n_v:
3522 case NEON::BI__builtin_neon_vrshrq_n_v:
3523 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
3524 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
3525 case NEON::BI__builtin_neon_vrsra_n_v:
3526 case NEON::BI__builtin_neon_vrsraq_n_v:
3527 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3528 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3529 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
3530 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
3531 Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
3532 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
3533 case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
3536 case NEON::BI__builtin_neon_vsli_n_v:
3537 case NEON::BI__builtin_neon_vsliq_n_v:
3538 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
3541 case NEON::BI__builtin_neon_vsra_n_v:
3542 case NEON::BI__builtin_neon_vsraq_n_v:
3543 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3544 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
3545 return Builder.CreateAdd(Ops[0], Ops[1]);
3546 case NEON::BI__builtin_neon_vst1q_lane_v:
3547 // Handle 64-bit integer elements as a special case. Use a shuffle to get
3548 // a one-element vector and avoid poor code for i64 in the backend.
3549 if (VTy->getElementType()->isIntegerTy(64)) {
3550 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3551 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = Align;
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Ops[1]->getType()), Ops);
    }
    // fall through
3558 case NEON::BI__builtin_neon_vst1_lane_v: {
3559 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3560 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
3561 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3562 StoreInst *St = Builder.CreateStore(Ops[1],
3563 Builder.CreateBitCast(Ops[0], Ty));
    St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return St;
  }
  case NEON::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case NEON::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case NEON::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case NEON::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case NEON::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case NEON::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case NEON::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case NEON::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
  }
}
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
                                        const CallExpr *E,
                                        SmallVectorImpl<Value *> &Ops) {
3597 unsigned int Int = 0;
3598 const char *s = nullptr;
  switch (BuiltinID) {
  default:
    break;
3603 case NEON::BI__builtin_neon_vtbl1_v:
3604 case NEON::BI__builtin_neon_vqtbl1_v:
3605 case NEON::BI__builtin_neon_vqtbl1q_v:
3606 case NEON::BI__builtin_neon_vtbl2_v:
3607 case NEON::BI__builtin_neon_vqtbl2_v:
3608 case NEON::BI__builtin_neon_vqtbl2q_v:
3609 case NEON::BI__builtin_neon_vtbl3_v:
3610 case NEON::BI__builtin_neon_vqtbl3_v:
3611 case NEON::BI__builtin_neon_vqtbl3q_v:
3612 case NEON::BI__builtin_neon_vtbl4_v:
3613 case NEON::BI__builtin_neon_vqtbl4_v:
  case NEON::BI__builtin_neon_vqtbl4q_v:
    break;
3616 case NEON::BI__builtin_neon_vtbx1_v:
3617 case NEON::BI__builtin_neon_vqtbx1_v:
3618 case NEON::BI__builtin_neon_vqtbx1q_v:
3619 case NEON::BI__builtin_neon_vtbx2_v:
3620 case NEON::BI__builtin_neon_vqtbx2_v:
3621 case NEON::BI__builtin_neon_vqtbx2q_v:
3622 case NEON::BI__builtin_neon_vtbx3_v:
3623 case NEON::BI__builtin_neon_vqtbx3_v:
3624 case NEON::BI__builtin_neon_vqtbx3q_v:
3625 case NEON::BI__builtin_neon_vtbx4_v:
3626 case NEON::BI__builtin_neon_vqtbx4_v:
  case NEON::BI__builtin_neon_vqtbx4q_v:
    break;
  }
3631 assert(E->getNumArgs() >= 3);
3633 // Get the last argument, which specifies the vector type.
3634 llvm::APSInt Result;
3635 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
    return nullptr;
3639 // Determine the type of this overloaded NEON intrinsic.
3640 NeonTypeFlags Type(Result.getZExtValue());
3641 llvm::VectorType *VTy = GetNeonType(&CGF, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;
3646 unsigned nElts = VTy->getNumElements();
3648 CodeGen::CGBuilderTy &Builder = CGF.Builder;
  // AArch64 scalar builtins are not overloaded; they do not have an extra
  // argument that specifies the vector type, so we need to handle each case.
3652 SmallVector<Value *, 2> TblOps;
3653 switch (BuiltinID) {
3654 case NEON::BI__builtin_neon_vtbl1_v: {
3655 TblOps.push_back(Ops[0]);
3656 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
3657 Intrinsic::aarch64_neon_tbl1, "vtbl1");
3659 case NEON::BI__builtin_neon_vtbl2_v: {
3660 TblOps.push_back(Ops[0]);
3661 TblOps.push_back(Ops[1]);
3662 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
3663 Intrinsic::aarch64_neon_tbl1, "vtbl1");
3665 case NEON::BI__builtin_neon_vtbl3_v: {
3666 TblOps.push_back(Ops[0]);
3667 TblOps.push_back(Ops[1]);
3668 TblOps.push_back(Ops[2]);
3669 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
3670 Intrinsic::aarch64_neon_tbl2, "vtbl2");
3672 case NEON::BI__builtin_neon_vtbl4_v: {
3673 TblOps.push_back(Ops[0]);
3674 TblOps.push_back(Ops[1]);
3675 TblOps.push_back(Ops[2]);
3676 TblOps.push_back(Ops[3]);
3677 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
3678 Intrinsic::aarch64_neon_tbl2, "vtbl2");
3680 case NEON::BI__builtin_neon_vtbx1_v: {
3681 TblOps.push_back(Ops[1]);
3682 Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
3683 Intrinsic::aarch64_neon_tbl1, "vtbl1");
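    // vtbx leaves lanes whose index is out of range unchanged from the
    // destination operand; with a single 8-byte table that means any index
    // >= 8. The compare-and-select sequence below implements this.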
3685 llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
3686 Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
3687 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
3688 CmpRes = Builder.CreateSExt(CmpRes, Ty);
3690 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
3691 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
3692 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
3694 case NEON::BI__builtin_neon_vtbx2_v: {
3695 TblOps.push_back(Ops[1]);
3696 TblOps.push_back(Ops[2]);
3697 return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
3698 Intrinsic::aarch64_neon_tbx1, "vtbx1");
3700 case NEON::BI__builtin_neon_vtbx3_v: {
3701 TblOps.push_back(Ops[1]);
3702 TblOps.push_back(Ops[2]);
3703 TblOps.push_back(Ops[3]);
3704 Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
3705 Intrinsic::aarch64_neon_tbl2, "vtbl2");
3707 llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
3708 Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
3709 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
3711 CmpRes = Builder.CreateSExt(CmpRes, Ty);
3713 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
3714 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
3715 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
3717 case NEON::BI__builtin_neon_vtbx4_v: {
3718 TblOps.push_back(Ops[1]);
3719 TblOps.push_back(Ops[2]);
3720 TblOps.push_back(Ops[3]);
3721 TblOps.push_back(Ops[4]);
3722 return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
3723 Intrinsic::aarch64_neon_tbx2, "vtbx2");
3725 case NEON::BI__builtin_neon_vqtbl1_v:
3726 case NEON::BI__builtin_neon_vqtbl1q_v:
3727 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
3728 case NEON::BI__builtin_neon_vqtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2q_v:
3730 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
3731 case NEON::BI__builtin_neon_vqtbl3_v:
3732 case NEON::BI__builtin_neon_vqtbl3q_v:
3733 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
3734 case NEON::BI__builtin_neon_vqtbl4_v:
3735 case NEON::BI__builtin_neon_vqtbl4q_v:
3736 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
3737 case NEON::BI__builtin_neon_vqtbx1_v:
3738 case NEON::BI__builtin_neon_vqtbx1q_v:
3739 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
3740 case NEON::BI__builtin_neon_vqtbx2_v:
3741 case NEON::BI__builtin_neon_vqtbx2q_v:
3742 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
3743 case NEON::BI__builtin_neon_vqtbx3_v:
3744 case NEON::BI__builtin_neon_vqtbx3q_v:
3745 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
3746 case NEON::BI__builtin_neon_vqtbx4_v:
3747 case NEON::BI__builtin_neon_vqtbx4q_v:
    Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
  }

  if (!Int)
    return nullptr;

  Function *F = CGF.CGM.getIntrinsic(Int, Ty);
  return CGF.EmitNeonCall(F, Ops, s);
}
3759 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
3760 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
3761 Op = Builder.CreateBitCast(Op, Int16Ty);
3762 Value *V = UndefValue::get(VTy);
3763 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
  Op = Builder.CreateInsertElement(V, Op, CI);
  return Op;
}
3768 Value *CodeGenFunction::vectorWrapScalar8(Value *Op) {
3769 llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
3770 Op = Builder.CreateBitCast(Op, Int8Ty);
3771 Value *V = UndefValue::get(VTy);
3772 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
  Op = Builder.CreateInsertElement(V, Op, CI);
  return Op;
}
3777 Value *CodeGenFunction::
emitVectorWrappedScalar8Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
                                  const char *Name) {
  // i8 is not a legal type for AArch64, so we can't just use
  // a normal overloaded intrinsic call for these scalar types. Instead
  // we'll build 64-bit vectors w/ lane zero being our input values and
  // perform the operation on that. The backend can pattern match directly
  // to the scalar instruction.
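  // E.g. a scalar i8 operation roughly becomes:
  //   %l = insertelement <8 x i8> undef, i8 %a, i64 0
  //   %r = insertelement <8 x i8> undef, i8 %b, i64 0
  //   %v = call <8 x i8> @llvm.<overloaded intrinsic>(<8 x i8> %l, <8 x i8> %r)
  //   %res = extractelement <8 x i8> %v, i64 0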
3785 Ops[0] = vectorWrapScalar8(Ops[0]);
3786 Ops[1] = vectorWrapScalar8(Ops[1]);
3787 llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
3788 Value *V = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, Name);
3789 Constant *CI = ConstantInt::get(SizeTy, 0);
3790 return Builder.CreateExtractElement(V, CI, "lane0");
3793 Value *CodeGenFunction::
emitVectorWrappedScalar16Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
                                   const char *Name) {
  // i16 is not a legal type for AArch64, so we can't just use
3797 // a normal overloaded intrinsic call for these scalar types. Instead
3798 // we'll build 64-bit vectors w/ lane zero being our input values and
3799 // perform the operation on that. The back end can pattern match directly
3800 // to the scalar instruction.
3801 Ops[0] = vectorWrapScalar16(Ops[0]);
3802 Ops[1] = vectorWrapScalar16(Ops[1]);
3803 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
3804 Value *V = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, Name);
3805 Constant *CI = ConstantInt::get(SizeTy, 0);
3806 return Builder.CreateExtractElement(V, CI, "lane0");
3809 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
3810 const CallExpr *E) {
3811 unsigned HintID = static_cast<unsigned>(-1);
  switch (BuiltinID) {
  default: break;
  case AArch64::BI__builtin_arm_nop:
    HintID = 0;
    break;
  case AArch64::BI__builtin_arm_yield:
    HintID = 1;
    break;
  case AArch64::BI__builtin_arm_wfe:
    HintID = 2;
    break;
  case AArch64::BI__builtin_arm_wfi:
    HintID = 3;
    break;
  case AArch64::BI__builtin_arm_sev:
    HintID = 4;
    break;
  case AArch64::BI__builtin_arm_sevl:
    HintID = 5;
    break;
  }

  if (HintID != static_cast<unsigned>(-1)) {
3835 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
3836 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
3839 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
3840 assert((getContext().getTypeSize(E->getType()) == 32) &&
3841 "rbit of unusual size!");
3842 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
3843 return Builder.CreateCall(
3844 CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
3846 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
3847 assert((getContext().getTypeSize(E->getType()) == 64) &&
3848 "rbit of unusual size!");
3849 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
3850 return Builder.CreateCall(
3851 CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
3854 if (BuiltinID == AArch64::BI__clear_cache) {
3855 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
3856 const FunctionDecl *FD = E->getDirectCallee();
3857 SmallVector<Value*, 2> Ops;
3858 for (unsigned i = 0; i < 2; i++)
3859 Ops.push_back(EmitScalarExpr(E->getArg(i)));
3860 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
3861 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
3862 StringRef Name = FD->getName();
3863 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
3866 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3867 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
3868 getContext().getTypeSize(E->getType()) == 128) {
3869 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
3870 ? Intrinsic::aarch64_ldaxp
3871 : Intrinsic::aarch64_ldxp);
3873 Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
                                    "ldxp");
3877 Value *Val0 = Builder.CreateExtractValue(Val, 1);
3878 Value *Val1 = Builder.CreateExtractValue(Val, 0);
3879 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
3880 Val0 = Builder.CreateZExt(Val0, Int128Ty);
3881 Val1 = Builder.CreateZExt(Val1, Int128Ty);
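    // Combine the two 64-bit halves into one i128: result = (hi << 64) | lo.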
3883 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
3884 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
3885 Val = Builder.CreateOr(Val, Val1);
3886 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
3887 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3888 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
3889 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
3891 QualType Ty = E->getType();
3892 llvm::Type *RealResTy = ConvertType(Ty);
3893 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
3894 getContext().getTypeSize(Ty));
3895 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
3897 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
3898 ? Intrinsic::aarch64_ldaxr
3899 : Intrinsic::aarch64_ldxr,
3900 LoadAddr->getType());
3901 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
3903 if (RealResTy->isPointerTy())
3904 return Builder.CreateIntToPtr(Val, RealResTy);
3906 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
3907 return Builder.CreateBitCast(Val, RealResTy);
3910 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
3911 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
3912 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
3913 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
3914 ? Intrinsic::aarch64_stlxp
3915 : Intrinsic::aarch64_stxp);
3916 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, NULL);
3918 Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
                                      One);
3921 Value *Val = EmitScalarExpr(E->getArg(0));
3922 Builder.CreateStore(Val, Tmp);
3924 Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
3925 Val = Builder.CreateLoad(LdPtr);
3927 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
3928 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
                                         Int8PtrTy);
3931 return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "stxp");
3932 } else if (BuiltinID == AArch64::BI__builtin_arm_strex ||
3933 BuiltinID == AArch64::BI__builtin_arm_stlex) {
3934 Value *StoreVal = EmitScalarExpr(E->getArg(0));
3935 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
3937 QualType Ty = E->getArg(0)->getType();
3938 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
3939 getContext().getTypeSize(Ty));
3940 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
3942 if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
    else {
      StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
    }
3949 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
3950 ? Intrinsic::aarch64_stlxr
3951 : Intrinsic::aarch64_stxr,
3952 StoreAddr->getType());
3953 return Builder.CreateCall2(F, StoreVal, StoreAddr, "stxr");
3956 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
3957 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
3958 return Builder.CreateCall(F);
3962 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
3963 switch (BuiltinID) {
3964 case AArch64::BI__builtin_arm_crc32b:
3965 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
3966 case AArch64::BI__builtin_arm_crc32cb:
3967 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
3968 case AArch64::BI__builtin_arm_crc32h:
3969 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
3970 case AArch64::BI__builtin_arm_crc32ch:
3971 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
3972 case AArch64::BI__builtin_arm_crc32w:
3973 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
3974 case AArch64::BI__builtin_arm_crc32cw:
3975 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
3976 case AArch64::BI__builtin_arm_crc32d:
3977 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
3978 case AArch64::BI__builtin_arm_crc32cd:
3979 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
3982 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
3983 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3984 Value *Arg1 = EmitScalarExpr(E->getArg(1));
3985 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
3987 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
3988 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
3990 return Builder.CreateCall2(F, Arg0, Arg1);
3993 llvm::SmallVector<Value*, 4> Ops;
3994 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
3995 Ops.push_back(EmitScalarExpr(E->getArg(i)));
3997 ArrayRef<NeonIntrinsicInfo> SISDMap(AArch64SISDIntrinsicMap);
3998 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
      SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);

  if (Builtin) {
    Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
    Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
    assert(Result && "SISD intrinsic should have been handled");
    return Result;
  }
4008 llvm::APSInt Result;
4009 const Expr *Arg = E->getArg(E->getNumArgs()-1);
4010 NeonTypeFlags Type(0);
4011 if (Arg->isIntegerConstantExpr(Result, getContext()))
4012 // Determine the type of this overloaded NEON intrinsic.
4013 Type = NeonTypeFlags(Result.getZExtValue());
4015 bool usgn = Type.isUnsigned();
4016 bool quad = Type.isQuad();
4018 // Handle non-overloaded intrinsics first.
  switch (BuiltinID) {
  default: break;
4021 case NEON::BI__builtin_neon_vldrq_p128: {
4022 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
4023 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
4024 return Builder.CreateLoad(Ptr);
4026 case NEON::BI__builtin_neon_vstrq_p128: {
4027 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
4028 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
4029 return Builder.CreateStore(EmitScalarExpr(E->getArg(1)), Ptr);
4031 case NEON::BI__builtin_neon_vcvts_u32_f32:
  case NEON::BI__builtin_neon_vcvtd_u64_f64:
    usgn = true;
    // fall through
4035 case NEON::BI__builtin_neon_vcvts_s32_f32:
4036 case NEON::BI__builtin_neon_vcvtd_s64_f64: {
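    // Scalar FP-to-integer conversion, e.g. vcvtd_s64_f64(x) is simply
    // "fptosi double %x to i64" (fptoui for the unsigned variants).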
4037 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4038 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
4039 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
4040 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
4041 Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
    if (usgn)
      return Builder.CreateFPToUI(Ops[0], InTy);
4044 return Builder.CreateFPToSI(Ops[0], InTy);
4046 case NEON::BI__builtin_neon_vcvts_f32_u32:
  case NEON::BI__builtin_neon_vcvtd_f64_u64:
    usgn = true;
    // fall through
4050 case NEON::BI__builtin_neon_vcvts_f32_s32:
4051 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
4052 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4053 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
4054 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
4055 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
4056 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
    if (usgn)
      return Builder.CreateUIToFP(Ops[0], FTy);
4059 return Builder.CreateSIToFP(Ops[0], FTy);
4061 case NEON::BI__builtin_neon_vpaddd_s64: {
    llvm::Type *Ty =
      llvm::VectorType::get(llvm::Type::getInt64Ty(getLLVMContext()), 2);
4064 Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2i64, so make sure it's bitcast to that.
    Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
4067 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
4068 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
4069 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
4070 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2i64 into a scalar i64.
    return Builder.CreateAdd(Op0, Op1, "vpaddd");
  }
4074 case NEON::BI__builtin_neon_vpaddd_f64: {
    llvm::Type *Ty =
      llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2);
4077 Value *Vec = EmitScalarExpr(E->getArg(0));
4078 // The vector is v2f64, so make sure it's bitcast to that.
4079 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
4080 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
4081 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
4082 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
4083 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
4084 // Pairwise addition of a v2f64 into a scalar f64.
4085 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
4087 case NEON::BI__builtin_neon_vpadds_f32: {
    llvm::Type *Ty =
      llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2);
4090 Value *Vec = EmitScalarExpr(E->getArg(0));
4091 // The vector is v2f32, so make sure it's bitcast to that.
4092 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
4093 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
4094 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
4095 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
4096 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
4097 // Pairwise addition of a v2f32 into a scalar f32.
4098 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
4100 case NEON::BI__builtin_neon_vceqzd_s64:
4101 case NEON::BI__builtin_neon_vceqzd_f64:
4102 case NEON::BI__builtin_neon_vceqzs_f32:
4103 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4104 return EmitAArch64CompareBuiltinExpr(
4105 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OEQ,
4106 ICmpInst::ICMP_EQ, "vceqz");
4107 case NEON::BI__builtin_neon_vcgezd_s64:
4108 case NEON::BI__builtin_neon_vcgezd_f64:
4109 case NEON::BI__builtin_neon_vcgezs_f32:
4110 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4111 return EmitAArch64CompareBuiltinExpr(
4112 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OGE,
4113 ICmpInst::ICMP_SGE, "vcgez");
4114 case NEON::BI__builtin_neon_vclezd_s64:
4115 case NEON::BI__builtin_neon_vclezd_f64:
4116 case NEON::BI__builtin_neon_vclezs_f32:
4117 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4118 return EmitAArch64CompareBuiltinExpr(
4119 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OLE,
4120 ICmpInst::ICMP_SLE, "vclez");
4121 case NEON::BI__builtin_neon_vcgtzd_s64:
4122 case NEON::BI__builtin_neon_vcgtzd_f64:
4123 case NEON::BI__builtin_neon_vcgtzs_f32:
4124 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4125 return EmitAArch64CompareBuiltinExpr(
4126 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OGT,
4127 ICmpInst::ICMP_SGT, "vcgtz");
4128 case NEON::BI__builtin_neon_vcltzd_s64:
4129 case NEON::BI__builtin_neon_vcltzd_f64:
4130 case NEON::BI__builtin_neon_vcltzs_f32:
4131 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4132 return EmitAArch64CompareBuiltinExpr(
4133 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OLT,
4134 ICmpInst::ICMP_SLT, "vcltz");
4136 case NEON::BI__builtin_neon_vceqzd_u64: {
4137 llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
4138 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4139 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
4140 Ops[0] = Builder.CreateICmp(llvm::ICmpInst::ICMP_EQ, Ops[0],
4141 llvm::Constant::getNullValue(Ty));
4142 return Builder.CreateSExt(Ops[0], Ty, "vceqzd");
4144 case NEON::BI__builtin_neon_vceqd_f64:
4145 case NEON::BI__builtin_neon_vcled_f64:
4146 case NEON::BI__builtin_neon_vcltd_f64:
4147 case NEON::BI__builtin_neon_vcged_f64:
4148 case NEON::BI__builtin_neon_vcgtd_f64: {
4149 llvm::CmpInst::Predicate P;
4150 switch (BuiltinID) {
4151 default: llvm_unreachable("missing builtin ID in switch!");
4152 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
4153 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
4154 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
4155 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
4156 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
4158 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4159 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
4160 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
4161 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
4162 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
4164 case NEON::BI__builtin_neon_vceqs_f32:
4165 case NEON::BI__builtin_neon_vcles_f32:
4166 case NEON::BI__builtin_neon_vclts_f32:
4167 case NEON::BI__builtin_neon_vcges_f32:
4168 case NEON::BI__builtin_neon_vcgts_f32: {
4169 llvm::CmpInst::Predicate P;
4170 switch (BuiltinID) {
4171 default: llvm_unreachable("missing builtin ID in switch!");
4172 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
4173 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
4174 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
4175 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
4176 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
4178 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4179 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
4180 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
4181 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
4182 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
  case NEON::BI__builtin_neon_vceqd_s64:
  case NEON::BI__builtin_neon_vceqd_u64:
  case NEON::BI__builtin_neon_vcgtd_s64:
  case NEON::BI__builtin_neon_vcgtd_u64:
  case NEON::BI__builtin_neon_vcltd_s64:
  case NEON::BI__builtin_neon_vcltd_u64:
  case NEON::BI__builtin_neon_vcged_u64:
  case NEON::BI__builtin_neon_vcged_s64:
  case NEON::BI__builtin_neon_vcled_u64:
  case NEON::BI__builtin_neon_vcled_s64: {
    llvm::CmpInst::Predicate P;
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqd_s64:
    case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
    case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
    case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
    case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
    case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
    case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
    case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
    case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
    case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
    }
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
    Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
  }
  case NEON::BI__builtin_neon_vtstd_s64:
  case NEON::BI__builtin_neon_vtstd_u64: {
    llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                llvm::Constant::getNullValue(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtstd");
  }
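
  // The lane accessors need no target intrinsic: vset_lane lowers to an
  // insertelement and vget_lane/vdup[bhsd]_lane to an extractelement, with a
  // bitcast first where the operand arrives as a generic vector type.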
  case NEON::BI__builtin_neon_vset_lane_i8:
  case NEON::BI__builtin_neon_vset_lane_i16:
  case NEON::BI__builtin_neon_vset_lane_i32:
  case NEON::BI__builtin_neon_vset_lane_i64:
  case NEON::BI__builtin_neon_vset_lane_f32:
  case NEON::BI__builtin_neon_vsetq_lane_i8:
  case NEON::BI__builtin_neon_vsetq_lane_i16:
  case NEON::BI__builtin_neon_vsetq_lane_i32:
  case NEON::BI__builtin_neon_vsetq_lane_i64:
  case NEON::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vset_lane_f64:
    // The vector type needs a cast for the v1f64 variant.
    Ops[1] = Builder.CreateBitCast(Ops[1],
                                   llvm::VectorType::get(DoubleTy, 1));
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vsetq_lane_f64:
    // The vector type needs a cast for the v2f64 variant.
    Ops[1] = Builder.CreateBitCast(Ops[1],
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vget_lane_i8:
  case NEON::BI__builtin_neon_vdupb_lane_i8:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i8:
  case NEON::BI__builtin_neon_vdupb_laneq_i8:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_i16:
  case NEON::BI__builtin_neon_vduph_lane_i16:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i16:
  case NEON::BI__builtin_neon_vduph_laneq_i16:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_i32:
  case NEON::BI__builtin_neon_vdups_lane_i32:
    Ops[0] = Builder.CreateBitCast(
        Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vdups_lane_f32:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vdups_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i32:
  case NEON::BI__builtin_neon_vdups_laneq_i32:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_i64:
  case NEON::BI__builtin_neon_vdupd_lane_i64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vdupd_lane_f64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vdupd_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i64:
  case NEON::BI__builtin_neon_vdupd_laneq_i64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_f32:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vget_lane_f64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_f32:
  case NEON::BI__builtin_neon_vdups_laneq_f32:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vgetq_lane_f64:
  case NEON::BI__builtin_neon_vdupd_laneq_f64:
    Ops[0] = Builder.CreateBitCast(Ops[0],
        llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vaddd_s64:
  case NEON::BI__builtin_neon_vaddd_u64:
    return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
  case NEON::BI__builtin_neon_vsubd_s64:
  case NEON::BI__builtin_neon_vsubd_u64:
    return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
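
  // There is no scalar i16 saturating-doubling-multiply intrinsic, so the i16
  // operands are wrapped into v4i16 vectors (vectorWrapScalar16), the vector
  // sqdmull does the multiply, and lane 0 of the widened result feeds a
  // scalar saturating add/subtract.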
  case NEON::BI__builtin_neon_vqdmlalh_s16:
  case NEON::BI__builtin_neon_vqdmlslh_s16: {
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(vectorWrapScalar16(Ops[1]));
    ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
    llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                          ProductOps, "vqdmlXl");
    Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");

    unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
                            ? Intrinsic::aarch64_neon_sqadd
                            : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqshlud_n_s64: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
                        Ops, "vqshlu_n");
  }
  case NEON::BI__builtin_neon_vqshld_n_u64:
  case NEON::BI__builtin_neon_vqshld_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
                       ? Intrinsic::aarch64_neon_uqshl
                       : Intrinsic::aarch64_neon_sqshl;
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
  }
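  // The ISA has no immediate rounding-shift-right intrinsic here; a rounding
  // shift right by N is emitted as the corresponding rounding shift *left*
  // (urshl/srshl) by -N.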
  case NEON::BI__builtin_neon_vrshrd_n_u64:
  case NEON::BI__builtin_neon_vrshrd_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
                       ? Intrinsic::aarch64_neon_urshl
                       : Intrinsic::aarch64_neon_srshl;
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
    Ops[1] = ConstantInt::get(Int64Ty, -SV);
    return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
  }
  case NEON::BI__builtin_neon_vrsrad_n_u64:
  case NEON::BI__builtin_neon_vrsrad_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
                       ? Intrinsic::aarch64_neon_urshl
                       : Intrinsic::aarch64_neon_srshl;
    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
    Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Int64Ty), Ops[1],
                                 Builder.CreateSExt(Ops[2], Int64Ty));
    return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
  }
  case NEON::BI__builtin_neon_vshld_n_s64:
  case NEON::BI__builtin_neon_vshld_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateShl(
        Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
  }
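  // An IR shift by the full bit width is undefined, so the arithmetic
  // right-shift amount is clamped to 63; for an ashr that still yields the
  // correct result (a splat of the sign bit), matching the instruction.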
  case NEON::BI__builtin_neon_vshrd_n_s64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateAShr(
        Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
                                                   Amt->getZExtValue())),
        "shrd_n");
  }
  case NEON::BI__builtin_neon_vshrd_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    uint64_t ShiftAmt = Amt->getZExtValue();
    // Right-shifting an unsigned value by its size yields 0.
    if (ShiftAmt == 64)
      return ConstantInt::get(Int64Ty, 0);
    return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
                              "shrd_n");
  }
  case NEON::BI__builtin_neon_vsrad_n_s64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
    Ops[1] = Builder.CreateAShr(
        Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
                                                   Amt->getZExtValue())),
        "ssra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  }
  case NEON::BI__builtin_neon_vsrad_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
    uint64_t ShiftAmt = Amt->getZExtValue();
    // Right-shifting an unsigned value by its size yields 0.
    // As Op + 0 = Op, return Ops[0] directly.
    if (ShiftAmt == 64)
      return Ops[0];
    Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
                                "usra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  }
  case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
  case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
  case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
  case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
    Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
                                          "lane");
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(vectorWrapScalar16(Ops[1]));
    ProductOps.push_back(vectorWrapScalar16(Ops[2]));
    llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
    Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                          ProductOps, "vqdmlXl");
    Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
    Ops.pop_back();

    unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
                       BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
                          ? Intrinsic::aarch64_neon_sqadd
                          : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqdmlals_s32:
  case NEON::BI__builtin_neon_vqdmlsls_s32: {
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(Ops[1]);
    ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                     ProductOps, "vqdmlXl");

    unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
                            ? Intrinsic::aarch64_neon_sqadd
                            : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqdmlals_lane_s32:
  case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
  case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
  case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
    Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
                                          "lane");
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(Ops[1]);
    ProductOps.push_back(Ops[2]);
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                     ProductOps, "vqdmlXl");
    Ops.pop_back();

    unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
                       BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
                          ? Intrinsic::aarch64_neon_sqadd
                          : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
  }
  }

  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  // Not all intrinsics handled by the common case work for AArch64 yet, so
  // only defer to common code if it's been added to our special map.
  Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
                                   AArch64SIMDIntrinsicsProvenSorted);
  if (Builtin)
    return EmitCommonNeonBuiltinExpr(
        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
        Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);

  if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
    return V;

  unsigned Int;
  switch (BuiltinID) {
  default: return nullptr;
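  // vbsl needs no target intrinsic; it is open-coded on the integer-typed
  // vectors as:
  //   result = (mask & a) | (~mask & b)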
  case NEON::BI__builtin_neon_vbsl_v:
  case NEON::BI__builtin_neon_vbslq_v: {
    llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
    Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
    Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");

    Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
    Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
    Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
    return Builder.CreateBitCast(Ops[0], Ty);
  }
  case NEON::BI__builtin_neon_vfma_lane_v:
  case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
    // The ARM builtins (and instructions) have the addend as the first
    // operand, but the 'fma' intrinsics have it last. Swap it around here.
    Value *Addend = Ops[0];
    Value *Multiplicand = Ops[1];
    Value *LaneSource = Ops[2];
    Ops[0] = Multiplicand;
    Ops[1] = LaneSource;
    Ops[2] = Addend;

    // Now adjust things to handle the lane access.
    llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
      llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
      VTy;
    llvm::Constant *cst = cast<Constant>(Ops[3]);
    Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
    Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
    Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
    Ops.pop_back();

    Int = Intrinsic::fma;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
  }
  case NEON::BI__builtin_neon_vfma_laneq_v: {
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    // v1f64 fma should be mapped to Neon scalar f64 fma
    if (VTy && VTy->getElementType() == DoubleTy) {
      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
      llvm::Type *VTy = GetNeonType(this,
        NeonTypeFlags(NeonTypeFlags::Float64, false, true));
      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
      Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
      Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
      return Builder.CreateBitCast(Result, Ty);
    }
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                            VTy->getNumElements() * 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vfmaq_laneq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vfmas_lane_f32:
  case NEON::BI__builtin_neon_vfmas_laneq_f32:
  case NEON::BI__builtin_neon_vfmad_lane_f64:
  case NEON::BI__builtin_neon_vfmad_laneq_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(3)));
    llvm::Type *Ty = ConvertType(E->getCallReturnType());
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  case NEON::BI__builtin_neon_vfms_v:
  case NEON::BI__builtin_neon_vfmsq_v: { // Only used for FP types
    // FIXME: probably remove when we no longer support aarch64_simd.h
    // (arm_neon.h delegates to vfma).

    // The ARM builtins (and instructions) have the addend as the first
    // operand, but the 'fma' intrinsics have it last. Swap it around here.
    Value *Subtrahend = Ops[0];
    Value *Multiplicand = Ops[2];
    Ops[0] = Multiplicand;
    Ops[2] = Subtrahend;
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateFNeg(Ops[1]);
    Int = Intrinsic::fma;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmls");
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
    if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vmax_v:
  case NEON::BI__builtin_neon_vmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case NEON::BI__builtin_neon_vmin_v:
  case NEON::BI__builtin_neon_vminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
  case NEON::BI__builtin_neon_vabd_v:
  case NEON::BI__builtin_neon_vabdq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
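  // vpadal is split into two steps: the pairwise widening add is done by the
  // uaddlp/saddlp intrinsic, and the accumulation is a plain vector add.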
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    unsigned ArgElts = VTy->getNumElements();
    llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
    unsigned BitWidth = EltTy->getBitWidth();
    llvm::Type *ArgTy = llvm::VectorType::get(
        llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
    llvm::Type* Tys[2] = { VTy, ArgTy };
    Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
    SmallVector<llvm::Value*, 1> TmpOps;
    TmpOps.push_back(Ops[1]);
    Function *F = CGM.getIntrinsic(Int, Tys);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
    llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
    return Builder.CreateAdd(tmp, addend);
  }
  case NEON::BI__builtin_neon_vpmin_v:
  case NEON::BI__builtin_neon_vpminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case NEON::BI__builtin_neon_vpmax_v:
  case NEON::BI__builtin_neon_vpmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case NEON::BI__builtin_neon_vminnm_v:
  case NEON::BI__builtin_neon_vminnmq_v:
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
  case NEON::BI__builtin_neon_vmaxnm_v:
  case NEON::BI__builtin_neon_vmaxnmq_v:
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vrecpss_f32: {
    llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f32Type),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vrecpsd_f64: {
    llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext());
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vrshr_n_v:
  case NEON::BI__builtin_neon_vrshrq_n_v:
    // FIXME: this can be shared with 32-bit ARM, but not AArch64 at the
    // moment. After the final merge it should be added to
    // EmitCommonNeonBuiltinExpr.
    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
  case NEON::BI__builtin_neon_vqshlu_n_v:
  case NEON::BI__builtin_neon_vqshluq_n_v:
    // FIXME: AArch64 and ARM use different intrinsics for this, but they are
    // essentially compatible. It should be in EmitCommonNeonBuiltinExpr after
    // the final merge.
    Int = Intrinsic::aarch64_neon_sqshlu;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false);
  case NEON::BI__builtin_neon_vqshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    // FIXME: and again.
    Int = Intrinsic::aarch64_neon_sqrshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    // FIXME: there might be a pattern here.
    Int = Intrinsic::aarch64_neon_rshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    // FIXME: another one
    Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
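  // The rounding builtins map onto generic LLVM intrinsics (round, nearbyint,
  // floor, ceil, rint, trunc); only round-to-nearest-with-ties-to-even lacks
  // a generic form and uses the target's frintn intrinsic.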
  case NEON::BI__builtin_neon_vrnda_v:
  case NEON::BI__builtin_neon_vrndaq_v: {
    Int = Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrndi_v:
  case NEON::BI__builtin_neon_vrndiq_v: {
    Int = Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
  }
  case NEON::BI__builtin_neon_vrndm_v:
  case NEON::BI__builtin_neon_vrndmq_v: {
    Int = Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndn_v:
  case NEON::BI__builtin_neon_vrndnq_v: {
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndp_v:
  case NEON::BI__builtin_neon_vrndpq_v: {
    Int = Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndx_v:
  case NEON::BI__builtin_neon_vrndxq_v: {
    Int = Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrnd_v:
  case NEON::BI__builtin_neon_vrndq_v: {
    Int = Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
  }
  case NEON::BI__builtin_neon_vceqz_v:
  case NEON::BI__builtin_neon_vceqzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
                                         ICmpInst::ICMP_EQ, "vceqz");
  case NEON::BI__builtin_neon_vcgez_v:
  case NEON::BI__builtin_neon_vcgezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
                                         ICmpInst::ICMP_SGE, "vcgez");
  case NEON::BI__builtin_neon_vclez_v:
  case NEON::BI__builtin_neon_vclezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
                                         ICmpInst::ICMP_SLE, "vclez");
  case NEON::BI__builtin_neon_vcgtz_v:
  case NEON::BI__builtin_neon_vcgtzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
                                         ICmpInst::ICMP_SGT, "vcgtz");
  case NEON::BI__builtin_neon_vcltz_v:
  case NEON::BI__builtin_neon_vcltzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
                                         ICmpInst::ICMP_SLT, "vcltz");
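  // Int-to-FP conversions are plain uitofp/sitofp once the operand has been
  // bitcast to the right integer vector type.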
  case NEON::BI__builtin_neon_vcvt_f64_v:
  case NEON::BI__builtin_neon_vcvtq_f64_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f64_f32: {
    assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
           "unexpected vcvt_f64_f32 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_f32_f64: {
    assert(Type.getEltType() == NeonTypeFlags::Float32 &&
           "unexpected vcvt_f32_f64 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
  }
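  // Truncating FP-to-int conversions lower to fptoui/fptosi directly; the
  // explicitly rounded variants (vcvta/vcvtm/vcvtn/vcvtp) have no IR
  // equivalent and use the fcvt[amnp][su] target intrinsics below.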
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v: {
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
    if (usgn)
      return Builder.CreateFPToUI(Ops[0], Ty);
    return Builder.CreateFPToSI(Ops[0], Ty);
  }
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
  }
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
  }
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
  }
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
    bool Double =
      (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
    llvm::Type *InTy =
      GetNeonType(this,
                  NeonTypeFlags(Double ? NeonTypeFlags::Float64
                                       : NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, InTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
  }
  case NEON::BI__builtin_neon_vmulx_v:
  case NEON::BI__builtin_neon_vmulxq_v: {
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmul_lane_v:
  case NEON::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = false;
    if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
      Quad = true;
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    llvm::Type *VTy = GetNeonType(this,
      NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Result, Ty);
  }
  case NEON::BI__builtin_neon_vnegd_s64:
    return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
  case NEON::BI__builtin_neon_vpmaxnm_v:
  case NEON::BI__builtin_neon_vpmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_fmaxnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case NEON::BI__builtin_neon_vpminnm_v:
  case NEON::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_fminnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
  case NEON::BI__builtin_neon_vsqrt_v:
  case NEON::BI__builtin_neon_vsqrtq_v: {
    Int = Intrinsic::sqrt;
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vrbit_v:
  case NEON::BI__builtin_neon_vrbitq_v: {
    Int = Intrinsic::aarch64_neon_rbit;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
  }
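  // Across-vector reductions: the aarch64 *addv/*maxv/*minv intrinsics always
  // return an i32, so the i8/i16 element variants truncate the result back to
  // the element width afterwards.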
  case NEON::BI__builtin_neon_vaddv_u8:
    // FIXME: These are handled by the AArch64 scalar code.
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddv_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vaddv_u16:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddv_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddvq_u8:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddvq_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vaddvq_u16:
    usgn = true;
    // FALLTHROUGH
  case NEON::BI__builtin_neon_vaddvq_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxv_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxv_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxvq_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxvq_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxv_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxv_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmaxvq_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vmaxvq_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vminv_u8: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vminv_u16: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vminvq_u8: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vminvq_u16: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vminv_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vminv_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vminvq_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 8));
  }
  case NEON::BI__builtin_neon_vminvq_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vmul_n_f64: {
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
    return Builder.CreateFMul(Ops[0], RHS);
  }
  case NEON::BI__builtin_neon_vaddlv_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddlv_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddlvq_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlv_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddlv_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0],
                               llvm::IntegerType::get(getLLVMContext(), 16));
  }
  case NEON::BI__builtin_neon_vaddlvq_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = llvm::IntegerType::get(getLLVMContext(), 32);
    VTy =
      llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v: {
    Int = Intrinsic::aarch64_neon_vsri;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsri_n");
  }
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v: {
    Int = Intrinsic::aarch64_neon_vsli;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsli_n");
  }
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v: {
    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
    SmallVector<llvm::Value*,2> TmpOps;
    TmpOps.push_back(Ops[1]);
    TmpOps.push_back(Ops[2]);
    Function* F = CGM.getIntrinsic(Int, Ty);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return Builder.CreateAdd(Ops[0], tmp);
  }
  // FIXME: Sharing loads & stores with 32-bit is complicated by the absence
  // of an Align parameter here.
  case NEON::BI__builtin_neon_vld1_x2_v:
  case NEON::BI__builtin_neon_vld1q_x2_v:
  case NEON::BI__builtin_neon_vld1_x3_v:
  case NEON::BI__builtin_neon_vld1q_x3_v:
  case NEON::BI__builtin_neon_vld1_x4_v:
  case NEON::BI__builtin_neon_vld1q_x4_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    unsigned Int;
    switch (BuiltinID) {
    case NEON::BI__builtin_neon_vld1_x2_v:
    case NEON::BI__builtin_neon_vld1q_x2_v:
      Int = Intrinsic::aarch64_neon_ld1x2;
      break;
    case NEON::BI__builtin_neon_vld1_x3_v:
    case NEON::BI__builtin_neon_vld1q_x3_v:
      Int = Intrinsic::aarch64_neon_ld1x3;
      break;
    case NEON::BI__builtin_neon_vld1_x4_v:
    case NEON::BI__builtin_neon_vld1q_x4_v:
      Int = Intrinsic::aarch64_neon_ld1x4;
      break;
    }
    Function *F = CGM.getIntrinsic(Int, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vst1_x2_v:
  case NEON::BI__builtin_neon_vst1q_x2_v:
  case NEON::BI__builtin_neon_vst1_x3_v:
  case NEON::BI__builtin_neon_vst1q_x3_v:
  case NEON::BI__builtin_neon_vst1_x4_v:
  case NEON::BI__builtin_neon_vst1q_x4_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
    llvm::Type *Tys[2] = { VTy, PTy };
    unsigned Int;
    switch (BuiltinID) {
    case NEON::BI__builtin_neon_vst1_x2_v:
    case NEON::BI__builtin_neon_vst1q_x2_v:
      Int = Intrinsic::aarch64_neon_st1x2;
      break;
    case NEON::BI__builtin_neon_vst1_x3_v:
    case NEON::BI__builtin_neon_vst1q_x3_v:
      Int = Intrinsic::aarch64_neon_st1x3;
      break;
    case NEON::BI__builtin_neon_vst1_x4_v:
    case NEON::BI__builtin_neon_vst1q_x4_v:
      Int = Intrinsic::aarch64_neon_st1x4;
      break;
    }
    SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end());
    IntOps.push_back(Ops[0]);
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), IntOps, "");
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
    return Builder.CreateLoad(Ops[0]);
  case NEON::BI__builtin_neon_vst1_v:
  case NEON::BI__builtin_neon_vst1q_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  case NEON::BI__builtin_neon_vld1_lane_v:
  case NEON::BI__builtin_neon_vld1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case NEON::BI__builtin_neon_vst1_lane_v:
  case NEON::BI__builtin_neon_vst1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
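  // The structured loads (vld2/vld3/vld4 and the dup/lane forms) get a struct
  // of N vectors back from the intrinsic; the result is then stored through
  // the pointer in Ops[0], which the builtin passes for the aggregate return.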
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v: {
    llvm::Type *PTy =
      llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v: {
    llvm::Type *PTy =
      llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *PTy =
      llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    Ops[0] = Builder.CreateBitCast(Ops[0],
                llvm::PointerType::getUnqual(Ops[1]->getType()));
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateZExt(Ops[3],
                llvm::IntegerType::get(getLLVMContext(), 64));
    Ops[1] = Builder.CreateCall(F,
                ArrayRef<Value*>(Ops).slice(1), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateZExt(Ops[4],
                llvm::IntegerType::get(getLLVMContext(), 64));
    Ops[1] = Builder.CreateCall(F,
                ArrayRef<Value*>(Ops).slice(1), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
    Ops.push_back(Ops[1]);
    Ops.erase(Ops.begin()+1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateZExt(Ops[5],
                llvm::IntegerType::get(getLLVMContext(), 64));
    Ops[1] = Builder.CreateCall(F,
                ArrayRef<Value*>(Ops).slice(1), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[2] = Builder.CreateZExt(Ops[2],
                llvm::IntegerType::get(getLLVMContext(), 64));
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[3] = Builder.CreateZExt(Ops[3],
                llvm::IntegerType::get(getLLVMContext(), 64));
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    Ops.push_back(Ops[0]);
    Ops.erase(Ops.begin());
    Ops[4] = Builder.CreateZExt(Ops[4],
                llvm::IntegerType::get(getLLVMContext(), 64));
    llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
                        Ops, "");
  }
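  // vtrn transposes element pairs: pass 0 gathers the even-indexed elements
  // of both inputs, pass 1 the odd-indexed ones. For a <4 x i16> result, for
  // example, the two shuffle masks are <0,4,2,6> and <1,5,3,7>.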
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
        Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
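  // vuzp de-interleaves: pass 0 collects the even-indexed elements of the
  // concatenated inputs, pass 1 the odd-indexed ones; e.g. <0,2,4,6> and
  // <1,3,5,7> for a <4 x i16> result.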
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
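  // vzip interleaves the low halves of the two inputs on pass 0 and the high
  // halves on pass 1; e.g. <0,4,1,5> and <2,6,3,7> for a <4 x i16> result.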
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
        Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
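  // The quad-register table-lookup builtins map one-to-one onto the aarch64
  // tblN/tbxN intrinsics.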
  case NEON::BI__builtin_neon_vqtbl1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
                        Ops, "vtbl1");
  }
  case NEON::BI__builtin_neon_vqtbl2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
                        Ops, "vtbl2");
  }
  case NEON::BI__builtin_neon_vqtbl3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
                        Ops, "vtbl3");
  }
  case NEON::BI__builtin_neon_vqtbl4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
                        Ops, "vtbl4");
  }
  case NEON::BI__builtin_neon_vqtbx1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
                        Ops, "vtbx1");
  }
  case NEON::BI__builtin_neon_vqtbx2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
                        Ops, "vtbx2");
  }
  case NEON::BI__builtin_neon_vqtbx3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
                        Ops, "vtbx3");
  }
  case NEON::BI__builtin_neon_vqtbx4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
                        Ops, "vtbx4");
  }
  case NEON::BI__builtin_neon_vsqadd_v:
  case NEON::BI__builtin_neon_vsqaddq_v: {
    Int = Intrinsic::aarch64_neon_usqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
  }
  case NEON::BI__builtin_neon_vuqadd_v:
  case NEON::BI__builtin_neon_vuqaddq_v: {
    Int = Intrinsic::aarch64_neon_suqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
  }
  }
}
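/// BuildVector - Build a vector from the given scalar operands, producing a
/// ConstantVector directly when every operand is already a Constant and a
/// chain of insertelements otherwise.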
llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    SmallVector<llvm::Constant*, 16> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));

  return Result;
}
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }
  switch (BuiltinID) {
  default: return nullptr;
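  // _mm_prefetch lowers to llvm.prefetch with rw hard-coded to 0 (a read)
  // and the cache type to 1 (data cache); the locality hint is passed
  // through from the second argument.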
  case X86::BI_mm_prefetch: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *RW = ConstantInt::get(Int32Ty, 0);
    Value *Locality = EmitScalarExpr(E->getArg(1));
    Value *Data = ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return Builder.CreateCall4(F, Address, RW, Locality, Data);
  }
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                  llvm::ConstantInt::get(Ops[1]->getType(), 0));
  case X86::BI__builtin_ia32_ldmxcsr: {
    Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, Int8PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    Value *Tmp = CreateMemTemp(E->getType());
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp, Int8PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // cast val v2i64
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // extract (0, 1)
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(SizeTy, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // cast pointer to i64 & store
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
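  // palignr concatenates the two input vectors (the second operand forming
  // the low half) and extracts a byte-aligned window starting at the
  // immediate. The three ranges of the immediate are lowered differently
  // below: a plain shuffle, a logical right shift of the remaining operand,
  // or a zero vector once everything has been shifted out.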
  case X86::BI__builtin_ia32_palignr: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 9 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 8) {
      SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);

      // create i32 constant
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // create i32 constant
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr256: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 32> Indices;
      // 256-bit palignr operates on 128-bit lanes so we need to handle that
      for (unsigned l = 0; l != 2; ++l) {
        unsigned LaneStart = l * 16;
        unsigned LaneEnd = (l+1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
          Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
        }
      }

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // create i32 constant
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
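  // Non-temporal stores: emit an ordinary store tagged with !nontemporal
  // metadata so the backend can select a movnt instruction.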
  case X86::BI__builtin_ia32_movntps:
  case X86::BI__builtin_ia32_movntps256:
  case X86::BI__builtin_ia32_movntpd:
  case X86::BI__builtin_ia32_movntpd256:
  case X86::BI__builtin_ia32_movntdq:
  case X86::BI__builtin_ia32_movntdq256:
  case X86::BI__builtin_ia32_movnti:
  case X86::BI__builtin_ia32_movnti64: {
    llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
                                           Builder.getInt32(1));

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(Ops[0],
                                llvm::PointerType::getUnqual(Ops[1]->getType()),
                                      "cast");
    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);

    // If the operand is an integer, we can't assume alignment. Otherwise,
    // assume natural alignment.
    QualType ArgTy = E->getArg(1)->getType();
    unsigned Align;
    if (ArgTy->isIntegerType())
      Align = 1;
    else
      Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
    SI->setAlignment(Align);
    return SI;
  }
  // 3DNow!
  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    const char *name = nullptr;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch(BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pswapdsf:
    case X86::BI__builtin_ia32_pswapdsi:
      name = "pswapd";
      ID = Intrinsic::x86_3dnowa_pswapd;
      break;
    }
    llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
    Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, name);
  }
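  // Each rdrand/rdseed *_step builtin calls an intrinsic returning a
  // {value, success-flag} pair; the value is stored through the pointer
  // argument and the success flag is returned to the caller.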
  case X86::BI__builtin_ia32_rdrand16_step:
  case X86::BI__builtin_ia32_rdrand32_step:
  case X86::BI__builtin_ia32_rdrand64_step:
  case X86::BI__builtin_ia32_rdseed16_step:
  case X86::BI__builtin_ia32_rdseed32_step:
  case X86::BI__builtin_ia32_rdseed64_step: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_rdrand16_step:
      ID = Intrinsic::x86_rdrand_16;
      break;
    case X86::BI__builtin_ia32_rdrand32_step:
      ID = Intrinsic::x86_rdrand_32;
      break;
    case X86::BI__builtin_ia32_rdrand64_step:
      ID = Intrinsic::x86_rdrand_64;
      break;
    case X86::BI__builtin_ia32_rdseed16_step:
      ID = Intrinsic::x86_rdseed_16;
      break;
    case X86::BI__builtin_ia32_rdseed32_step:
      ID = Intrinsic::x86_rdseed_32;
      break;
    case X86::BI__builtin_ia32_rdseed64_step:
      ID = Intrinsic::x86_rdseed_64;
      break;
    }

    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
    Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
    return Builder.CreateExtractValue(Call, 1);
  }
  // AVX2 broadcast
  case X86::BI__builtin_ia32_vbroadcastsi256: {
    Value *VecTmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], VecTmp);
    Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
    return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
  }
  }
}
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return nullptr;
  // vec_ld, vec_lvsl, vec_lvsr
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);

    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
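  // vec_st, vec_stl, vec_ste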
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  }
}
// Emit an intrinsic that has 1 float or double operand.
static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0);
}
// Emit an intrinsic that has 3 float or double operands.
static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF,
                                   const CallExpr *E,
                                   unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall3(F, Src0, Src1, Src2);
}
Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID,
                                            const CallExpr *E) {
  switch (BuiltinID) {
  case R600::BI__builtin_amdgpu_div_scale:
  case R600::BI__builtin_amdgpu_div_scalef: {
    // Translate from the intrinsic's struct return to the builtin's out
    // argument.

    std::pair<llvm::Value *, unsigned> FlagOutPtr
      = EmitPointerWithAlignment(E->getArg(3));

    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Z = EmitScalarExpr(E->getArg(2));

    llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::AMDGPU_div_scale,
                                           X->getType());

    llvm::Value *Tmp = Builder.CreateCall3(Callee, X, Y, Z);

    llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
    llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);

    llvm::Type *RealFlagType
      = FlagOutPtr.first->getType()->getPointerElementType();

    llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
    llvm::StoreInst *FlagStore = Builder.CreateStore(FlagExt, FlagOutPtr.first);
    FlagStore->setAlignment(FlagOutPtr.second);
    return Result;
  }
  case R600::BI__builtin_amdgpu_div_fmas:
  case R600::BI__builtin_amdgpu_div_fmasf:
    return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fmas);
  case R600::BI__builtin_amdgpu_div_fixup:
  case R600::BI__builtin_amdgpu_div_fixupf:
    return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fixup);
  case R600::BI__builtin_amdgpu_trig_preop:
  case R600::BI__builtin_amdgpu_trig_preopf: {
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Value *Src1 = EmitScalarExpr(E->getArg(1));
    Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_trig_preop, Src0->getType());
    return Builder.CreateCall2(F, Src0, Src1);
  }
  case R600::BI__builtin_amdgpu_rcp:
  case R600::BI__builtin_amdgpu_rcpf:
    return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp);
  case R600::BI__builtin_amdgpu_rsq:
  case R600::BI__builtin_amdgpu_rsqf:
    return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq);
  case R600::BI__builtin_amdgpu_rsq_clamped:
  case R600::BI__builtin_amdgpu_rsq_clampedf:
    return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq_clamped);