//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
    CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
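
// For illustration: with Kind == llvm::AtomicRMWInst::Add, a call such as
// __sync_fetch_and_add(&x, 5) on an int lowers to roughly the following IR
// (sketch only; value names invented):
//
//   %old = atomicrmw add i32* %x, i32 5 seq_cst
//
// The pre-operation value %old is converted back to the source type and
// returned.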

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
    CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
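
// For illustration: __sync_add_and_fetch(&x, 5) on an int lowers to roughly
// (sketch only; value names invented):
//
//   %old = atomicrmw add i32* %x, i32 5 seq_cst
//   %new = add i32 %old, 5
//
// atomicrmw always yields the pre-operation value, so the binary op is
// re-applied to compute the post-operation result these builtins return.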

/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: llvm_unreachable("Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E->getLocStart(),
                      ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}

/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y);
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
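
// For illustration: with IntrinsicID == llvm::Intrinsic::sadd_with_overflow
// and i32 operands, this emits roughly (sketch only; value names invented):
//
//   %pair  = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum   = extractvalue {i32, i1} %pair, 0
//   %carry = extractvalue {i32, i1} %pair, 1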

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin. If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }

  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
      Imag->getType()->isFPOrFPVectorTy()
        ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
        : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
                                                       Builder.getTrue()),
                                   llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
    // FIXME: Get right address space.
    llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
    return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)), CI));
  }
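
  // Note on the lowering above: the i1 argument to @llvm.objectsize selects
  // the minimum (true) or maximum (false) remaining object size, so
  // __builtin_object_size types 2 and 3 map to true and types 0 and 1 map to
  // false; LLVM does not distinguish the subobject variants (types 1 and 3).
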
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__debugbreak: {
    Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts->Unreachable)
      EmitCheck(Builder.getFalse(), "builtin_unreachable",
                EmitCheckSourceLocation(E->getExprLoc()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()), "isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()), "isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity;
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()), "isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
      EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
      EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
      EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
      EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
      EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
      EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
      EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
      EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
      EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
      EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
      EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms. Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
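
  // Note on the stores above: the buffer layout assumed here for
  // llvm.eh.sjlj.setjmp is that word 0 holds the frame address, word 1 is
  // filled in by the backend with the resume address, and word 2 holds the
  // saved stack pointer.
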
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    llvm::Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }
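
  // For illustration, for a 32-bit T the sequence above is roughly (sketch
  // only; value names invented):
  //
  //   %prev = cmpxchg i32* %dst, i32 %oldval, i32 %newval seq_cst
  //
  // and %prev, the value that was in memory, is converted back to T and
  // returned.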

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    llvm::Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                 llvm::SequentiallyConsistent);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }
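
  // The bool variant emits the same cmpxchg but returns whether the swap
  // happened, roughly (sketch only; value names invented):
  //
  //   %prev = cmpxchg i32* %dst, i32 %oldval, i32 %newval seq_cst
  //   %ok   = icmp eq i32 %prev, %oldval
  //   %res  = zext i1 %ok to i32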

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(0);
  }

  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                             FunctionType::ExtInfo(),
                                             RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
  }

  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = 0;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Monotonic);
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Acquire);
        break;
      case 3:  // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Release);
        break;
      case 4:  // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::AcquireRelease);
        break;
      case 5:  // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
      llvm::Monotonic, llvm::Acquire, llvm::Release,
      llvm::AcquireRelease, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }
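
  // When the memory order is not a compile-time constant (the path above),
  // the emitted code switches on the dynamic order value, performs the xchg
  // with the corresponding ordering in a dedicated block, and merges the
  // results through a phi in the continuation block.
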

  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::SequentiallyConsistent);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }

  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SynchronizationScope Scope;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      Scope = llvm::SingleThread;
    else
      Scope = llvm::CrossThread;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::Acquire, Scope);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::Release, Scope);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AcquireRelease, Scope);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
    // in finite- or unsafe-math mode (the intrinsic has different semantics
    // for handling negative numbers compared to the library function, so
    // -fmath-errno=0 is not enough).
    if (!FD->hasAttr<ConstAttr>())
      break;
    if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
          CGM.getCodeGenOpts().NoNaNsFPMath))
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
    return RValue::get(Builder.CreateCall(F, Arg0));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Transform a call to pow* into a @llvm.pow.* intrinsic call.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(Builder.CreateCall3(F, FirstArg,
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
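
  // For illustration, for a float argument the case above emits roughly
  // (sketch only; value names invented):
  //
  //   %bits = bitcast float %x to i32
  //   %sign = icmp slt i32 %bits, 0
  //   %res  = zext i1 %sign to i32
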
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially cast, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {

    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    std::pair<llvm::Value*, unsigned> CarryOutPtr =
      EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                         CarryOutPtr.first);
    CarryOutStore->setAlignment(CarryOutPtr.second);
    return RValue::get(Sum2);
  }
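
  // A typical use of these builtins is multi-word arithmetic, e.g.
  // (illustrative C; variable names invented):
  //
  //   unsigned c1, c2;
  //   unsigned lo = __builtin_addc(a_lo, b_lo, 0, &c1);
  //   unsigned hi = __builtin_addc(a_hi, b_hi, c1, &c2);
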
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {

    // We translate all of these builtins directly to the relevant llvm IR node.

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    std::pair<llvm::Value *, unsigned> SumOutPtr =
      EmitPointerWithAlignment(E->getArg(2));

    // Decide which of the overflow intrinsics we are lowering to:
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown security overflow builtin id.");
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
      break;
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
      break;
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
      break;
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
      break;
    }

    // Emit the intrinsic, store the result through the out parameter, and
    // return the overflow flag.
    llvm::Value *Carry;
    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
    llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
    SumOutStore->setAlignment(SumOutPtr.second);

    return RValue::get(Carry);
  }
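
  // A typical use checks the returned flag before consuming the result,
  // e.g. (illustrative C; handle_overflow is a hypothetical callback):
  //
  //   int sum;
  //   if (__builtin_sadd_overflow(x, y, &sum))
  //     handle_overflow();
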
  case Builtin::BI__builtin_addressof:
    return RValue::get(EmitLValue(E->getArg(0)).getAddress());
  case Builtin::BI__noop:
    return RValue::get(0);
  }
1505 // If this is an alias for a lib function (e.g. __builtin_sin), emit
1506 // the call using the normal call path, but using the unmangled
1507 // version of the function name.
1508 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
1509 return emitLibraryCall(*this, FD, E,
1510 CGM.getBuiltinLibFunction(FD, BuiltinID));
1512 // If this is a predefined lib function (e.g. malloc), emit the call
1513 // using exactly the normal call path.
1514 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
1515 return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
1517 // See if we have a target specific intrinsic.
1518 const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
1519 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
1520 if (const char *Prefix =
1521 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()))
1522 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
1524 if (IntrinsicID != Intrinsic::not_intrinsic) {
1525 SmallVector<Value*, 16> Args;
1527 // Find out if any arguments are required to be integer constant
1528 // expressions.
1529 unsigned ICEArguments = 0;
1530 ASTContext::GetBuiltinTypeError Error;
1531 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
1532 assert(Error == ASTContext::GE_None && "Should not codegen an error");
1534 Function *F = CGM.getIntrinsic(IntrinsicID);
1535 llvm::FunctionType *FTy = F->getFunctionType();
1537 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
1538 Value *ArgValue;
1539 // If this is a normal argument, just emit it as a scalar.
1540 if ((ICEArguments & (1 << i)) == 0) {
1541 ArgValue = EmitScalarExpr(E->getArg(i));
1542 } else {
1543 // If this is required to be a constant, constant fold it so that we
1544 // know that the generated intrinsic gets a ConstantInt.
1545 llvm::APSInt Result;
1546 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
1547 assert(IsConst && "Constant arg isn't actually constant?");
1548 (void)IsConst;
1549 ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
1550 }
1552 // If the intrinsic arg type is different from the builtin arg type
1553 // we need to do a bit cast.
1554 llvm::Type *PTy = FTy->getParamType(i);
1555 if (PTy != ArgValue->getType()) {
1556 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
1557 "Must be able to losslessly bit cast to param");
1558 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
1559 }
1561 Args.push_back(ArgValue);
1562 }
1564 Value *V = Builder.CreateCall(F, Args);
1565 QualType BuiltinRetType = E->getType();
1567 llvm::Type *RetTy = VoidTy;
1568 if (!BuiltinRetType->isVoidType())
1569 RetTy = ConvertType(BuiltinRetType);
1571 if (RetTy != V->getType()) {
1572 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
1573 "Must be able to losslessly bit cast result type");
1574 V = Builder.CreateBitCast(V, RetTy);
1575 }
1577 return RValue::get(V);
1578 }
1580 // See if we have a target specific builtin that needs to be lowered.
1581 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
1582 return RValue::get(V);
1584 ErrorUnsupported(E, "builtin function");
1586 // Unknown builtin, for now just dump it out and return undef.
1587 return GetUndefRValue(E->getType());
1588 }
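// Illustrative summary of the fallback chain above (the x86 example is an
// assumption for exposition, not taken from this file): a builtin such as
// __builtin_ia32_pause has no custom lowering and is not a library function,
// but it matches a target intrinsic via
// Intrinsic::getIntrinsicForGCCBuiltin("x86", Name), so it is emitted through
// the generic argument/bit-cast loop; only builtins that fail every step
// reach ErrorUnsupported and return undef.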
1590 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
1591 const CallExpr *E) {
1592 switch (getTarget().getTriple().getArch()) {
1593 case llvm::Triple::aarch64:
1594 return EmitAArch64BuiltinExpr(BuiltinID, E);
1595 case llvm::Triple::arm:
1596 case llvm::Triple::thumb:
1597 return EmitARMBuiltinExpr(BuiltinID, E);
1598 case llvm::Triple::x86:
1599 case llvm::Triple::x86_64:
1600 return EmitX86BuiltinExpr(BuiltinID, E);
1601 case llvm::Triple::ppc:
1602 case llvm::Triple::ppc64:
1603 case llvm::Triple::ppc64le:
1604 return EmitPPCBuiltinExpr(BuiltinID, E);
1605 default:
1606 return 0;
1607 }
1608 }
1610 static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
1611 NeonTypeFlags TypeFlags,
1612 bool V1Ty = false) {
1613 int IsQuad = TypeFlags.isQuad();
1614 switch (TypeFlags.getEltType()) {
1615 case NeonTypeFlags::Int8:
1616 case NeonTypeFlags::Poly8:
1617 return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
1618 case NeonTypeFlags::Int16:
1619 case NeonTypeFlags::Poly16:
1620 case NeonTypeFlags::Float16:
1621 return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
1622 case NeonTypeFlags::Int32:
1623 return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
1624 case NeonTypeFlags::Int64:
1625 case NeonTypeFlags::Poly64:
1626 return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
1627 case NeonTypeFlags::Float32:
1628 return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
1629 case NeonTypeFlags::Float64:
1630 return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
1631 }
1632 llvm_unreachable("Unknown vector element type!");
1633 }
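// Quick reference for the mapping above (illustrative): the quad flag doubles
// the lane count of the 64-bit base vector, e.g.
//   NeonTypeFlags::Int8:    !quad -> <8 x i8>,     quad -> <16 x i8>
//   NeonTypeFlags::Int32:   !quad -> <2 x i32>,    quad -> <4 x i32>
//   NeonTypeFlags::Float64: !quad -> <1 x double>, quad -> <2 x double>
// and V1Ty forces a one-element vector regardless of the quad flag.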
1635 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
1636 unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
1637 Value* SV = llvm::ConstantVector::getSplat(nElts, C);
1638 return Builder.CreateShuffleVector(V, V, SV, "lane");
1639 }
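// For instance (sketch): splatting lane 1 of a <4 x i16> value produces
//   shufflevector <4 x i16> %v, <4 x i16> %v,
//                 <4 x i32> <i32 1, i32 1, i32 1, i32 1>
// i.e. a shuffle whose constant mask repeats the requested lane in every
// position.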
1641 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
1642 const char *name,
1643 unsigned shift, bool rightshift) {
1644 unsigned j = 0;
1645 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
1646 ai != ae; ++ai, ++j)
1647 if (shift > 0 && shift == j)
1648 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
1649 else
1650 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
1652 return Builder.CreateCall(F, Ops, name);
1653 }
1655 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
1656 bool neg) {
1657 int SV = cast<ConstantInt>(V)->getSExtValue();
1659 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
1660 llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
1661 return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
1662 }
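// Example (sketch): for Ty = <4 x i32> and an immediate of 3 this returns the
// constant <i32 3, i32 3, i32 3, i32 3>; with 'neg' set it returns all -3,
// which callers use to express right shifts as negative left shifts.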
1664 /// \brief Right-shift a vector by a constant.
1665 Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
1666 llvm::Type *Ty, bool usgn,
1667 const char *name) {
1668 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
1670 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
1671 int EltSize = VTy->getScalarSizeInBits();
1673 Vec = Builder.CreateBitCast(Vec, Ty);
1675 // lshr/ashr are undefined when the shift amount is equal to the vector
1676 // element size.
1677 if (ShiftAmt == EltSize) {
1678 if (usgn) {
1679 // Right-shifting an unsigned value by its size yields 0.
1680 llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
1681 return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
1682 } else {
1683 // Right-shifting a signed value by its size is equivalent
1684 // to a shift of size-1.
1685 --ShiftAmt;
1686 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
1687 }
1688 }
1690 Shift = EmitNeonShiftVector(Shift, Ty, false);
1691 if (usgn)
1692 return Builder.CreateLShr(Vec, Shift, name);
1693 else
1694 return Builder.CreateAShr(Vec, Shift, name);
1695 }
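// Illustrative behavior: a signed right shift by the full element size (e.g.
// vshr_n_s8 with shift == 8) is emitted as 'ashr by 7', preserving the
// architectural all-sign-bits result, while the unsigned variant folds
// directly to a zero vector instead of emitting an undefined lshr.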
1697 /// EmitPointerWithAlignment - Given an expression with a pointer type, find
1698 /// the alignment of the type referenced by the pointer. Skip over implicit
1699 /// casts.
1700 std::pair<llvm::Value*, unsigned>
1701 CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
1702 assert(Addr->getType()->isPointerType());
1703 Addr = Addr->IgnoreParens();
1704 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
1705 if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
1706 ICE->getSubExpr()->getType()->isPointerType()) {
1707 std::pair<llvm::Value*, unsigned> Ptr =
1708 EmitPointerWithAlignment(ICE->getSubExpr());
1709 Ptr.first = Builder.CreateBitCast(Ptr.first,
1710 ConvertType(Addr->getType()));
1711 return Ptr;
1712 } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1713 LValue LV = EmitLValue(ICE->getSubExpr());
1714 unsigned Align = LV.getAlignment().getQuantity();
1716 // FIXME: Once LValues are fixed to always set alignment,
1717 // cleanup this code.
1718 QualType PtTy = ICE->getSubExpr()->getType();
1719 if (!PtTy->isIncompleteType())
1720 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1721 else
1722 Align = 1;
1724 return std::make_pair(LV.getAddress(), Align);
1725 }
1726 }
1727 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
1728 if (UO->getOpcode() == UO_AddrOf) {
1729 LValue LV = EmitLValue(UO->getSubExpr());
1730 unsigned Align = LV.getAlignment().getQuantity();
1732 // FIXME: Once LValues are fixed to always set alignment,
1733 // cleanup this code.
1734 QualType PtTy = UO->getSubExpr()->getType();
1735 if (!PtTy->isIncompleteType())
1736 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1737 else
1738 Align = 1;
1740 return std::make_pair(LV.getAddress(), Align);
1741 }
1742 }
1744 unsigned Align = 1;
1745 QualType PtTy = Addr->getType()->getPointeeType();
1746 if (!PtTy->isIncompleteType())
1747 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1749 return std::make_pair(EmitScalarExpr(Addr), Align);
1750 }
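// Example of why this matters (sketch): for an 'int *p' argument this returns
// alignment 4, but for '(int *)&c' with 'char c' the bit-cast path recurses
// into the sub-expression and reports alignment 1, letting the NEON
// load/store emission below attach a conservative alignment to the memory
// operation.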
2752 static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
2753 unsigned BuiltinID,
2754 const CallExpr *E) {
1755 unsigned int Int = 0;
1756 // Scalar result generated across vectors
1757 bool AcrossVec = false;
1758 // Extend element of one-element vector
1759 bool ExtendEle = false;
1760 bool OverloadInt = false;
1761 bool OverloadCmpInt = false;
1762 bool IsFpCmpZInt = false;
1763 bool OverloadCvtInt = false;
1764 bool OverloadWideInt = false;
1765 bool OverloadNarrowInt = false;
1766 const char *s = NULL;
1768 SmallVector<Value *, 4> Ops;
1769 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
1770 Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
1771 }
1773 // AArch64 scalar builtins are not overloaded; they do not have an extra
1774 // argument that specifies the vector type, so each case must be handled here.
1775 switch (BuiltinID) {
1776 default: break;
1777 case AArch64::BI__builtin_neon_vdups_lane_f32:
1778 case AArch64::BI__builtin_neon_vdupd_lane_f64:
1779 case AArch64::BI__builtin_neon_vdups_laneq_f32:
1780 case AArch64::BI__builtin_neon_vdupd_laneq_f64: {
1781 return CGF.Builder.CreateExtractElement(Ops[0], Ops[1], "vdup_lane");
1782 }
1783 case AArch64::BI__builtin_neon_vdupb_lane_i8:
1784 case AArch64::BI__builtin_neon_vduph_lane_i16:
1785 case AArch64::BI__builtin_neon_vdups_lane_i32:
1786 case AArch64::BI__builtin_neon_vdupd_lane_i64:
1787 case AArch64::BI__builtin_neon_vdupb_laneq_i8:
1788 case AArch64::BI__builtin_neon_vduph_laneq_i16:
1789 case AArch64::BI__builtin_neon_vdups_laneq_i32:
1790 case AArch64::BI__builtin_neon_vdupd_laneq_i64: {
1791 // The backend treats NEON scalar types as v1ix types,
1792 // so we dup the lane from any vector into a v1ix vector
1793 // with a shufflevector.
1795 Value* SV = llvm::ConstantVector::getSplat(1, cast<ConstantInt>(Ops[1]));
1796 Value *Result = CGF.Builder.CreateShuffleVector(Ops[0], Ops[0], SV, s);
1797 llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
1798 // AArch64 intrinsic one-element vector type cast to
1799 // scalar type expected by the builtin
1800 return CGF.Builder.CreateBitCast(Result, Ty, s);
1801 }
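// As a concrete instance of the comment above (sketch): vdupb_laneq_i8 lane N
// is emitted as a shufflevector from the <16 x i8> source to a <1 x i8>
// vector with mask <N>, and that one-element vector is then bit-cast to the
// plain i8 the builtin is declared to return.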
1802 case AArch64::BI__builtin_neon_vqdmlalh_lane_s16 :
1803 case AArch64::BI__builtin_neon_vqdmlalh_laneq_s16 :
1804 case AArch64::BI__builtin_neon_vqdmlals_lane_s32 :
1805 case AArch64::BI__builtin_neon_vqdmlals_laneq_s32 :
1806 case AArch64::BI__builtin_neon_vqdmlslh_lane_s16 :
1807 case AArch64::BI__builtin_neon_vqdmlslh_laneq_s16 :
1808 case AArch64::BI__builtin_neon_vqdmlsls_lane_s32 :
1809 case AArch64::BI__builtin_neon_vqdmlsls_laneq_s32 : {
1810 Int = Intrinsic::arm_neon_vqadds;
1811 if (BuiltinID == AArch64::BI__builtin_neon_vqdmlslh_lane_s16 ||
1812 BuiltinID == AArch64::BI__builtin_neon_vqdmlslh_laneq_s16 ||
1813 BuiltinID == AArch64::BI__builtin_neon_vqdmlsls_lane_s32 ||
1814 BuiltinID == AArch64::BI__builtin_neon_vqdmlsls_laneq_s32) {
1815 Int = Intrinsic::arm_neon_vqsubs;
1816 }
1817 // Create the vqdmull call with b * c[i].
1818 llvm::Type *Ty = CGF.ConvertType(E->getArg(1)->getType());
1819 llvm::VectorType *OpVTy = llvm::VectorType::get(Ty, 1);
1820 Ty = CGF.ConvertType(E->getArg(0)->getType());
1821 llvm::VectorType *ResVTy = llvm::VectorType::get(Ty, 1);
1822 Value *F = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, ResVTy);
1823 Value *V = UndefValue::get(OpVTy);
1824 llvm::Constant *CI = ConstantInt::get(CGF.Int32Ty, 0);
1825 SmallVector<Value *, 2> MulOps;
1826 MulOps.push_back(Ops[1]);
1827 MulOps.push_back(Ops[2]);
1828 MulOps[0] = CGF.Builder.CreateInsertElement(V, MulOps[0], CI);
1829 MulOps[1] = CGF.Builder.CreateExtractElement(MulOps[1], Ops[3], "extract");
1830 MulOps[1] = CGF.Builder.CreateInsertElement(V, MulOps[1], CI);
1831 Value *MulRes = CGF.Builder.CreateCall2(F, MulOps[0], MulOps[1]);
1832 // Create the vqadds (or vqsubs) call with a +/- the vqdmull result.
1833 F = CGF.CGM.getIntrinsic(Int, ResVTy);
1834 SmallVector<Value *, 2> AddOps;
1835 AddOps.push_back(Ops[0]);
1836 AddOps.push_back(MulRes);
1837 V = UndefValue::get(ResVTy);
1838 AddOps[0] = CGF.Builder.CreateInsertElement(V, AddOps[0], CI);
1839 Value *AddRes = CGF.Builder.CreateCall2(F, AddOps[0], AddOps[1]);
1840 return CGF.Builder.CreateBitCast(AddRes, Ty);
1841 }
1842 case AArch64::BI__builtin_neon_vfmas_lane_f32:
1843 case AArch64::BI__builtin_neon_vfmas_laneq_f32:
1844 case AArch64::BI__builtin_neon_vfmad_lane_f64:
1845 case AArch64::BI__builtin_neon_vfmad_laneq_f64: {
1846 llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
1847 Value *F = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
1848 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
1849 return CGF.Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
1850 }
1851 // Scalar Floating-point Multiply Extended
1852 case AArch64::BI__builtin_neon_vmulxs_f32:
1853 case AArch64::BI__builtin_neon_vmulxd_f64: {
1854 Int = Intrinsic::aarch64_neon_vmulx;
1855 llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
1856 return CGF.EmitNeonCall(CGF.CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
1857 }
1858 case AArch64::BI__builtin_neon_vmul_n_f64: {
1859 // v1f64 vmul_n_f64 should be mapped to Neon scalar mul lane
1860 llvm::Type *VTy = GetNeonType(&CGF,
1861 NeonTypeFlags(NeonTypeFlags::Float64, false, false));
1862 Ops[0] = CGF.Builder.CreateBitCast(Ops[0], VTy);
1863 llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
1864 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], Idx, "extract");
1865 Value *Result = CGF.Builder.CreateFMul(Ops[0], Ops[1]);
1866 return CGF.Builder.CreateBitCast(Result, VTy);
1867 }
1868 case AArch64::BI__builtin_neon_vget_lane_i8:
1869 case AArch64::BI__builtin_neon_vget_lane_i16:
1870 case AArch64::BI__builtin_neon_vget_lane_i32:
1871 case AArch64::BI__builtin_neon_vget_lane_i64:
1872 case AArch64::BI__builtin_neon_vget_lane_f32:
1873 case AArch64::BI__builtin_neon_vget_lane_f64:
1874 case AArch64::BI__builtin_neon_vgetq_lane_i8:
1875 case AArch64::BI__builtin_neon_vgetq_lane_i16:
1876 case AArch64::BI__builtin_neon_vgetq_lane_i32:
1877 case AArch64::BI__builtin_neon_vgetq_lane_i64:
1878 case AArch64::BI__builtin_neon_vgetq_lane_f32:
1879 case AArch64::BI__builtin_neon_vgetq_lane_f64:
1880 return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vget_lane_i8, E);
1881 case AArch64::BI__builtin_neon_vset_lane_i8:
1882 case AArch64::BI__builtin_neon_vset_lane_i16:
1883 case AArch64::BI__builtin_neon_vset_lane_i32:
1884 case AArch64::BI__builtin_neon_vset_lane_i64:
1885 case AArch64::BI__builtin_neon_vset_lane_f32:
1886 case AArch64::BI__builtin_neon_vset_lane_f64:
1887 case AArch64::BI__builtin_neon_vsetq_lane_i8:
1888 case AArch64::BI__builtin_neon_vsetq_lane_i16:
1889 case AArch64::BI__builtin_neon_vsetq_lane_i32:
1890 case AArch64::BI__builtin_neon_vsetq_lane_i64:
1891 case AArch64::BI__builtin_neon_vsetq_lane_f32:
1892 case AArch64::BI__builtin_neon_vsetq_lane_f64:
1893 return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vset_lane_i8, E);
1895 case AArch64::BI__builtin_neon_vsha1h_u32:
1896 Int = Intrinsic::arm_neon_sha1h;
1897 s = "sha1h"; OverloadInt = true; break;
1898 case AArch64::BI__builtin_neon_vsha1cq_u32:
1899 Int = Intrinsic::aarch64_neon_sha1c;
1900 s = "sha1c"; break;
1901 case AArch64::BI__builtin_neon_vsha1pq_u32:
1902 Int = Intrinsic::aarch64_neon_sha1p;
1903 s = "sha1p"; break;
1904 case AArch64::BI__builtin_neon_vsha1mq_u32:
1905 Int = Intrinsic::aarch64_neon_sha1m;
1906 s = "sha1m"; break;
1907 // Scalar Add
1908 case AArch64::BI__builtin_neon_vaddd_s64:
1909 Int = Intrinsic::aarch64_neon_vaddds;
1910 s = "vaddds"; break;
1911 case AArch64::BI__builtin_neon_vaddd_u64:
1912 Int = Intrinsic::aarch64_neon_vadddu;
1913 s = "vadddu"; break;
1915 case AArch64::BI__builtin_neon_vsubd_s64:
1916 Int = Intrinsic::aarch64_neon_vsubds;
1917 s = "vsubds"; break;
1918 case AArch64::BI__builtin_neon_vsubd_u64:
1919 Int = Intrinsic::aarch64_neon_vsubdu;
1920 s = "vsubdu"; break;
1921 // Scalar Saturating Add
1922 case AArch64::BI__builtin_neon_vqaddb_s8:
1923 case AArch64::BI__builtin_neon_vqaddh_s16:
1924 case AArch64::BI__builtin_neon_vqadds_s32:
1925 case AArch64::BI__builtin_neon_vqaddd_s64:
1926 Int = Intrinsic::arm_neon_vqadds;
1927 s = "vqadds"; OverloadInt = true; break;
1928 case AArch64::BI__builtin_neon_vqaddb_u8:
1929 case AArch64::BI__builtin_neon_vqaddh_u16:
1930 case AArch64::BI__builtin_neon_vqadds_u32:
1931 case AArch64::BI__builtin_neon_vqaddd_u64:
1932 Int = Intrinsic::arm_neon_vqaddu;
1933 s = "vqaddu"; OverloadInt = true; break;
1934 // Scalar Saturating Sub
1935 case AArch64::BI__builtin_neon_vqsubb_s8:
1936 case AArch64::BI__builtin_neon_vqsubh_s16:
1937 case AArch64::BI__builtin_neon_vqsubs_s32:
1938 case AArch64::BI__builtin_neon_vqsubd_s64:
1939 Int = Intrinsic::arm_neon_vqsubs;
1940 s = "vqsubs"; OverloadInt = true; break;
1941 case AArch64::BI__builtin_neon_vqsubb_u8:
1942 case AArch64::BI__builtin_neon_vqsubh_u16:
1943 case AArch64::BI__builtin_neon_vqsubs_u32:
1944 case AArch64::BI__builtin_neon_vqsubd_u64:
1945 Int = Intrinsic::arm_neon_vqsubu;
1946 s = "vqsubu"; OverloadInt = true; break;
1947 // Scalar Shift Left
1948 case AArch64::BI__builtin_neon_vshld_s64:
1949 Int = Intrinsic::aarch64_neon_vshlds;
1950 s = "vshlds"; break;
1951 case AArch64::BI__builtin_neon_vshld_u64:
1952 Int = Intrinsic::aarch64_neon_vshldu;
1953 s = "vshldu"; break;
1954 // Scalar Saturating Shift Left
1955 case AArch64::BI__builtin_neon_vqshlb_s8:
1956 case AArch64::BI__builtin_neon_vqshlh_s16:
1957 case AArch64::BI__builtin_neon_vqshls_s32:
1958 case AArch64::BI__builtin_neon_vqshld_s64:
1959 Int = Intrinsic::aarch64_neon_vqshls;
1960 s = "vqshls"; OverloadInt = true; break;
1961 case AArch64::BI__builtin_neon_vqshlb_u8:
1962 case AArch64::BI__builtin_neon_vqshlh_u16:
1963 case AArch64::BI__builtin_neon_vqshls_u32:
1964 case AArch64::BI__builtin_neon_vqshld_u64:
1965 Int = Intrinsic::aarch64_neon_vqshlu;
1966 s = "vqshlu"; OverloadInt = true; break;
1967 // Scalar Rounding Shift Left
1968 case AArch64::BI__builtin_neon_vrshld_s64:
1969 Int = Intrinsic::aarch64_neon_vrshlds;
1970 s = "vrshlds"; break;
1971 case AArch64::BI__builtin_neon_vrshld_u64:
1972 Int = Intrinsic::aarch64_neon_vrshldu;
1973 s = "vrshldu"; break;
1974 // Scalar Saturating Rounding Shift Left
1975 case AArch64::BI__builtin_neon_vqrshlb_s8:
1976 case AArch64::BI__builtin_neon_vqrshlh_s16:
1977 case AArch64::BI__builtin_neon_vqrshls_s32:
1978 case AArch64::BI__builtin_neon_vqrshld_s64:
1979 Int = Intrinsic::aarch64_neon_vqrshls;
1980 s = "vqrshls"; OverloadInt = true; break;
1981 case AArch64::BI__builtin_neon_vqrshlb_u8:
1982 case AArch64::BI__builtin_neon_vqrshlh_u16:
1983 case AArch64::BI__builtin_neon_vqrshls_u32:
1984 case AArch64::BI__builtin_neon_vqrshld_u64:
1985 Int = Intrinsic::aarch64_neon_vqrshlu;
1986 s = "vqrshlu"; OverloadInt = true; break;
1987 // Scalar Reduce Pairwise Add
1988 case AArch64::BI__builtin_neon_vpaddd_s64:
1989 case AArch64::BI__builtin_neon_vpaddd_u64:
1990 Int = Intrinsic::aarch64_neon_vpadd; s = "vpadd";
1991 break;
1992 case AArch64::BI__builtin_neon_vpadds_f32:
1993 Int = Intrinsic::aarch64_neon_vpfadd; s = "vpfadd";
1994 break;
1995 case AArch64::BI__builtin_neon_vpaddd_f64:
1996 Int = Intrinsic::aarch64_neon_vpfaddq; s = "vpfaddq";
1997 break;
1998 // Scalar Reduce Pairwise Floating Point Max
1999 case AArch64::BI__builtin_neon_vpmaxs_f32:
2000 Int = Intrinsic::aarch64_neon_vpmax; s = "vpmax";
2001 break;
2002 case AArch64::BI__builtin_neon_vpmaxqd_f64:
2003 Int = Intrinsic::aarch64_neon_vpmaxq; s = "vpmaxq";
2004 break;
2005 // Scalar Reduce Pairwise Floating Point Min
2006 case AArch64::BI__builtin_neon_vpmins_f32:
2007 Int = Intrinsic::aarch64_neon_vpmin; s = "vpmin";
2008 break;
2009 case AArch64::BI__builtin_neon_vpminqd_f64:
2010 Int = Intrinsic::aarch64_neon_vpminq; s = "vpminq";
2011 break;
2012 // Scalar Reduce Pairwise Floating Point Maxnm
2013 case AArch64::BI__builtin_neon_vpmaxnms_f32:
2014 Int = Intrinsic::aarch64_neon_vpfmaxnm; s = "vpfmaxnm";
2015 break;
2016 case AArch64::BI__builtin_neon_vpmaxnmqd_f64:
2017 Int = Intrinsic::aarch64_neon_vpfmaxnmq; s = "vpfmaxnmq";
2018 break;
2019 // Scalar Reduce Pairwise Floating Point Minnm
2020 case AArch64::BI__builtin_neon_vpminnms_f32:
2021 Int = Intrinsic::aarch64_neon_vpfminnm; s = "vpfminnm";
2022 break;
2023 case AArch64::BI__builtin_neon_vpminnmqd_f64:
2024 Int = Intrinsic::aarch64_neon_vpfminnmq; s = "vpfminnmq";
2025 break;
2026 // The following are intrinsics with scalar results generated across vectors.
2027 case AArch64::BI__builtin_neon_vaddlv_s8:
2028 case AArch64::BI__builtin_neon_vaddlv_s16:
2029 case AArch64::BI__builtin_neon_vaddlvq_s8:
2030 case AArch64::BI__builtin_neon_vaddlvq_s16:
2031 case AArch64::BI__builtin_neon_vaddlvq_s32:
2032 Int = Intrinsic::aarch64_neon_saddlv;
2033 AcrossVec = true; ExtendEle = true; s = "saddlv"; break;
2034 case AArch64::BI__builtin_neon_vaddlv_u8:
2035 case AArch64::BI__builtin_neon_vaddlv_u16:
2036 case AArch64::BI__builtin_neon_vaddlvq_u8:
2037 case AArch64::BI__builtin_neon_vaddlvq_u16:
2038 case AArch64::BI__builtin_neon_vaddlvq_u32:
2039 Int = Intrinsic::aarch64_neon_uaddlv;
2040 AcrossVec = true; ExtendEle = true; s = "uaddlv"; break;
2041 case AArch64::BI__builtin_neon_vmaxv_s8:
2042 case AArch64::BI__builtin_neon_vmaxv_s16:
2043 case AArch64::BI__builtin_neon_vmaxvq_s8:
2044 case AArch64::BI__builtin_neon_vmaxvq_s16:
2045 case AArch64::BI__builtin_neon_vmaxvq_s32:
2046 Int = Intrinsic::aarch64_neon_smaxv;
2047 AcrossVec = true; ExtendEle = false; s = "smaxv"; break;
2048 case AArch64::BI__builtin_neon_vmaxv_u8:
2049 case AArch64::BI__builtin_neon_vmaxv_u16:
2050 case AArch64::BI__builtin_neon_vmaxvq_u8:
2051 case AArch64::BI__builtin_neon_vmaxvq_u16:
2052 case AArch64::BI__builtin_neon_vmaxvq_u32:
2053 Int = Intrinsic::aarch64_neon_umaxv;
2054 AcrossVec = true; ExtendEle = false; s = "umaxv"; break;
2055 case AArch64::BI__builtin_neon_vminv_s8:
2056 case AArch64::BI__builtin_neon_vminv_s16:
2057 case AArch64::BI__builtin_neon_vminvq_s8:
2058 case AArch64::BI__builtin_neon_vminvq_s16:
2059 case AArch64::BI__builtin_neon_vminvq_s32:
2060 Int = Intrinsic::aarch64_neon_sminv;
2061 AcrossVec = true; ExtendEle = false; s = "sminv"; break;
2062 case AArch64::BI__builtin_neon_vminv_u8:
2063 case AArch64::BI__builtin_neon_vminv_u16:
2064 case AArch64::BI__builtin_neon_vminvq_u8:
2065 case AArch64::BI__builtin_neon_vminvq_u16:
2066 case AArch64::BI__builtin_neon_vminvq_u32:
2067 Int = Intrinsic::aarch64_neon_uminv;
2068 AcrossVec = true; ExtendEle = false; s = "uminv"; break;
2069 case AArch64::BI__builtin_neon_vaddv_s8:
2070 case AArch64::BI__builtin_neon_vaddv_s16:
2071 case AArch64::BI__builtin_neon_vaddvq_s8:
2072 case AArch64::BI__builtin_neon_vaddvq_s16:
2073 case AArch64::BI__builtin_neon_vaddvq_s32:
2074 case AArch64::BI__builtin_neon_vaddvq_s64:
2075 case AArch64::BI__builtin_neon_vaddv_u8:
2076 case AArch64::BI__builtin_neon_vaddv_u16:
2077 case AArch64::BI__builtin_neon_vaddvq_u8:
2078 case AArch64::BI__builtin_neon_vaddvq_u16:
2079 case AArch64::BI__builtin_neon_vaddvq_u32:
2080 case AArch64::BI__builtin_neon_vaddvq_u64:
2081 case AArch64::BI__builtin_neon_vaddv_f32:
2082 case AArch64::BI__builtin_neon_vaddvq_f32:
2083 case AArch64::BI__builtin_neon_vaddvq_f64:
2084 Int = Intrinsic::aarch64_neon_vaddv;
2085 AcrossVec = true; ExtendEle = false; s = "vaddv"; break;
2086 case AArch64::BI__builtin_neon_vmaxv_f32:
2087 case AArch64::BI__builtin_neon_vmaxvq_f32:
2088 case AArch64::BI__builtin_neon_vmaxvq_f64:
2089 Int = Intrinsic::aarch64_neon_vmaxv;
2090 AcrossVec = true; ExtendEle = false; s = "vmaxv"; break;
2091 case AArch64::BI__builtin_neon_vminv_f32:
2092 case AArch64::BI__builtin_neon_vminvq_f32:
2093 case AArch64::BI__builtin_neon_vminvq_f64:
2094 Int = Intrinsic::aarch64_neon_vminv;
2095 AcrossVec = true; ExtendEle = false; s = "vminv"; break;
2096 case AArch64::BI__builtin_neon_vmaxnmv_f32:
2097 case AArch64::BI__builtin_neon_vmaxnmvq_f32:
2098 case AArch64::BI__builtin_neon_vmaxnmvq_f64:
2099 Int = Intrinsic::aarch64_neon_vmaxnmv;
2100 AcrossVec = true; ExtendEle = false; s = "vmaxnmv"; break;
2101 case AArch64::BI__builtin_neon_vminnmv_f32:
2102 case AArch64::BI__builtin_neon_vminnmvq_f32:
2103 case AArch64::BI__builtin_neon_vminnmvq_f64:
2104 Int = Intrinsic::aarch64_neon_vminnmv;
2105 AcrossVec = true; ExtendEle = false; s = "vminnmv"; break;
2106 // Scalar Integer Saturating Doubling Multiply Half High
2107 case AArch64::BI__builtin_neon_vqdmulhh_s16:
2108 case AArch64::BI__builtin_neon_vqdmulhs_s32:
2109 Int = Intrinsic::arm_neon_vqdmulh;
2110 s = "vqdmulh"; OverloadInt = true; break;
2111 // Scalar Integer Saturating Rounding Doubling Multiply Half High
2112 case AArch64::BI__builtin_neon_vqrdmulhh_s16:
2113 case AArch64::BI__builtin_neon_vqrdmulhs_s32:
2114 Int = Intrinsic::arm_neon_vqrdmulh;
2115 s = "vqrdmulh"; OverloadInt = true; break;
2116 // Scalar Floating-point Reciprocal Step
2117 case AArch64::BI__builtin_neon_vrecpss_f32:
2118 case AArch64::BI__builtin_neon_vrecpsd_f64:
2119 Int = Intrinsic::arm_neon_vrecps;
2120 s = "vrecps"; OverloadInt = true; break;
2121 // Scalar Floating-point Reciprocal Square Root Step
2122 case AArch64::BI__builtin_neon_vrsqrtss_f32:
2123 case AArch64::BI__builtin_neon_vrsqrtsd_f64:
2124 Int = Intrinsic::arm_neon_vrsqrts;
2125 s = "vrsqrts"; OverloadInt = true; break;
2126 // Scalar Signed Integer Convert To Floating-point
2127 case AArch64::BI__builtin_neon_vcvts_f32_s32:
2128 Int = Intrinsic::aarch64_neon_vcvtf32_s32;
2129 s = "vcvtf"; OverloadInt = false; break;
2130 case AArch64::BI__builtin_neon_vcvtd_f64_s64:
2131 Int = Intrinsic::aarch64_neon_vcvtf64_s64;
2132 s = "vcvtf"; OverloadInt = false; break;
2133 // Scalar Unsigned Integer Convert To Floating-point
2134 case AArch64::BI__builtin_neon_vcvts_f32_u32:
2135 Int = Intrinsic::aarch64_neon_vcvtf32_u32;
2136 s = "vcvtf"; OverloadInt = false; break;
2137 case AArch64::BI__builtin_neon_vcvtd_f64_u64:
2138 Int = Intrinsic::aarch64_neon_vcvtf64_u64;
2139 s = "vcvtf"; OverloadInt = false; break;
2140 // Scalar Floating-point Converts
2141 case AArch64::BI__builtin_neon_vcvtxd_f32_f64:
2142 Int = Intrinsic::aarch64_neon_fcvtxn;
2143 s = "vcvtxn"; OverloadCvtInt = true; break;
2144 case AArch64::BI__builtin_neon_vcvtas_s32_f32:
2145 case AArch64::BI__builtin_neon_vcvtad_s64_f64:
2146 Int = Intrinsic::aarch64_neon_fcvtas;
2147 s = "vcvtas"; OverloadCvtInt = true; break;
2148 case AArch64::BI__builtin_neon_vcvtas_u32_f32:
2149 case AArch64::BI__builtin_neon_vcvtad_u64_f64:
2150 Int = Intrinsic::aarch64_neon_fcvtau;
2151 s = "vcvtau"; OverloadCvtInt = true; break;
2152 case AArch64::BI__builtin_neon_vcvtms_s32_f32:
2153 case AArch64::BI__builtin_neon_vcvtmd_s64_f64:
2154 Int = Intrinsic::aarch64_neon_fcvtms;
2155 s = "vcvtms"; OverloadCvtInt = true; break;
2156 case AArch64::BI__builtin_neon_vcvtms_u32_f32:
2157 case AArch64::BI__builtin_neon_vcvtmd_u64_f64:
2158 Int = Intrinsic::aarch64_neon_fcvtmu;
2159 s = "vcvtmu"; OverloadCvtInt = true; break;
2160 case AArch64::BI__builtin_neon_vcvtns_s32_f32:
2161 case AArch64::BI__builtin_neon_vcvtnd_s64_f64:
2162 Int = Intrinsic::aarch64_neon_fcvtns;
2163 s = "vcvtns"; OverloadCvtInt = true; break;
2164 case AArch64::BI__builtin_neon_vcvtns_u32_f32:
2165 case AArch64::BI__builtin_neon_vcvtnd_u64_f64:
2166 Int = Intrinsic::aarch64_neon_fcvtnu;
2167 s = "vcvtnu"; OverloadCvtInt = true; break;
2168 case AArch64::BI__builtin_neon_vcvtps_s32_f32:
2169 case AArch64::BI__builtin_neon_vcvtpd_s64_f64:
2170 Int = Intrinsic::aarch64_neon_fcvtps;
2171 s = "vcvtps"; OverloadCvtInt = true; break;
2172 case AArch64::BI__builtin_neon_vcvtps_u32_f32:
2173 case AArch64::BI__builtin_neon_vcvtpd_u64_f64:
2174 Int = Intrinsic::aarch64_neon_fcvtpu;
2175 s = "vcvtpu"; OverloadCvtInt = true; break;
2176 case AArch64::BI__builtin_neon_vcvts_s32_f32:
2177 case AArch64::BI__builtin_neon_vcvtd_s64_f64:
2178 Int = Intrinsic::aarch64_neon_fcvtzs;
2179 s = "vcvtzs"; OverloadCvtInt = true; break;
2180 case AArch64::BI__builtin_neon_vcvts_u32_f32:
2181 case AArch64::BI__builtin_neon_vcvtd_u64_f64:
2182 Int = Intrinsic::aarch64_neon_fcvtzu;
2183 s = "vcvtzu"; OverloadCvtInt = true; break;
2184 // Scalar Floating-point Reciprocal Estimate
2185 case AArch64::BI__builtin_neon_vrecpes_f32:
2186 case AArch64::BI__builtin_neon_vrecped_f64:
2187 Int = Intrinsic::arm_neon_vrecpe;
2188 s = "vrecpe"; OverloadInt = true; break;
2189 // Scalar Floating-point Reciprocal Exponent
2190 case AArch64::BI__builtin_neon_vrecpxs_f32:
2191 case AArch64::BI__builtin_neon_vrecpxd_f64:
2192 Int = Intrinsic::aarch64_neon_vrecpx;
2193 s = "vrecpx"; OverloadInt = true; break;
2194 // Scalar Floating-point Reciprocal Square Root Estimate
2195 case AArch64::BI__builtin_neon_vrsqrtes_f32:
2196 case AArch64::BI__builtin_neon_vrsqrted_f64:
2197 Int = Intrinsic::arm_neon_vrsqrte;
2198 s = "vrsqrte"; OverloadInt = true; break;
2199 // Scalar Compare Equal
2200 case AArch64::BI__builtin_neon_vceqd_s64:
2201 case AArch64::BI__builtin_neon_vceqd_u64:
2202 Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
2203 OverloadCmpInt = true; break;
2204 // Scalar Compare Equal To Zero
2205 case AArch64::BI__builtin_neon_vceqzd_s64:
2206 case AArch64::BI__builtin_neon_vceqzd_u64:
2207 Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
2208 // Add implicit zero operand.
2209 Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
2210 OverloadCmpInt = true; break;
2211 // Scalar Compare Greater Than or Equal
2212 case AArch64::BI__builtin_neon_vcged_s64:
2213 Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
2214 OverloadCmpInt = true; break;
2215 case AArch64::BI__builtin_neon_vcged_u64:
2216 Int = Intrinsic::aarch64_neon_vchs; s = "vcge";
2217 OverloadCmpInt = true; break;
2218 // Scalar Compare Greater Than or Equal To Zero
2219 case AArch64::BI__builtin_neon_vcgezd_s64:
2220 Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
2221 // Add implicit zero operand.
2222 Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
2223 OverloadCmpInt = true; break;
2224 // Scalar Compare Greater Than
2225 case AArch64::BI__builtin_neon_vcgtd_s64:
2226 Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
2227 OverloadCmpInt = true; break;
2228 case AArch64::BI__builtin_neon_vcgtd_u64:
2229 Int = Intrinsic::aarch64_neon_vchi; s = "vcgt";
2230 OverloadCmpInt = true; break;
2231 // Scalar Compare Greater Than Zero
2232 case AArch64::BI__builtin_neon_vcgtzd_s64:
2233 Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
2234 // Add implicit zero operand.
2235 Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
2236 OverloadCmpInt = true; break;
2237 // Scalar Compare Less Than or Equal
2238 case AArch64::BI__builtin_neon_vcled_s64:
2239 Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
2240 OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
2241 case AArch64::BI__builtin_neon_vcled_u64:
2242 Int = Intrinsic::aarch64_neon_vchs; s = "vchs";
2243 OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
2244 // Scalar Compare Less Than or Equal To Zero
2245 case AArch64::BI__builtin_neon_vclezd_s64:
2246 Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
2247 // Add implicit zero operand.
2248 Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
2249 OverloadCmpInt = true; break;
2250 // Scalar Compare Less Than
2251 case AArch64::BI__builtin_neon_vcltd_s64:
2252 Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
2253 OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
2254 case AArch64::BI__builtin_neon_vcltd_u64:
2255 Int = Intrinsic::aarch64_neon_vchi; s = "vchi";
2256 OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
2257 // Scalar Compare Less Than Zero
2258 case AArch64::BI__builtin_neon_vcltzd_s64:
2259 Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
2260 // Add implicit zero operand.
2261 Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
2262 OverloadCmpInt = true; break;
2263 // Scalar Floating-point Compare Equal
2264 case AArch64::BI__builtin_neon_vceqs_f32:
2265 case AArch64::BI__builtin_neon_vceqd_f64:
2266 Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
2267 OverloadCmpInt = true; break;
2268 // Scalar Floating-point Compare Equal To Zero
2269 case AArch64::BI__builtin_neon_vceqzs_f32:
2270 case AArch64::BI__builtin_neon_vceqzd_f64:
2271 Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
2272 // Add implicit zero operand.
2273 Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
2274 IsFpCmpZInt = true;
2275 OverloadCmpInt = true; break;
2276 // Scalar Floating-point Compare Greater Than Or Equal
2277 case AArch64::BI__builtin_neon_vcges_f32:
2278 case AArch64::BI__builtin_neon_vcged_f64:
2279 Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
2280 OverloadCmpInt = true; break;
2281 // Scalar Floating-point Compare Greater Than Or Equal To Zero
2282 case AArch64::BI__builtin_neon_vcgezs_f32:
2283 case AArch64::BI__builtin_neon_vcgezd_f64:
2284 Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
2285 // Add implicit zero operand.
2286 Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
2287 IsFpCmpZInt = true;
2288 OverloadCmpInt = true; break;
2289 // Scalar Floating-point Compare Greater Than
2290 case AArch64::BI__builtin_neon_vcgts_f32:
2291 case AArch64::BI__builtin_neon_vcgtd_f64:
2292 Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
2293 OverloadCmpInt = true; break;
2294 // Scalar Floating-point Compare Greater Than Zero
2295 case AArch64::BI__builtin_neon_vcgtzs_f32:
2296 case AArch64::BI__builtin_neon_vcgtzd_f64:
2297 Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
2298 // Add implicit zero operand.
2299 Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
2300 IsFpCmpZInt = true;
2301 OverloadCmpInt = true; break;
2302 // Scalar Floating-point Compare Less Than or Equal
2303 case AArch64::BI__builtin_neon_vcles_f32:
2304 case AArch64::BI__builtin_neon_vcled_f64:
2305 Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
2306 OverloadCmpInt = true; break;
2307 // Scalar Floating-point Compare Less Than Or Equal To Zero
2308 case AArch64::BI__builtin_neon_vclezs_f32:
2309 case AArch64::BI__builtin_neon_vclezd_f64:
2310 Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
2311 // Add implicit zero operand.
2312 Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
2313 IsFpCmpZInt = true;
2314 OverloadCmpInt = true; break;
2315 // Scalar Floating-point Compare Less Than
2316 case AArch64::BI__builtin_neon_vclts_f32:
2317 case AArch64::BI__builtin_neon_vcltd_f64:
2318 Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
2319 OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
2320 // Scalar Floating-point Compare Less Than Zero
2321 case AArch64::BI__builtin_neon_vcltzs_f32:
2322 case AArch64::BI__builtin_neon_vcltzd_f64:
2323 Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
2324 // Add implicit zero operand.
2325 Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
2326 IsFpCmpZInt = true;
2327 OverloadCmpInt = true; break;
2328 // Scalar Floating-point Absolute Compare Greater Than Or Equal
2329 case AArch64::BI__builtin_neon_vcages_f32:
2330 case AArch64::BI__builtin_neon_vcaged_f64:
2331 Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
2332 OverloadCmpInt = true; break;
2333 // Scalar Floating-point Absolute Compare Greater Than
2334 case AArch64::BI__builtin_neon_vcagts_f32:
2335 case AArch64::BI__builtin_neon_vcagtd_f64:
2336 Int = Intrinsic::aarch64_neon_vcagt; s = "vcagt";
2337 OverloadCmpInt = true; break;
2338 // Scalar Floating-point Absolute Compare Less Than Or Equal
2339 case AArch64::BI__builtin_neon_vcales_f32:
2340 case AArch64::BI__builtin_neon_vcaled_f64:
2341 Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
2342 OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
2343 // Scalar Floating-point Absolute Compare Less Than
2344 case AArch64::BI__builtin_neon_vcalts_f32:
2345 case AArch64::BI__builtin_neon_vcaltd_f64:
2346 Int = Intrinsic::aarch64_neon_vcagt; s = "vcalt";
2347 OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
2348 // Scalar Compare Bitwise Test Bits
2349 case AArch64::BI__builtin_neon_vtstd_s64:
2350 case AArch64::BI__builtin_neon_vtstd_u64:
2351 Int = Intrinsic::aarch64_neon_vtstd; s = "vtst";
2352 OverloadCmpInt = true; break;
2353 // Scalar Absolute Value
2354 case AArch64::BI__builtin_neon_vabsd_s64:
2355 Int = Intrinsic::aarch64_neon_vabs;
2356 s = "vabs"; OverloadInt = false; break;
2357 // Scalar Absolute Difference
2358 case AArch64::BI__builtin_neon_vabds_f32:
2359 case AArch64::BI__builtin_neon_vabdd_f64:
2360 Int = Intrinsic::aarch64_neon_vabd;
2361 s = "vabd"; OverloadInt = true; break;
2362 // Scalar Signed Saturating Absolute Value
2363 case AArch64::BI__builtin_neon_vqabsb_s8:
2364 case AArch64::BI__builtin_neon_vqabsh_s16:
2365 case AArch64::BI__builtin_neon_vqabss_s32:
2366 case AArch64::BI__builtin_neon_vqabsd_s64:
2367 Int = Intrinsic::arm_neon_vqabs;
2368 s = "vqabs"; OverloadInt = true; break;
2369 // Scalar Negate
2370 case AArch64::BI__builtin_neon_vnegd_s64:
2371 Int = Intrinsic::aarch64_neon_vneg;
2372 s = "vneg"; OverloadInt = false; break;
2373 // Scalar Signed Saturating Negate
2374 case AArch64::BI__builtin_neon_vqnegb_s8:
2375 case AArch64::BI__builtin_neon_vqnegh_s16:
2376 case AArch64::BI__builtin_neon_vqnegs_s32:
2377 case AArch64::BI__builtin_neon_vqnegd_s64:
2378 Int = Intrinsic::arm_neon_vqneg;
2379 s = "vqneg"; OverloadInt = true; break;
2380 // Scalar Signed Saturating Accumulated of Unsigned Value
2381 case AArch64::BI__builtin_neon_vuqaddb_s8:
2382 case AArch64::BI__builtin_neon_vuqaddh_s16:
2383 case AArch64::BI__builtin_neon_vuqadds_s32:
2384 case AArch64::BI__builtin_neon_vuqaddd_s64:
2385 Int = Intrinsic::aarch64_neon_vuqadd;
2386 s = "vuqadd"; OverloadInt = true; break;
2387 // Scalar Unsigned Saturating Accumulated of Signed Value
2388 case AArch64::BI__builtin_neon_vsqaddb_u8:
2389 case AArch64::BI__builtin_neon_vsqaddh_u16:
2390 case AArch64::BI__builtin_neon_vsqadds_u32:
2391 case AArch64::BI__builtin_neon_vsqaddd_u64:
2392 Int = Intrinsic::aarch64_neon_vsqadd;
2393 s = "vsqadd"; OverloadInt = true; break;
2394 // Signed Saturating Doubling Multiply-Add Long
2395 case AArch64::BI__builtin_neon_vqdmlalh_s16:
2396 case AArch64::BI__builtin_neon_vqdmlals_s32:
2397 Int = Intrinsic::aarch64_neon_vqdmlal;
2398 s = "vqdmlal"; OverloadWideInt = true; break;
2399 // Signed Saturating Doubling Multiply-Subtract Long
2400 case AArch64::BI__builtin_neon_vqdmlslh_s16:
2401 case AArch64::BI__builtin_neon_vqdmlsls_s32:
2402 Int = Intrinsic::aarch64_neon_vqdmlsl;
2403 s = "vqdmlsl"; OverloadWideInt = true; break;
2404 // Signed Saturating Doubling Multiply Long
2405 case AArch64::BI__builtin_neon_vqdmullh_s16:
2406 case AArch64::BI__builtin_neon_vqdmulls_s32:
2407 Int = Intrinsic::arm_neon_vqdmull;
2408 s = "vqdmull"; OverloadWideInt = true; break;
2409 // Scalar Signed Saturating Extract Unsigned Narrow
2410 case AArch64::BI__builtin_neon_vqmovunh_s16:
2411 case AArch64::BI__builtin_neon_vqmovuns_s32:
2412 case AArch64::BI__builtin_neon_vqmovund_s64:
2413 Int = Intrinsic::arm_neon_vqmovnsu;
2414 s = "vqmovun"; OverloadNarrowInt = true; break;
2415 // Scalar Signed Saturating Extract Narrow
2416 case AArch64::BI__builtin_neon_vqmovnh_s16:
2417 case AArch64::BI__builtin_neon_vqmovns_s32:
2418 case AArch64::BI__builtin_neon_vqmovnd_s64:
2419 Int = Intrinsic::arm_neon_vqmovns;
2420 s = "vqmovn"; OverloadNarrowInt = true; break;
2421 // Scalar Unsigned Saturating Extract Narrow
2422 case AArch64::BI__builtin_neon_vqmovnh_u16:
2423 case AArch64::BI__builtin_neon_vqmovns_u32:
2424 case AArch64::BI__builtin_neon_vqmovnd_u64:
2425 Int = Intrinsic::arm_neon_vqmovnu;
2426 s = "vqmovn"; OverloadNarrowInt = true; break;
2427 // Scalar Signed Shift Right (Immediate)
2428 case AArch64::BI__builtin_neon_vshrd_n_s64:
2429 Int = Intrinsic::aarch64_neon_vshrds_n;
2430 s = "vsshr"; OverloadInt = false; break;
2431 // Scalar Unsigned Shift Right (Immediate)
2432 case AArch64::BI__builtin_neon_vshrd_n_u64:
2433 Int = Intrinsic::aarch64_neon_vshrdu_n;
2434 s = "vushr"; OverloadInt = false; break;
2435 // Scalar Signed Rounding Shift Right (Immediate)
2436 case AArch64::BI__builtin_neon_vrshrd_n_s64:
2437 Int = Intrinsic::aarch64_neon_vsrshr;
2438 s = "vsrshr"; OverloadInt = true; break;
2439 // Scalar Unsigned Rounding Shift Right (Immediate)
2440 case AArch64::BI__builtin_neon_vrshrd_n_u64:
2441 Int = Intrinsic::aarch64_neon_vurshr;
2442 s = "vurshr"; OverloadInt = true; break;
2443 // Scalar Signed Shift Right and Accumulate (Immediate)
2444 case AArch64::BI__builtin_neon_vsrad_n_s64:
2445 Int = Intrinsic::aarch64_neon_vsrads_n;
2446 s = "vssra"; OverloadInt = false; break;
2447 // Scalar Unsigned Shift Right and Accumulate (Immediate)
2448 case AArch64::BI__builtin_neon_vsrad_n_u64:
2449 Int = Intrinsic::aarch64_neon_vsradu_n;
2450 s = "vusra"; OverloadInt = false; break;
2451 // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
2452 case AArch64::BI__builtin_neon_vrsrad_n_s64:
2453 Int = Intrinsic::aarch64_neon_vrsrads_n;
2454 s = "vsrsra"; OverloadInt = false; break;
2455 // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
2456 case AArch64::BI__builtin_neon_vrsrad_n_u64:
2457 Int = Intrinsic::aarch64_neon_vrsradu_n;
2458 s = "vursra"; OverloadInt = false; break;
2459 // Scalar Signed/Unsigned Shift Left (Immediate)
2460 case AArch64::BI__builtin_neon_vshld_n_s64:
2461 case AArch64::BI__builtin_neon_vshld_n_u64:
2462 Int = Intrinsic::aarch64_neon_vshld_n;
2463 s = "vshl"; OverloadInt = false; break;
2464 // Signed Saturating Shift Left (Immediate)
2465 case AArch64::BI__builtin_neon_vqshlb_n_s8:
2466 case AArch64::BI__builtin_neon_vqshlh_n_s16:
2467 case AArch64::BI__builtin_neon_vqshls_n_s32:
2468 case AArch64::BI__builtin_neon_vqshld_n_s64:
2469 Int = Intrinsic::aarch64_neon_vqshls_n;
2470 s = "vsqshl"; OverloadInt = true; break;
2471 // Unsigned Saturating Shift Left (Immediate)
2472 case AArch64::BI__builtin_neon_vqshlb_n_u8:
2473 case AArch64::BI__builtin_neon_vqshlh_n_u16:
2474 case AArch64::BI__builtin_neon_vqshls_n_u32:
2475 case AArch64::BI__builtin_neon_vqshld_n_u64:
2476 Int = Intrinsic::aarch64_neon_vqshlu_n;
2477 s = "vuqshl"; OverloadInt = true; break;
2478 // Signed Saturating Shift Left Unsigned (Immediate)
2479 case AArch64::BI__builtin_neon_vqshlub_n_s8:
2480 case AArch64::BI__builtin_neon_vqshluh_n_s16:
2481 case AArch64::BI__builtin_neon_vqshlus_n_s32:
2482 case AArch64::BI__builtin_neon_vqshlud_n_s64:
2483 Int = Intrinsic::aarch64_neon_vsqshlu;
2484 s = "vsqshlu"; OverloadInt = true; break;
2485 // Shift Right And Insert (Immediate)
2486 case AArch64::BI__builtin_neon_vsrid_n_s64:
2487 case AArch64::BI__builtin_neon_vsrid_n_u64:
2488 Int = Intrinsic::aarch64_neon_vsri;
2489 s = "vsri"; OverloadInt = true; break;
2490 // Shift Left And Insert (Immediate)
2491 case AArch64::BI__builtin_neon_vslid_n_s64:
2492 case AArch64::BI__builtin_neon_vslid_n_u64:
2493 Int = Intrinsic::aarch64_neon_vsli;
2494 s = "vsli"; OverloadInt = true; break;
2495 // Signed Saturating Shift Right Narrow (Immediate)
2496 case AArch64::BI__builtin_neon_vqshrnh_n_s16:
2497 case AArch64::BI__builtin_neon_vqshrns_n_s32:
2498 case AArch64::BI__builtin_neon_vqshrnd_n_s64:
2499 Int = Intrinsic::aarch64_neon_vsqshrn;
2500 s = "vsqshrn"; OverloadInt = true; break;
2501 // Unsigned Saturating Shift Right Narrow (Immediate)
2502 case AArch64::BI__builtin_neon_vqshrnh_n_u16:
2503 case AArch64::BI__builtin_neon_vqshrns_n_u32:
2504 case AArch64::BI__builtin_neon_vqshrnd_n_u64:
2505 Int = Intrinsic::aarch64_neon_vuqshrn;
2506 s = "vuqshrn"; OverloadInt = true; break;
2507 // Signed Saturating Rounded Shift Right Narrow (Immediate)
2508 case AArch64::BI__builtin_neon_vqrshrnh_n_s16:
2509 case AArch64::BI__builtin_neon_vqrshrns_n_s32:
2510 case AArch64::BI__builtin_neon_vqrshrnd_n_s64:
2511 Int = Intrinsic::aarch64_neon_vsqrshrn;
2512 s = "vsqrshrn"; OverloadInt = true; break;
2513 // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
2514 case AArch64::BI__builtin_neon_vqrshrnh_n_u16:
2515 case AArch64::BI__builtin_neon_vqrshrns_n_u32:
2516 case AArch64::BI__builtin_neon_vqrshrnd_n_u64:
2517 Int = Intrinsic::aarch64_neon_vuqrshrn;
2518 s = "vuqrshrn"; OverloadInt = true; break;
2519 // Signed Saturating Shift Right Unsigned Narrow (Immediate)
2520 case AArch64::BI__builtin_neon_vqshrunh_n_s16:
2521 case AArch64::BI__builtin_neon_vqshruns_n_s32:
2522 case AArch64::BI__builtin_neon_vqshrund_n_s64:
2523 Int = Intrinsic::aarch64_neon_vsqshrun;
2524 s = "vsqshrun"; OverloadInt = true; break;
2525 // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
2526 case AArch64::BI__builtin_neon_vqrshrunh_n_s16:
2527 case AArch64::BI__builtin_neon_vqrshruns_n_s32:
2528 case AArch64::BI__builtin_neon_vqrshrund_n_s64:
2529 Int = Intrinsic::aarch64_neon_vsqrshrun;
2530 s = "vsqrshrun"; OverloadInt = true; break;
2531 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
2532 case AArch64::BI__builtin_neon_vcvts_n_f32_s32:
2533 Int = Intrinsic::aarch64_neon_vcvtf32_n_s32;
2534 s = "vcvtf"; OverloadInt = false; break;
2535 case AArch64::BI__builtin_neon_vcvtd_n_f64_s64:
2536 Int = Intrinsic::aarch64_neon_vcvtf64_n_s64;
2537 s = "vcvtf"; OverloadInt = false; break;
2538 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
2539 case AArch64::BI__builtin_neon_vcvts_n_f32_u32:
2540 Int = Intrinsic::aarch64_neon_vcvtf32_n_u32;
2541 s = "vcvtf"; OverloadInt = false; break;
2542 case AArch64::BI__builtin_neon_vcvtd_n_f64_u64:
2543 Int = Intrinsic::aarch64_neon_vcvtf64_n_u64;
2544 s = "vcvtf"; OverloadInt = false; break;
2545 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
2546 case AArch64::BI__builtin_neon_vcvts_n_s32_f32:
2547 Int = Intrinsic::aarch64_neon_vcvts_n_s32_f32;
2548 s = "fcvtzs"; OverloadInt = false; break;
2549 case AArch64::BI__builtin_neon_vcvtd_n_s64_f64:
2550 Int = Intrinsic::aarch64_neon_vcvtd_n_s64_f64;
2551 s = "fcvtzs"; OverloadInt = false; break;
2552 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
2553 case AArch64::BI__builtin_neon_vcvts_n_u32_f32:
2554 Int = Intrinsic::aarch64_neon_vcvts_n_u32_f32;
2555 s = "fcvtzu"; OverloadInt = false; break;
2556 case AArch64::BI__builtin_neon_vcvtd_n_u64_f64:
2557 Int = Intrinsic::aarch64_neon_vcvtd_n_u64_f64;
2558 s = "fcvtzu"; OverloadInt = false; break;
2560 if (!Int)
2561 return 0;
2564 // An AArch64 scalar builtin returns a scalar type, but it
2565 // may need to be mapped to an AArch64 intrinsic that returns
2566 // a one-element vector type.
2567 Function *F = 0;
2568 if (AcrossVec) {
2569 // Gen a one-element vector type
2570 const Expr *Arg = E->getArg(E->getNumArgs()-1);
2571 llvm::Type *Ty = CGF.ConvertType(Arg->getType());
2572 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
2573 llvm::Type *ETy = VTy->getElementType();
2574 llvm::VectorType *RTy = llvm::VectorType::get(ETy, 1);
2576 if (ExtendEle) {
2577 assert(!ETy->isFloatingPointTy());
2578 RTy = llvm::VectorType::getExtendedElementVectorType(RTy);
2579 }
2581 llvm::Type *Tys[2] = {RTy, VTy};
2582 F = CGF.CGM.getIntrinsic(Int, Tys);
2583 assert(E->getNumArgs() == 1);
2584 } else if (OverloadInt) {
2585 // Determine the type of this overloaded AArch64 intrinsic
2586 llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
2587 llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
2590 F = CGF.CGM.getIntrinsic(Int, VTy);
2591 } else if (OverloadWideInt || OverloadNarrowInt) {
2592 // Determine the type of this overloaded AArch64 intrinsic
2593 const Expr *Arg = E->getArg(E->getNumArgs()-1);
2594 llvm::Type *Ty = CGF.ConvertType(Arg->getType());
2595 llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
2596 llvm::VectorType *RTy = OverloadWideInt ?
2597 llvm::VectorType::getExtendedElementVectorType(VTy) :
2598 llvm::VectorType::getTruncatedElementVectorType(VTy);
2599 F = CGF.CGM.getIntrinsic(Int, RTy);
2600 } else if (OverloadCmpInt) {
2601 // Determine the types of this overloaded AArch64 intrinsic
2602 SmallVector<llvm::Type *, 3> Tys;
2603 const Expr *Arg = E->getArg(E->getNumArgs()-1);
2604 llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
2605 llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
2606 Tys.push_back(VTy);
2607 Ty = CGF.ConvertType(Arg->getType());
2608 VTy = llvm::VectorType::get(Ty, 1);
2609 Tys.push_back(VTy);
2610 if (IsFpCmpZInt)
2611 VTy = llvm::VectorType::get(CGF.FloatTy, 1);
2612 Tys.push_back(VTy);
2614 F = CGF.CGM.getIntrinsic(Int, Tys);
2615 } else if (OverloadCvtInt) {
2616 // Determine the types of this overloaded AArch64 intrinsic
2617 SmallVector<llvm::Type *, 2> Tys;
2618 const Expr *Arg = E->getArg(E->getNumArgs()-1);
2619 llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
2620 llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
2621 Tys.push_back(VTy);
2622 Ty = CGF.ConvertType(Arg->getType());
2623 VTy = llvm::VectorType::get(Ty, 1);
2624 Tys.push_back(VTy);
2626 F = CGF.CGM.getIntrinsic(Int, Tys);
2627 } else
2628 F = CGF.CGM.getIntrinsic(Int);
2630 Value *Result = CGF.EmitNeonCall(F, Ops, s);
2631 llvm::Type *ResultType = CGF.ConvertType(E->getType());
2632 // AArch64 intrinsic one-element vector type cast to
2633 // scalar type expected by the builtin
2634 return CGF.Builder.CreateBitCast(Result, ResultType, s);
2635 }
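// Informal recap of the overload flags used above: OverloadInt mangles the
// intrinsic on a <1 x elt> version of the return type; OverloadWideInt and
// OverloadNarrowInt use the extended resp. truncated element type of the last
// argument; OverloadCmpInt and OverloadCvtInt mangle on both the result and
// operand types. In every case the <1 x ...> intrinsic result is bit-cast
// back to the scalar type the builtin advertises.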
2637 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
2638 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
2639 const CmpInst::Predicate Ip, const Twine &Name) {
2640 llvm::Type *OTy = ((llvm::User *)Op)->getOperand(0)->getType();
2641 if (OTy->isPointerTy())
2642 OTy = Ty;
2643 Op = Builder.CreateBitCast(Op, OTy);
2644 if (((llvm::VectorType *)OTy)->getElementType()->isFloatingPointTy()) {
2645 Op = Builder.CreateFCmp(Fp, Op, ConstantAggregateZero::get(OTy));
2646 } else {
2647 Op = Builder.CreateICmp(Ip, Op, ConstantAggregateZero::get(OTy));
2648 }
2649 return Builder.CreateSExt(Op, Ty, Name);
2650 }
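// For example (sketch): a floating-point "compare equal to zero" on a double
// operand becomes roughly
//   %cmp = fcmp oeq <1 x double> %v, zeroinitializer
//   %res = sext <1 x i1> %cmp to <1 x i64>
// matching the NEON convention of all-ones / all-zeros comparison results.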
2652 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
2653 Value *ExtOp, Value *IndexOp,
2654 llvm::Type *ResTy, unsigned IntID,
2655 const char *Name) {
2656 SmallVector<Value *, 2> TblOps;
2657 if (ExtOp)
2658 TblOps.push_back(ExtOp);
2660 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
2661 SmallVector<Constant*, 16> Indices;
2662 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
2663 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
2664 Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
2665 Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
2666 }
2667 Value *SV = llvm::ConstantVector::get(Indices);
2669 int PairPos = 0, End = Ops.size() - 1;
2670 while (PairPos < End) {
2671 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
2672 Ops[PairPos+1], SV, Name));
2673 PairPos += 2;
2674 }
2676 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
2677 // of the 128-bit lookup table with zero.
2678 if (PairPos == End) {
2679 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
2680 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
2681 ZeroTbl, SV, Name));
2682 }
2684 TblTy = llvm::VectorType::get(TblTy->getElementType(),
2685 2*TblTy->getNumElements());
2686 llvm::Type *Tys[2] = { ResTy, TblTy };
2688 Function *TblF;
2689 TblOps.push_back(IndexOp);
2690 TblF = CGF.CGM.getIntrinsic(IntID, Tys);
2692 return CGF.EmitNeonCall(TblF, TblOps, Name);
2693 }
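// Worked example (sketch): for vtbl3 the three 64-bit table halves arrive as
// Ops[0..2]; the loop pairs Ops[0]/Ops[1] into one 128-bit table, and the odd
// leftover Ops[2] is widened with a zero high half, yielding the two 128-bit
// registers the AArch64 TBL/TBX instruction family actually consumes, with
// the index operand appended last.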
2695 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
2696 unsigned BuiltinID,
2697 const CallExpr *E) {
2698 unsigned int Int = 0;
2699 const char *s = NULL;
2701 unsigned TblPos;
2702 switch (BuiltinID) {
2703 default:
2704 return 0;
2705 case AArch64::BI__builtin_neon_vtbl1_v:
2706 case AArch64::BI__builtin_neon_vqtbl1_v:
2707 case AArch64::BI__builtin_neon_vqtbl1q_v:
2708 case AArch64::BI__builtin_neon_vtbl2_v:
2709 case AArch64::BI__builtin_neon_vqtbl2_v:
2710 case AArch64::BI__builtin_neon_vqtbl2q_v:
2711 case AArch64::BI__builtin_neon_vtbl3_v:
2712 case AArch64::BI__builtin_neon_vqtbl3_v:
2713 case AArch64::BI__builtin_neon_vqtbl3q_v:
2714 case AArch64::BI__builtin_neon_vtbl4_v:
2715 case AArch64::BI__builtin_neon_vqtbl4_v:
2716 case AArch64::BI__builtin_neon_vqtbl4q_v:
2717 TblPos = 0;
2718 break;
2719 case AArch64::BI__builtin_neon_vtbx1_v:
2720 case AArch64::BI__builtin_neon_vqtbx1_v:
2721 case AArch64::BI__builtin_neon_vqtbx1q_v:
2722 case AArch64::BI__builtin_neon_vtbx2_v:
2723 case AArch64::BI__builtin_neon_vqtbx2_v:
2724 case AArch64::BI__builtin_neon_vqtbx2q_v:
2725 case AArch64::BI__builtin_neon_vtbx3_v:
2726 case AArch64::BI__builtin_neon_vqtbx3_v:
2727 case AArch64::BI__builtin_neon_vqtbx3q_v:
2728 case AArch64::BI__builtin_neon_vtbx4_v:
2729 case AArch64::BI__builtin_neon_vqtbx4_v:
2730 case AArch64::BI__builtin_neon_vqtbx4q_v:
2731 TblPos = 1;
2732 break;
2733 }
2735 assert(E->getNumArgs() >= 3);
2737 // Get the last argument, which specifies the vector type.
2738 llvm::APSInt Result;
2739 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
2740 if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
2741 return 0;
2743 // Determine the type of this overloaded NEON intrinsic.
2744 NeonTypeFlags Type(Result.getZExtValue());
2745 llvm::VectorType *VTy = GetNeonType(&CGF, Type);
2746 llvm::Type *Ty = VTy;
2747 if (!Ty)
2748 return 0;
2750 SmallVector<Value *, 4> Ops;
2751 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
2752 Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
2753 }
2755 Arg = E->getArg(TblPos);
2756 llvm::Type *TblTy = CGF.ConvertType(Arg->getType());
2757 llvm::VectorType *VTblTy = cast<llvm::VectorType>(TblTy);
2758 llvm::Type *Tys[2] = { Ty, VTblTy };
2759 unsigned nElts = VTy->getNumElements();
2761 // The table lookup builtins take differing numbers of table operands, so
2762 // each case below packs its table registers and lowers them individually.
2763 SmallVector<Value *, 2> TblOps;
2764 switch (BuiltinID) {
2765 case AArch64::BI__builtin_neon_vtbl1_v: {
2766 TblOps.push_back(Ops[0]);
2767 return packTBLDVectorList(CGF, TblOps, 0, Ops[1], Ty,
2768 Intrinsic::aarch64_neon_vtbl1, "vtbl1");
2769 }
2770 case AArch64::BI__builtin_neon_vtbl2_v: {
2771 TblOps.push_back(Ops[0]);
2772 TblOps.push_back(Ops[1]);
2773 return packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
2774 Intrinsic::aarch64_neon_vtbl1, "vtbl1");
2775 }
2776 case AArch64::BI__builtin_neon_vtbl3_v: {
2777 TblOps.push_back(Ops[0]);
2778 TblOps.push_back(Ops[1]);
2779 TblOps.push_back(Ops[2]);
2780 return packTBLDVectorList(CGF, TblOps, 0, Ops[3], Ty,
2781 Intrinsic::aarch64_neon_vtbl2, "vtbl2");
2782 }
2783 case AArch64::BI__builtin_neon_vtbl4_v: {
2784 TblOps.push_back(Ops[0]);
2785 TblOps.push_back(Ops[1]);
2786 TblOps.push_back(Ops[2]);
2787 TblOps.push_back(Ops[3]);
2788 return packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
2789 Intrinsic::aarch64_neon_vtbl2, "vtbl2");
2790 }
2791 case AArch64::BI__builtin_neon_vtbx1_v: {
2792 TblOps.push_back(Ops[1]);
2793 Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
2794 Intrinsic::aarch64_neon_vtbl1, "vtbl1");
2796 llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
2797 Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
2798 Value *CmpRes = CGF.Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
2799 CmpRes = CGF.Builder.CreateSExt(CmpRes, Ty);
2801 SmallVector<Value *, 4> BslOps;
2802 BslOps.push_back(CmpRes);
2803 BslOps.push_back(Ops[0]);
2804 BslOps.push_back(TblRes);
2805 Function *BslF = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty);
2806 return CGF.EmitNeonCall(BslF, BslOps, "vbsl");
2807 }
2808 case AArch64::BI__builtin_neon_vtbx2_v: {
2809 TblOps.push_back(Ops[1]);
2810 TblOps.push_back(Ops[2]);
2811 return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
2812 Intrinsic::aarch64_neon_vtbx1, "vtbx1");
2813 }
2814 case AArch64::BI__builtin_neon_vtbx3_v: {
2815 TblOps.push_back(Ops[1]);
2816 TblOps.push_back(Ops[2]);
2817 TblOps.push_back(Ops[3]);
2818 Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
2819 Intrinsic::aarch64_neon_vtbl2, "vtbl2");
2821 llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
2822 Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
2823 Value *CmpRes = CGF.Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
2824 TwentyFourV);
2825 CmpRes = CGF.Builder.CreateSExt(CmpRes, Ty);
2827 SmallVector<Value *, 4> BslOps;
2828 BslOps.push_back(CmpRes);
2829 BslOps.push_back(Ops[0]);
2830 BslOps.push_back(TblRes);
2831 Function *BslF = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty);
2832 return CGF.EmitNeonCall(BslF, BslOps, "vbsl");
2833 }
2834 case AArch64::BI__builtin_neon_vtbx4_v: {
2835 TblOps.push_back(Ops[1]);
2836 TblOps.push_back(Ops[2]);
2837 TblOps.push_back(Ops[3]);
2838 TblOps.push_back(Ops[4]);
2839 return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
2840 Intrinsic::aarch64_neon_vtbx2, "vtbx2");
2841 }
2842 case AArch64::BI__builtin_neon_vqtbl1_v:
2843 case AArch64::BI__builtin_neon_vqtbl1q_v:
2844 Int = Intrinsic::aarch64_neon_vtbl1; s = "vtbl1"; break;
2845 case AArch64::BI__builtin_neon_vqtbl2_v:
2846 case AArch64::BI__builtin_neon_vqtbl2q_v:
2847 Int = Intrinsic::aarch64_neon_vtbl2; s = "vtbl2"; break;
2848 case AArch64::BI__builtin_neon_vqtbl3_v:
2849 case AArch64::BI__builtin_neon_vqtbl3q_v:
2850 Int = Intrinsic::aarch64_neon_vtbl3; s = "vtbl3"; break;
2851 case AArch64::BI__builtin_neon_vqtbl4_v:
2852 case AArch64::BI__builtin_neon_vqtbl4q_v:
2853 Int = Intrinsic::aarch64_neon_vtbl4; s = "vtbl4"; break;
2854 case AArch64::BI__builtin_neon_vqtbx1_v:
2855 case AArch64::BI__builtin_neon_vqtbx1q_v:
2856 Int = Intrinsic::aarch64_neon_vtbx1; s = "vtbx1"; break;
2857 case AArch64::BI__builtin_neon_vqtbx2_v:
2858 case AArch64::BI__builtin_neon_vqtbx2q_v:
2859 Int = Intrinsic::aarch64_neon_vtbx2; s = "vtbx2"; break;
2860 case AArch64::BI__builtin_neon_vqtbx3_v:
2861 case AArch64::BI__builtin_neon_vqtbx3q_v:
2862 Int = Intrinsic::aarch64_neon_vtbx3; s = "vtbx3"; break;
2863 case AArch64::BI__builtin_neon_vqtbx4_v:
2864 case AArch64::BI__builtin_neon_vqtbx4q_v:
2865 Int = Intrinsic::aarch64_neon_vtbx4; s = "vtbx4"; break;
2866 }
2867 }
2869 if (!Int)
2870 return 0;
2872 Function *F = CGF.CGM.getIntrinsic(Int, Tys);
2873 return CGF.EmitNeonCall(F, Ops, s);
2874 }
2876 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
2877 const CallExpr *E) {
2878 // Process AArch64 scalar builtins
2879 if (Value *Result = EmitAArch64ScalarBuiltinExpr(*this, BuiltinID, E))
2880 return Result;
2882 // Process AArch64 table lookup builtins
2883 if (Value *Result = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E))
2884 return Result;
2886 if (BuiltinID == AArch64::BI__clear_cache) {
2887 assert(E->getNumArgs() == 2 &&
2888 "Variadic __clear_cache slipped through on AArch64");
2890 const FunctionDecl *FD = E->getDirectCallee();
2891 SmallVector<Value *, 2> Ops;
2892 for (unsigned i = 0; i < E->getNumArgs(); i++)
2893 Ops.push_back(EmitScalarExpr(E->getArg(i)));
2894 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
2895 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
2896 StringRef Name = FD->getName();
2897 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
2898 }
2900 SmallVector<Value *, 4> Ops;
2901 llvm::Value *Align = 0; // Alignment for load/store
2902 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
2903 if (i == 0) {
2904 switch (BuiltinID) {
2905 case AArch64::BI__builtin_neon_vst1_x2_v:
2906 case AArch64::BI__builtin_neon_vst1q_x2_v:
2907 case AArch64::BI__builtin_neon_vst1_x3_v:
2908 case AArch64::BI__builtin_neon_vst1q_x3_v:
2909 case AArch64::BI__builtin_neon_vst1_x4_v:
2910 case AArch64::BI__builtin_neon_vst1q_x4_v:
2911 // Handle ld1/st1 lane in this function a little differently from ARM.
2912 case AArch64::BI__builtin_neon_vld1_lane_v:
2913 case AArch64::BI__builtin_neon_vld1q_lane_v:
2914 case AArch64::BI__builtin_neon_vst1_lane_v:
2915 case AArch64::BI__builtin_neon_vst1q_lane_v:
2916 // Get the alignment for the argument in addition to the value;
2917 // we'll use it later.
2918 std::pair<llvm::Value *, unsigned> Src =
2919 EmitPointerWithAlignment(E->getArg(0));
2920 Ops.push_back(Src.first);
2921 Align = Builder.getInt32(Src.second);
2922 continue;
2923 }
2924 }
2925 if (i == 1) {
2926 switch (BuiltinID) {
2927 case AArch64::BI__builtin_neon_vld1_x2_v:
2928 case AArch64::BI__builtin_neon_vld1q_x2_v:
2929 case AArch64::BI__builtin_neon_vld1_x3_v:
2930 case AArch64::BI__builtin_neon_vld1q_x3_v:
2931 case AArch64::BI__builtin_neon_vld1_x4_v:
2932 case AArch64::BI__builtin_neon_vld1q_x4_v:
2933 // Handle ld1/st1 dup lane in this function a little differently from ARM.
2934 case AArch64::BI__builtin_neon_vld2_dup_v:
2935 case AArch64::BI__builtin_neon_vld2q_dup_v:
2936 case AArch64::BI__builtin_neon_vld3_dup_v:
2937 case AArch64::BI__builtin_neon_vld3q_dup_v:
2938 case AArch64::BI__builtin_neon_vld4_dup_v:
2939 case AArch64::BI__builtin_neon_vld4q_dup_v:
2940 case AArch64::BI__builtin_neon_vld2_lane_v:
2941 case AArch64::BI__builtin_neon_vld2q_lane_v:
2942 // Get the alignment for the argument in addition to the value;
2943 // we'll use it later.
2944 std::pair<llvm::Value *, unsigned> Src =
2945 EmitPointerWithAlignment(E->getArg(1));
2946 Ops.push_back(Src.first);
2947 Align = Builder.getInt32(Src.second);
2948 continue;
2949 }
2950 }
2951 Ops.push_back(EmitScalarExpr(E->getArg(i)));
2952 }
2954 // Get the last argument, which specifies the vector type.
2955 llvm::APSInt Result;
2956 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
2957 if (!Arg->isIntegerConstantExpr(Result, getContext()))
2958 return 0;
2960 // Determine the type of this overloaded NEON intrinsic.
2961 NeonTypeFlags Type(Result.getZExtValue());
2962 bool usgn = Type.isUnsigned();
2963 bool quad = Type.isQuad();
2965 llvm::VectorType *VTy = GetNeonType(this, Type);
2966 llvm::Type *Ty = VTy;
2967 if (!Ty)
2968 return 0;
2970 unsigned Int;
2971 switch (BuiltinID) {
2972 default:
2973 return 0;
2975 // AArch64 builtins mapping to legacy ARM v7 builtins.
2976 // FIXME: the mapped builtins listed correspond to what has been tested
2977 // in aarch64-neon-intrinsics.c so far.
2978 case AArch64::BI__builtin_neon_vuzp_v:
2979 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vuzp_v, E);
2980 case AArch64::BI__builtin_neon_vuzpq_v:
2981 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vuzpq_v, E);
2982 case AArch64::BI__builtin_neon_vzip_v:
2983 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vzip_v, E);
2984 case AArch64::BI__builtin_neon_vzipq_v:
2985 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vzipq_v, E);
2986 case AArch64::BI__builtin_neon_vtrn_v:
2987 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtrn_v, E);
2988 case AArch64::BI__builtin_neon_vtrnq_v:
2989 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtrnq_v, E);
2990 case AArch64::BI__builtin_neon_vext_v:
2991 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vext_v, E);
2992 case AArch64::BI__builtin_neon_vextq_v:
2993 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vextq_v, E);
2994 case AArch64::BI__builtin_neon_vmul_v:
2995 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmul_v, E);
2996 case AArch64::BI__builtin_neon_vmulq_v:
2997 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmulq_v, E);
2998 case AArch64::BI__builtin_neon_vabd_v:
2999 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabd_v, E);
3000 case AArch64::BI__builtin_neon_vabdq_v:
3001 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabdq_v, E);
3002 case AArch64::BI__builtin_neon_vfma_v:
3003 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfma_v, E);
3004 case AArch64::BI__builtin_neon_vfmaq_v:
3005 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfmaq_v, E);
3006 case AArch64::BI__builtin_neon_vbsl_v:
3007 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbsl_v, E);
3008 case AArch64::BI__builtin_neon_vbslq_v:
3009 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbslq_v, E);
3010 case AArch64::BI__builtin_neon_vrsqrts_v:
3011 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrts_v, E);
3012 case AArch64::BI__builtin_neon_vrsqrtsq_v:
3013 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrtsq_v, E);
3014 case AArch64::BI__builtin_neon_vrecps_v:
3015 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecps_v, E);
3016 case AArch64::BI__builtin_neon_vrecpsq_v:
3017 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpsq_v, E);
3018 case AArch64::BI__builtin_neon_vcale_v:
3019 if (VTy->getVectorNumElements() == 1) {
3020 std::swap(Ops[0], Ops[1]);
3021 } else {
3022 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcale_v, E);
3023 }
3024 case AArch64::BI__builtin_neon_vcage_v:
3025 if (VTy->getVectorNumElements() == 1) {
3026 // Determine the types of this overloaded AArch64 intrinsic
3027 SmallVector<llvm::Type *, 3> Tys;
3028 Tys.push_back(VTy);
3029 VTy = llvm::VectorType::get(DoubleTy, 1);
3030 Tys.push_back(VTy);
3031 Tys.push_back(VTy);
3032 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vcage, Tys);
3033 return EmitNeonCall(F, Ops, "vcage");
3034 }
3035 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcage_v, E);
3036 case AArch64::BI__builtin_neon_vcaleq_v:
3037 std::swap(Ops[0], Ops[1]);
3038 case AArch64::BI__builtin_neon_vcageq_v: {
3039 Function *F;
3040 if (VTy->getElementType()->isIntegerTy(64))
3041 F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgeq);
3042 else
3043 F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
3044 return EmitNeonCall(F, Ops, "vcage");
3045 }
3046 case AArch64::BI__builtin_neon_vcalt_v:
3047 if (VTy->getVectorNumElements() == 1) {
3048 std::swap(Ops[0], Ops[1]);
3049 } else {
3050 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcalt_v, E);
3051 }
3052 case AArch64::BI__builtin_neon_vcagt_v:
3053 if (VTy->getVectorNumElements() == 1) {
3054 // Determine the types of this overloaded AArch64 intrinsic
3055 SmallVector<llvm::Type *, 3> Tys;
3056 Tys.push_back(VTy);
3057 VTy = llvm::VectorType::get(DoubleTy, 1);
3058 Tys.push_back(VTy);
3059 Tys.push_back(VTy);
3060 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vcagt, Tys);
3061 return EmitNeonCall(F, Ops, "vcagt");
3062 }
3063 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcagt_v, E);
3064 case AArch64::BI__builtin_neon_vcaltq_v:
3065 std::swap(Ops[0], Ops[1]);
3066 case AArch64::BI__builtin_neon_vcagtq_v: {
3067 Function *F;
3068 if (VTy->getElementType()->isIntegerTy(64))
3069 F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgtq);
3070 else
3071 F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
3072 return EmitNeonCall(F, Ops, "vcagt");
3073 }
3074 case AArch64::BI__builtin_neon_vtst_v:
3075 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtst_v, E);
3076 case AArch64::BI__builtin_neon_vtstq_v:
3077 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtstq_v, E);
3078 case AArch64::BI__builtin_neon_vhadd_v:
3079 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhadd_v, E);
3080 case AArch64::BI__builtin_neon_vhaddq_v:
3081 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhaddq_v, E);
3082 case AArch64::BI__builtin_neon_vhsub_v:
3083 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsub_v, E);
3084 case AArch64::BI__builtin_neon_vhsubq_v:
3085 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsubq_v, E);
3086 case AArch64::BI__builtin_neon_vrhadd_v:
3087 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhadd_v, E);
3088 case AArch64::BI__builtin_neon_vrhaddq_v:
3089 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhaddq_v, E);
3090 case AArch64::BI__builtin_neon_vqadd_v:
3091 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqadd_v, E);
3092 case AArch64::BI__builtin_neon_vqaddq_v:
3093 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqaddq_v, E);
3094 case AArch64::BI__builtin_neon_vqsub_v:
3095 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsub_v, E);
3096 case AArch64::BI__builtin_neon_vqsubq_v:
3097 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsubq_v, E);
3098 case AArch64::BI__builtin_neon_vshl_v:
3099 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_v, E);
3100 case AArch64::BI__builtin_neon_vshlq_v:
3101 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_v, E);
3102 case AArch64::BI__builtin_neon_vqshl_v:
3103 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_v, E);
3104 case AArch64::BI__builtin_neon_vqshlq_v:
3105 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_v, E);
3106 case AArch64::BI__builtin_neon_vrshl_v:
3107 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshl_v, E);
3108 case AArch64::BI__builtin_neon_vrshlq_v:
3109 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshlq_v, E);
3110 case AArch64::BI__builtin_neon_vqrshl_v:
3111 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshl_v, E);
3112 case AArch64::BI__builtin_neon_vqrshlq_v:
3113 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshlq_v, E);
3114 case AArch64::BI__builtin_neon_vaddhn_v:
3115 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vaddhn_v, E);
3116 case AArch64::BI__builtin_neon_vraddhn_v:
3117 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vraddhn_v, E);
3118 case AArch64::BI__builtin_neon_vsubhn_v:
3119 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsubhn_v, E);
3120 case AArch64::BI__builtin_neon_vrsubhn_v:
3121 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsubhn_v, E);
3122 case AArch64::BI__builtin_neon_vmull_v:
3123 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmull_v, E);
3124 case AArch64::BI__builtin_neon_vqdmull_v:
3125 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmull_v, E);
3126 case AArch64::BI__builtin_neon_vqdmlal_v:
3127 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlal_v, E);
3128 case AArch64::BI__builtin_neon_vqdmlsl_v:
3129 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlsl_v, E);
3130 case AArch64::BI__builtin_neon_vmax_v:
3131 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmax_v, E);
3132 case AArch64::BI__builtin_neon_vmaxq_v:
3133 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmaxq_v, E);
3134 case AArch64::BI__builtin_neon_vmin_v:
3135 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmin_v, E);
3136 case AArch64::BI__builtin_neon_vminq_v:
3137 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vminq_v, E);
3138 case AArch64::BI__builtin_neon_vpmax_v:
3139 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmax_v, E);
3140 case AArch64::BI__builtin_neon_vpmin_v:
3141 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmin_v, E);
3142 case AArch64::BI__builtin_neon_vpadd_v:
3143 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadd_v, E);
3144 case AArch64::BI__builtin_neon_vqdmulh_v:
3145 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulh_v, E);
3146 case AArch64::BI__builtin_neon_vqdmulhq_v:
3147 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulhq_v, E);
3148 case AArch64::BI__builtin_neon_vqrdmulh_v:
3149 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulh_v, E);
3150 case AArch64::BI__builtin_neon_vqrdmulhq_v:
3151 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulhq_v, E);
3153 // Shift by immediate
3154 case AArch64::BI__builtin_neon_vshr_n_v:
3155 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshr_n_v, E);
3156 case AArch64::BI__builtin_neon_vshrq_n_v:
3157 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshrq_n_v, E);
3158 case AArch64::BI__builtin_neon_vrshr_n_v:
3159 case AArch64::BI__builtin_neon_vrshrq_n_v:
3160 Int = usgn ? Intrinsic::aarch64_neon_vurshr
3161 : Intrinsic::aarch64_neon_vsrshr;
3162 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n");
3163 case AArch64::BI__builtin_neon_vsra_n_v:
3164 if (VTy->getElementType()->isIntegerTy(64)) {
3165 Int = usgn ? Intrinsic::aarch64_neon_vsradu_n
3166 : Intrinsic::aarch64_neon_vsrads_n;
3167 return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vsra_n");
3168 }
3169 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsra_n_v, E);
3170 case AArch64::BI__builtin_neon_vsraq_n_v:
3171 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsraq_n_v, E);
3172 case AArch64::BI__builtin_neon_vrsra_n_v:
3173 if (VTy->getElementType()->isIntegerTy(64)) {
3174 Int = usgn ? Intrinsic::aarch64_neon_vrsradu_n
3175 : Intrinsic::aarch64_neon_vrsrads_n;
3176 return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vrsra_n");
3177 }
3178 // fall through
3179 case AArch64::BI__builtin_neon_vrsraq_n_v: {
3180 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3181 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3182 Int = usgn ? Intrinsic::aarch64_neon_vurshr
3183 : Intrinsic::aarch64_neon_vsrshr;
3184 Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
3185 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
3186 }
3187 case AArch64::BI__builtin_neon_vshl_n_v:
3188 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_n_v, E);
3189 case AArch64::BI__builtin_neon_vshlq_n_v:
3190 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_n_v, E);
3191 case AArch64::BI__builtin_neon_vqshl_n_v:
3192 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_n_v, E);
3193 case AArch64::BI__builtin_neon_vqshlq_n_v:
3194 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_n_v, E);
3195 case AArch64::BI__builtin_neon_vqshlu_n_v:
3196 case AArch64::BI__builtin_neon_vqshluq_n_v:
3197 Int = Intrinsic::aarch64_neon_vsqshlu;
3198 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n");
3199 case AArch64::BI__builtin_neon_vsri_n_v:
3200 case AArch64::BI__builtin_neon_vsriq_n_v:
3201 Int = Intrinsic::aarch64_neon_vsri;
3202 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsri_n");
3203 case AArch64::BI__builtin_neon_vsli_n_v:
3204 case AArch64::BI__builtin_neon_vsliq_n_v:
3205 Int = Intrinsic::aarch64_neon_vsli;
3206 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsli_n");
3207 case AArch64::BI__builtin_neon_vshll_n_v: {
3208 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
3209 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
3210 if (usgn)
3211 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
3212 else
3213 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
3214 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
3215 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
3216 }
3217 case AArch64::BI__builtin_neon_vshrn_n_v: {
3218 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
3219 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
3220 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
3221 if (usgn)
3222 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
3223 else
3224 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
3225 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
3226 }
3227 case AArch64::BI__builtin_neon_vqshrun_n_v:
3228 Int = Intrinsic::aarch64_neon_vsqshrun;
3229 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
3230 case AArch64::BI__builtin_neon_vrshrn_n_v:
3231 Int = Intrinsic::aarch64_neon_vrshrn;
3232 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
3233 case AArch64::BI__builtin_neon_vqrshrun_n_v:
3234 Int = Intrinsic::aarch64_neon_vsqrshrun;
3235 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
3236 case AArch64::BI__builtin_neon_vqshrn_n_v:
3237 Int = usgn ? Intrinsic::aarch64_neon_vuqshrn
3238 : Intrinsic::aarch64_neon_vsqshrn;
3239 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
3240 case AArch64::BI__builtin_neon_vqrshrn_n_v:
3241 Int = usgn ? Intrinsic::aarch64_neon_vuqrshrn
3242 : Intrinsic::aarch64_neon_vsqrshrn;
3243 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
3246 case AArch64::BI__builtin_neon_vmovl_v:
3247 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovl_v, E);
3248 case AArch64::BI__builtin_neon_vcvt_n_f32_v:
3249 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_f32_v, E);
3250 case AArch64::BI__builtin_neon_vcvtq_n_f32_v:
3251 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_f32_v, E);
3252 case AArch64::BI__builtin_neon_vcvt_n_f64_v:
3253 case AArch64::BI__builtin_neon_vcvtq_n_f64_v: {
3254 llvm::Type *FloatTy =
3255 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
3256 llvm::Type *Tys[2] = { FloatTy, Ty };
3257 Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
3258 : Intrinsic::arm_neon_vcvtfxs2fp;
3259 Function *F = CGM.getIntrinsic(Int, Tys);
3260 return EmitNeonCall(F, Ops, "vcvt_n");
3261 }
3262 case AArch64::BI__builtin_neon_vcvt_n_s32_v:
3263 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_s32_v, E);
3264 case AArch64::BI__builtin_neon_vcvtq_n_s32_v:
3265 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_s32_v, E);
3266 case AArch64::BI__builtin_neon_vcvt_n_u32_v:
3267 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_u32_v, E);
3268 case AArch64::BI__builtin_neon_vcvtq_n_u32_v:
3269 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_u32_v, E);
3270 case AArch64::BI__builtin_neon_vcvt_n_s64_v:
3271 case AArch64::BI__builtin_neon_vcvt_n_u64_v:
3272 case AArch64::BI__builtin_neon_vcvtq_n_s64_v:
3273 case AArch64::BI__builtin_neon_vcvtq_n_u64_v: {
3274 llvm::Type *FloatTy =
3275 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
3276 llvm::Type *Tys[2] = { Ty, FloatTy };
3277 Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
3278 : Intrinsic::arm_neon_vcvtfp2fxs;
3279 Function *F = CGM.getIntrinsic(Int, Tys);
3280 return EmitNeonCall(F, Ops, "vcvt_n");
3281 }
3284 case AArch64::BI__builtin_neon_vld1_v:
3285 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_v, E);
3286 case AArch64::BI__builtin_neon_vld1q_v:
3287 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_v, E);
3288 case AArch64::BI__builtin_neon_vld2_v:
3289 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2_v, E);
3290 case AArch64::BI__builtin_neon_vld2q_v:
3291 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_v, E);
3292 case AArch64::BI__builtin_neon_vld3_v:
3293 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_v, E);
3294 case AArch64::BI__builtin_neon_vld3q_v:
3295 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_v, E);
3296 case AArch64::BI__builtin_neon_vld4_v:
3297 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_v, E);
3298 case AArch64::BI__builtin_neon_vld4q_v:
3299 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_v, E);
3300 case AArch64::BI__builtin_neon_vst1_v:
3301 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1_v, E);
3302 case AArch64::BI__builtin_neon_vst1q_v:
3303 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1q_v, E);
3304 case AArch64::BI__builtin_neon_vst2_v:
3305 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_v, E);
3306 case AArch64::BI__builtin_neon_vst2q_v:
3307 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_v, E);
3308 case AArch64::BI__builtin_neon_vst3_v:
3309 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_v, E);
3310 case AArch64::BI__builtin_neon_vst3q_v:
3311 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_v, E);
3312 case AArch64::BI__builtin_neon_vst4_v:
3313 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_v, E);
3314 case AArch64::BI__builtin_neon_vst4q_v:
3315 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_v, E);
3316 case AArch64::BI__builtin_neon_vld1_x2_v:
3317 case AArch64::BI__builtin_neon_vld1q_x2_v:
3318 case AArch64::BI__builtin_neon_vld1_x3_v:
3319 case AArch64::BI__builtin_neon_vld1q_x3_v:
3320 case AArch64::BI__builtin_neon_vld1_x4_v:
3321 case AArch64::BI__builtin_neon_vld1q_x4_v: {
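3322 // Each vld1xN variant calls its intrinsic and stores the returned struct of vectors through the result pointer in Ops[0].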
3323 switch (BuiltinID) {
3324 case AArch64::BI__builtin_neon_vld1_x2_v:
3325 case AArch64::BI__builtin_neon_vld1q_x2_v:
3326 Int = Intrinsic::aarch64_neon_vld1x2;
3327 break;
3328 case AArch64::BI__builtin_neon_vld1_x3_v:
3329 case AArch64::BI__builtin_neon_vld1q_x3_v:
3330 Int = Intrinsic::aarch64_neon_vld1x3;
3331 break;
3332 case AArch64::BI__builtin_neon_vld1_x4_v:
3333 case AArch64::BI__builtin_neon_vld1q_x4_v:
3334 Int = Intrinsic::aarch64_neon_vld1x4;
3335 break;
3336 }
3337 Function *F = CGM.getIntrinsic(Int, Ty);
3338 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld1xN");
3339 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3340 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3341 return Builder.CreateStore(Ops[1], Ops[0]);
3342 }
3343 case AArch64::BI__builtin_neon_vst1_x2_v:
3344 case AArch64::BI__builtin_neon_vst1q_x2_v:
3345 case AArch64::BI__builtin_neon_vst1_x3_v:
3346 case AArch64::BI__builtin_neon_vst1q_x3_v:
3347 case AArch64::BI__builtin_neon_vst1_x4_v:
3348 case AArch64::BI__builtin_neon_vst1q_x4_v: {
3349 Ops.push_back(Align);
3351 switch (BuiltinID) {
3352 case AArch64::BI__builtin_neon_vst1_x2_v:
3353 case AArch64::BI__builtin_neon_vst1q_x2_v:
3354 Int = Intrinsic::aarch64_neon_vst1x2;
3355 break;
3356 case AArch64::BI__builtin_neon_vst1_x3_v:
3357 case AArch64::BI__builtin_neon_vst1q_x3_v:
3358 Int = Intrinsic::aarch64_neon_vst1x3;
3359 break;
3360 case AArch64::BI__builtin_neon_vst1_x4_v:
3361 case AArch64::BI__builtin_neon_vst1q_x4_v:
3362 Int = Intrinsic::aarch64_neon_vst1x4;
3363 break;
3364 }
3365 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
3366 }
3367 case AArch64::BI__builtin_neon_vld1_lane_v:
3368 case AArch64::BI__builtin_neon_vld1q_lane_v: {
3369 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3370 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
3371 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3372 LoadInst *Ld = Builder.CreateLoad(Ops[0]);
3373 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
3374 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
3375 }
3376 case AArch64::BI__builtin_neon_vld2_lane_v:
3377 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2_lane_v, E);
3378 case AArch64::BI__builtin_neon_vld2q_lane_v:
3379 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_lane_v, E);
3380 case AArch64::BI__builtin_neon_vld3_lane_v:
3381 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_lane_v, E);
3382 case AArch64::BI__builtin_neon_vld3q_lane_v:
3383 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_lane_v, E);
3384 case AArch64::BI__builtin_neon_vld4_lane_v:
3385 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_lane_v, E);
3386 case AArch64::BI__builtin_neon_vld4q_lane_v:
3387 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_lane_v, E);
3388 case AArch64::BI__builtin_neon_vst1_lane_v:
3389 case AArch64::BI__builtin_neon_vst1q_lane_v: {
3390 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3391 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
3392 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3393 StoreInst *St =
3394 Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
3395 St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
3396 return St;
3397 }
3398 case AArch64::BI__builtin_neon_vst2_lane_v:
3399 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_lane_v, E);
3400 case AArch64::BI__builtin_neon_vst2q_lane_v:
3401 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_lane_v, E);
3402 case AArch64::BI__builtin_neon_vst3_lane_v:
3403 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_lane_v, E);
3404 case AArch64::BI__builtin_neon_vst3q_lane_v:
3405 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_lane_v, E);
3406 case AArch64::BI__builtin_neon_vst4_lane_v:
3407 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_lane_v, E);
3408 case AArch64::BI__builtin_neon_vst4q_lane_v:
3409 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_lane_v, E);
3410 case AArch64::BI__builtin_neon_vld1_dup_v:
3411 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_dup_v, E);
3412 case AArch64::BI__builtin_neon_vld1q_dup_v:
3413 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_dup_v, E);
3414 case AArch64::BI__builtin_neon_vld2_dup_v:
3415 case AArch64::BI__builtin_neon_vld2q_dup_v:
3416 case AArch64::BI__builtin_neon_vld3_dup_v:
3417 case AArch64::BI__builtin_neon_vld3q_dup_v:
3418 case AArch64::BI__builtin_neon_vld4_dup_v:
3419 case AArch64::BI__builtin_neon_vld4q_dup_v: {
3420 // Handle 64-bit x 1 elements as a special-case. There is no "dup" needed.
3421 if (VTy->getElementType()->getPrimitiveSizeInBits() == 64 &&
3422 VTy->getNumElements() == 1) {
3423 switch (BuiltinID) {
3424 case AArch64::BI__builtin_neon_vld2_dup_v:
3425 Int = Intrinsic::arm_neon_vld2;
3426 break;
3427 case AArch64::BI__builtin_neon_vld3_dup_v:
3428 Int = Intrinsic::arm_neon_vld3;
3429 break;
3430 case AArch64::BI__builtin_neon_vld4_dup_v:
3431 Int = Intrinsic::arm_neon_vld4;
3432 break;
3433 default:
3434 llvm_unreachable("unknown vld_dup intrinsic?");
3435 }
3436 Function *F = CGM.getIntrinsic(Int, Ty);
3437 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
3438 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3439 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3440 return Builder.CreateStore(Ops[1], Ops[0]);
3441 }
3442 switch (BuiltinID) {
3443 case AArch64::BI__builtin_neon_vld2_dup_v:
3444 case AArch64::BI__builtin_neon_vld2q_dup_v:
3445 Int = Intrinsic::arm_neon_vld2lane;
3446 break;
3447 case AArch64::BI__builtin_neon_vld3_dup_v:
3448 case AArch64::BI__builtin_neon_vld3q_dup_v:
3449 Int = Intrinsic::arm_neon_vld3lane;
3450 break;
3451 case AArch64::BI__builtin_neon_vld4_dup_v:
3452 case AArch64::BI__builtin_neon_vld4q_dup_v:
3453 Int = Intrinsic::arm_neon_vld4lane;
3454 break;
3455 }
3456 Function *F = CGM.getIntrinsic(Int, Ty);
3457 llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
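3458 // Argument list for vldNlane: the pointer, one undef vector per struct member, the lane index (0), and the alignment.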
3459 SmallVector<Value *, 6> Args;
3460 Args.push_back(Ops[1]);
3461 Args.append(STy->getNumElements(), UndefValue::get(Ty));
3463 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
3464 Args.push_back(CI);
3465 Args.push_back(Align);
3467 Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
3468 // Splat lane 0 to all elements in each vector of the result.
3469 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3470 Value *Val = Builder.CreateExtractValue(Ops[1], i);
3471 Value *Elt = Builder.CreateBitCast(Val, Ty);
3472 Elt = EmitNeonSplat(Elt, CI);
3473 Elt = Builder.CreateBitCast(Elt, Val->getType());
3474 Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
3476 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3477 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3478 return Builder.CreateStore(Ops[1], Ops[0]);
3479 }
3482 case AArch64::BI__builtin_neon_vaeseq_v:
3483 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aese, Ty),
3484 Ops, "aese");
3485 case AArch64::BI__builtin_neon_vaesdq_v:
3486 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesd, Ty),
3487 Ops, "aesd");
3488 case AArch64::BI__builtin_neon_vaesmcq_v:
3489 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesmc, Ty),
3490 Ops, "aesmc");
3491 case AArch64::BI__builtin_neon_vaesimcq_v:
3492 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesimc, Ty),
3493 Ops, "aesimc");
3494 case AArch64::BI__builtin_neon_vsha1su1q_v:
3495 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su1, Ty),
3496 Ops, "sha1su1");
3497 case AArch64::BI__builtin_neon_vsha256su0q_v:
3498 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su0, Ty),
3499 Ops, "sha256su0");
3500 case AArch64::BI__builtin_neon_vsha1su0q_v:
3501 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su0, Ty),
3502 Ops, "sha1su0");
3503 case AArch64::BI__builtin_neon_vsha256hq_v:
3504 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h, Ty),
3505 Ops, "sha256h");
3506 case AArch64::BI__builtin_neon_vsha256h2q_v:
3507 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h2, Ty),
3508 Ops, "sha256h2");
3509 case AArch64::BI__builtin_neon_vsha256su1q_v:
3510 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su1, Ty),
3511 Ops, "sha256su1");
3512 case AArch64::BI__builtin_neon_vmul_lane_v:
3513 case AArch64::BI__builtin_neon_vmul_laneq_v: {
3514 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
3515 bool Quad = false;
3516 if (BuiltinID == AArch64::BI__builtin_neon_vmul_laneq_v)
3517 Quad = true;
3518 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
3519 llvm::Type *VTy = GetNeonType(this,
3520 NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
3521 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
3522 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
3523 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
3524 return Builder.CreateBitCast(Result, Ty);
3525 }
3527 // AArch64-only builtins
3528 case AArch64::BI__builtin_neon_vfmaq_laneq_v: {
3529 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
3530 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3531 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3533 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
3534 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
3535 return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
3536 }
3537 case AArch64::BI__builtin_neon_vfmaq_lane_v: {
3538 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
3539 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3540 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
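3541 // The lane operand lives in a vector half as wide as the destination; bitcast it down, then splat the selected lane up to full width.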
3542 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
3543 llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
3544 VTy->getNumElements() / 2);
3545 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
3546 Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
3547 cast<ConstantInt>(Ops[3]));
3548 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
3550 return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
3551 }
3552 case AArch64::BI__builtin_neon_vfma_lane_v: {
3553 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
3554 // v1f64 fma should be mapped to Neon scalar f64 fma
3555 if (VTy && VTy->getElementType() == DoubleTy) {
3556 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
3557 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
3558 llvm::Type *VTy = GetNeonType(this,
3559 NeonTypeFlags(NeonTypeFlags::Float64, false, false));
3560 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
3561 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
3562 Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
3563 Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
3564 return Builder.CreateBitCast(Result, Ty);
3565 }
3566 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
3567 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3568 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3570 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
3571 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
3572 return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
3573 }
3574 case AArch64::BI__builtin_neon_vfma_laneq_v: {
3575 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
3576 // v1f64 fma should be mapped to Neon scalar f64 fma
3577 if (VTy && VTy->getElementType() == DoubleTy) {
3578 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
3579 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
3580 llvm::Type *VTy = GetNeonType(this,
3581 NeonTypeFlags(NeonTypeFlags::Float64, false, true));
3582 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
3583 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
3584 Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
3585 Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
3586 return Builder.CreateBitCast(Result, Ty);
3587 }
3588 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
3589 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3590 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3592 llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
3593 VTy->getNumElements() * 2);
3594 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
3595 Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
3596 cast<ConstantInt>(Ops[3]));
3597 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
3599 return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
3600 }
3601 case AArch64::BI__builtin_neon_vfms_v:
3602 case AArch64::BI__builtin_neon_vfmsq_v: {
3603 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
3604 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3605 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3606 Ops[1] = Builder.CreateFNeg(Ops[1]);
3607 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
3609 // LLVM's fma intrinsic puts the accumulator in the last position, but the
3610 // AArch64 intrinsic has it first.
3611 return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
3612 }
3613 case AArch64::BI__builtin_neon_vmaxnm_v:
3614 case AArch64::BI__builtin_neon_vmaxnmq_v: {
3615 Int = Intrinsic::aarch64_neon_vmaxnm;
3616 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
3617 }
3618 case AArch64::BI__builtin_neon_vminnm_v:
3619 case AArch64::BI__builtin_neon_vminnmq_v: {
3620 Int = Intrinsic::aarch64_neon_vminnm;
3621 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
3622 }
3623 case AArch64::BI__builtin_neon_vpmaxnm_v:
3624 case AArch64::BI__builtin_neon_vpmaxnmq_v: {
3625 Int = Intrinsic::aarch64_neon_vpmaxnm;
3626 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
3627 }
3628 case AArch64::BI__builtin_neon_vpminnm_v:
3629 case AArch64::BI__builtin_neon_vpminnmq_v: {
3630 Int = Intrinsic::aarch64_neon_vpminnm;
3631 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
3632 }
3633 case AArch64::BI__builtin_neon_vpmaxq_v: {
3634 Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
3635 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
3636 }
3637 case AArch64::BI__builtin_neon_vpminq_v: {
3638 Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
3639 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
3640 }
3641 case AArch64::BI__builtin_neon_vpaddq_v: {
3642 Int = Intrinsic::arm_neon_vpadd;
3643 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpadd");
3644 }
3645 case AArch64::BI__builtin_neon_vmulx_v:
3646 case AArch64::BI__builtin_neon_vmulxq_v: {
3647 Int = Intrinsic::aarch64_neon_vmulx;
3648 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
3649 }
3650 case AArch64::BI__builtin_neon_vpaddl_v:
3651 case AArch64::BI__builtin_neon_vpaddlq_v:
3652 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpaddl_v, E);
3653 case AArch64::BI__builtin_neon_vpadal_v:
3654 case AArch64::BI__builtin_neon_vpadalq_v:
3655 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadal_v, E);
3656 case AArch64::BI__builtin_neon_vqabs_v:
3657 case AArch64::BI__builtin_neon_vqabsq_v:
3658 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqabs_v, E);
3659 case AArch64::BI__builtin_neon_vqneg_v:
3660 case AArch64::BI__builtin_neon_vqnegq_v:
3661 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqneg_v, E);
3662 case AArch64::BI__builtin_neon_vabs_v:
3663 case AArch64::BI__builtin_neon_vabsq_v: {
3664 if (VTy->getElementType()->isFloatingPointTy()) {
3665 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
3666 }
3667 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabs_v, E);
3668 }
3669 case AArch64::BI__builtin_neon_vsqadd_v:
3670 case AArch64::BI__builtin_neon_vsqaddq_v: {
3671 Int = Intrinsic::aarch64_neon_usqadd;
3672 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
3673 }
3674 case AArch64::BI__builtin_neon_vuqadd_v:
3675 case AArch64::BI__builtin_neon_vuqaddq_v: {
3676 Int = Intrinsic::aarch64_neon_suqadd;
3677 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
3678 }
3679 case AArch64::BI__builtin_neon_vcls_v:
3680 case AArch64::BI__builtin_neon_vclsq_v:
3681 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcls_v, E);
3682 case AArch64::BI__builtin_neon_vclz_v:
3683 case AArch64::BI__builtin_neon_vclzq_v:
3684 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vclz_v, E);
3685 case AArch64::BI__builtin_neon_vcnt_v:
3686 case AArch64::BI__builtin_neon_vcntq_v:
3687 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcnt_v, E);
3688 case AArch64::BI__builtin_neon_vrbit_v:
3689 case AArch64::BI__builtin_neon_vrbitq_v:
3690 Int = Intrinsic::aarch64_neon_rbit;
3691 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
3692 case AArch64::BI__builtin_neon_vmovn_v:
3693 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovn_v, E);
3694 case AArch64::BI__builtin_neon_vqmovun_v:
3695 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqmovun_v, E);
3696 case AArch64::BI__builtin_neon_vqmovn_v:
3697 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqmovn_v, E);
3698 case AArch64::BI__builtin_neon_vcvt_f16_v:
3699 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f16_v, E);
3700 case AArch64::BI__builtin_neon_vcvt_f32_f16:
3701 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f32_f16, E);
3702 case AArch64::BI__builtin_neon_vcvt_f32_f64: {
3703 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3704 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, false));
3705 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
3706 }
3707 case AArch64::BI__builtin_neon_vcvtx_f32_v: {
3708 llvm::Type *EltTy = FloatTy;
3709 llvm::Type *ResTy = llvm::VectorType::get(EltTy, 2);
3710 llvm::Type *Tys[2] = { ResTy, Ty };
3711 Int = Intrinsic::aarch64_neon_fcvtxn;
3712 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtx_f32_f64");
3713 }
3714 case AArch64::BI__builtin_neon_vcvt_f64_f32: {
3715 llvm::Type *OpTy =
3716 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, false));
3717 Ops[0] = Builder.CreateBitCast(Ops[0], OpTy);
3718 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
3719 }
3720 case AArch64::BI__builtin_neon_vcvt_f64_v:
3721 case AArch64::BI__builtin_neon_vcvtq_f64_v: {
3722 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3723 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
3724 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
3725 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
3726 }
3727 case AArch64::BI__builtin_neon_vrndn_v:
3728 case AArch64::BI__builtin_neon_vrndnq_v: {
3729 Int = Intrinsic::aarch64_neon_frintn;
3730 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
3731 }
3732 case AArch64::BI__builtin_neon_vrnda_v:
3733 case AArch64::BI__builtin_neon_vrndaq_v: {
3734 Int = Intrinsic::round;
3735 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
3736 }
3737 case AArch64::BI__builtin_neon_vrndp_v:
3738 case AArch64::BI__builtin_neon_vrndpq_v: {
3739 Int = Intrinsic::ceil;
3740 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
3741 }
3742 case AArch64::BI__builtin_neon_vrndm_v:
3743 case AArch64::BI__builtin_neon_vrndmq_v: {
3744 Int = Intrinsic::floor;
3745 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
3746 }
3747 case AArch64::BI__builtin_neon_vrndx_v:
3748 case AArch64::BI__builtin_neon_vrndxq_v: {
3749 Int = Intrinsic::rint;
3750 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
3751 }
3752 case AArch64::BI__builtin_neon_vrnd_v:
3753 case AArch64::BI__builtin_neon_vrndq_v: {
3754 Int = Intrinsic::trunc;
3755 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd");
3756 }
3757 case AArch64::BI__builtin_neon_vrndi_v:
3758 case AArch64::BI__builtin_neon_vrndiq_v: {
3759 Int = Intrinsic::nearbyint;
3760 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
3761 }
3762 case AArch64::BI__builtin_neon_vcvt_s32_v:
3763 case AArch64::BI__builtin_neon_vcvt_u32_v:
3764 case AArch64::BI__builtin_neon_vcvtq_s32_v:
3765 case AArch64::BI__builtin_neon_vcvtq_u32_v:
3766 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_u32_v, E);
3767 case AArch64::BI__builtin_neon_vcvt_s64_v:
3768 case AArch64::BI__builtin_neon_vcvt_u64_v:
3769 case AArch64::BI__builtin_neon_vcvtq_s64_v:
3770 case AArch64::BI__builtin_neon_vcvtq_u64_v: {
3771 llvm::Type *DoubleTy =
3772 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
3773 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
3774 return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
3775 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
3776 }
3777 case AArch64::BI__builtin_neon_vcvtn_s32_v:
3778 case AArch64::BI__builtin_neon_vcvtnq_s32_v: {
3779 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
3780 llvm::Type *Tys[2] = { Ty, OpTy };
3781 Int = Intrinsic::aarch64_neon_fcvtns;
3782 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtns_f32");
3783 }
3784 case AArch64::BI__builtin_neon_vcvtn_s64_v:
3785 case AArch64::BI__builtin_neon_vcvtnq_s64_v: {
3786 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
3787 llvm::Type *Tys[2] = { Ty, OpTy };
3788 Int = Intrinsic::aarch64_neon_fcvtns;
3789 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtns_f64");
3790 }
3791 case AArch64::BI__builtin_neon_vcvtn_u32_v:
3792 case AArch64::BI__builtin_neon_vcvtnq_u32_v: {
3793 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
3794 llvm::Type *Tys[2] = { Ty, OpTy };
3795 Int = Intrinsic::aarch64_neon_fcvtnu;
3796 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtnu_f32");
3797 }
3798 case AArch64::BI__builtin_neon_vcvtn_u64_v:
3799 case AArch64::BI__builtin_neon_vcvtnq_u64_v: {
3800 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
3801 llvm::Type *Tys[2] = { Ty, OpTy };
3802 Int = Intrinsic::aarch64_neon_fcvtnu;
3803 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtnu_f64");
3804 }
3805 case AArch64::BI__builtin_neon_vcvtp_s32_v:
3806 case AArch64::BI__builtin_neon_vcvtpq_s32_v: {
3807 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
3808 llvm::Type *Tys[2] = { Ty, OpTy };
3809 Int = Intrinsic::aarch64_neon_fcvtps;
3810 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtps_f32");
3811 }
3812 case AArch64::BI__builtin_neon_vcvtp_s64_v:
3813 case AArch64::BI__builtin_neon_vcvtpq_s64_v: {
3814 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
3815 llvm::Type *Tys[2] = { Ty, OpTy };
3816 Int = Intrinsic::aarch64_neon_fcvtps;
3817 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtps_f64");
3818 }
3819 case AArch64::BI__builtin_neon_vcvtp_u32_v:
3820 case AArch64::BI__builtin_neon_vcvtpq_u32_v: {
3821 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
3822 llvm::Type *Tys[2] = { Ty, OpTy };
3823 Int = Intrinsic::aarch64_neon_fcvtpu;
3824 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtpu_f32");
3825 }
3826 case AArch64::BI__builtin_neon_vcvtp_u64_v:
3827 case AArch64::BI__builtin_neon_vcvtpq_u64_v: {
3828 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
3829 llvm::Type *Tys[2] = { Ty, OpTy };
3830 Int = Intrinsic::aarch64_neon_fcvtpu;
3831 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtpu_f64");
3832 }
3833 case AArch64::BI__builtin_neon_vcvtm_s32_v:
3834 case AArch64::BI__builtin_neon_vcvtmq_s32_v: {
3835 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
3836 llvm::Type *Tys[2] = { Ty, OpTy };
3837 Int = Intrinsic::aarch64_neon_fcvtms;
3838 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtms_f32");
3839 }
3840 case AArch64::BI__builtin_neon_vcvtm_s64_v:
3841 case AArch64::BI__builtin_neon_vcvtmq_s64_v: {
3842 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
3843 llvm::Type *Tys[2] = { Ty, OpTy };
3844 Int = Intrinsic::aarch64_neon_fcvtms;
3845 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtms_f64");
3846 }
3847 case AArch64::BI__builtin_neon_vcvtm_u32_v:
3848 case AArch64::BI__builtin_neon_vcvtmq_u32_v: {
3849 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
3850 llvm::Type *Tys[2] = { Ty, OpTy };
3851 Int = Intrinsic::aarch64_neon_fcvtmu;
3852 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtmu_f32");
3853 }
3854 case AArch64::BI__builtin_neon_vcvtm_u64_v:
3855 case AArch64::BI__builtin_neon_vcvtmq_u64_v: {
3856 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
3857 llvm::Type *Tys[2] = { Ty, OpTy };
3858 Int = Intrinsic::aarch64_neon_fcvtmu;
3859 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtmu_f64");
3860 }
3861 case AArch64::BI__builtin_neon_vcvta_s32_v:
3862 case AArch64::BI__builtin_neon_vcvtaq_s32_v: {
3863 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
3864 llvm::Type *Tys[2] = { Ty, OpTy };
3865 Int = Intrinsic::aarch64_neon_fcvtas;
3866 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtas_f32");
3867 }
3868 case AArch64::BI__builtin_neon_vcvta_s64_v:
3869 case AArch64::BI__builtin_neon_vcvtaq_s64_v: {
3870 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
3871 llvm::Type *Tys[2] = { Ty, OpTy };
3872 Int = Intrinsic::aarch64_neon_fcvtas;
3873 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtas_f64");
3874 }
3875 case AArch64::BI__builtin_neon_vcvta_u32_v:
3876 case AArch64::BI__builtin_neon_vcvtaq_u32_v: {
3877 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
3878 llvm::Type *Tys[2] = { Ty, OpTy };
3879 Int = Intrinsic::aarch64_neon_fcvtau;
3880 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtau_f32");
3881 }
3882 case AArch64::BI__builtin_neon_vcvta_u64_v:
3883 case AArch64::BI__builtin_neon_vcvtaq_u64_v: {
3884 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
3885 llvm::Type *Tys[2] = { Ty, OpTy };
3886 Int = Intrinsic::aarch64_neon_fcvtau;
3887 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtau_f64");
3888 }
3889 case AArch64::BI__builtin_neon_vrecpe_v:
3890 case AArch64::BI__builtin_neon_vrecpeq_v:
3891 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpe_v, E);
3892 case AArch64::BI__builtin_neon_vrsqrte_v:
3893 case AArch64::BI__builtin_neon_vrsqrteq_v:
3894 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrte_v, E);
3895 case AArch64::BI__builtin_neon_vsqrt_v:
3896 case AArch64::BI__builtin_neon_vsqrtq_v: {
3897 Int = Intrinsic::sqrt;
3898 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
3899 }
3900 case AArch64::BI__builtin_neon_vcvt_f32_v:
3901 case AArch64::BI__builtin_neon_vcvtq_f32_v:
3902 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f32_v, E);
3903 case AArch64::BI__builtin_neon_vceqz_v:
3904 case AArch64::BI__builtin_neon_vceqzq_v:
3905 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
3906 ICmpInst::ICMP_EQ, "vceqz");
3907 case AArch64::BI__builtin_neon_vcgez_v:
3908 case AArch64::BI__builtin_neon_vcgezq_v:
3909 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
3910 ICmpInst::ICMP_SGE, "vcgez");
3911 case AArch64::BI__builtin_neon_vclez_v:
3912 case AArch64::BI__builtin_neon_vclezq_v:
3913 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
3914 ICmpInst::ICMP_SLE, "vclez");
3915 case AArch64::BI__builtin_neon_vcgtz_v:
3916 case AArch64::BI__builtin_neon_vcgtzq_v:
3917 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
3918 ICmpInst::ICMP_SGT, "vcgtz");
3919 case AArch64::BI__builtin_neon_vcltz_v:
3920 case AArch64::BI__builtin_neon_vcltzq_v:
3921 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
3922 ICmpInst::ICMP_SLT, "vcltz");
3923 }
3924 }
3926 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
3927 const CallExpr *E) {
3928 if (BuiltinID == ARM::BI__clear_cache) {
3929 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
3930 const FunctionDecl *FD = E->getDirectCallee();
3931 SmallVector<Value*, 2> Ops;
3932 for (unsigned i = 0; i < 2; i++)
3933 Ops.push_back(EmitScalarExpr(E->getArg(i)));
3934 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
3935 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
3936 StringRef Name = FD->getName();
3937 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
3938 }
3940 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
3941 (BuiltinID == ARM::BI__builtin_arm_ldrex &&
3942 getContext().getTypeSize(E->getType()) == 64)) {
3943 Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
3945 Value *LdPtr = EmitScalarExpr(E->getArg(0));
3946 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
3947 "ldrexd");
3949 Value *Val0 = Builder.CreateExtractValue(Val, 1);
3950 Value *Val1 = Builder.CreateExtractValue(Val, 0);
3951 Val0 = Builder.CreateZExt(Val0, Int64Ty);
3952 Val1 = Builder.CreateZExt(Val1, Int64Ty);
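3953 // Reassemble the i64 result from the two i32 halves that ldrexd returns.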
3954 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
3955 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
3956 Val = Builder.CreateOr(Val, Val1);
3957 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
3958 }
3960 if (BuiltinID == ARM::BI__builtin_arm_ldrex) {
3961 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
3963 QualType Ty = E->getType();
3964 llvm::Type *RealResTy = ConvertType(Ty);
3965 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
3966 getContext().getTypeSize(Ty));
3967 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
3969 Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrex, LoadAddr->getType());
3970 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
3972 if (RealResTy->isPointerTy())
3973 return Builder.CreateIntToPtr(Val, RealResTy);
3975 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
3976 return Builder.CreateBitCast(Val, RealResTy);
3977 }
3980 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
3981 (BuiltinID == ARM::BI__builtin_arm_strex &&
3982 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
3983 Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
3984 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
3986 Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
3987 Value *Val = EmitScalarExpr(E->getArg(0));
3988 Builder.CreateStore(Val, Tmp);
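3989 // Round-trip the 64-bit value through memory so it can be reloaded as the pair of i32 halves that strexd expects.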
3990 Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
3991 Val = Builder.CreateLoad(LdPtr);
3993 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
3994 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
3995 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
3996 return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
3997 }
3999 if (BuiltinID == ARM::BI__builtin_arm_strex) {
4000 Value *StoreVal = EmitScalarExpr(E->getArg(0));
4001 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
4003 QualType Ty = E->getArg(0)->getType();
4004 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
4005 getContext().getTypeSize(Ty));
4006 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
4008 if (StoreVal->getType()->isPointerTy())
4009 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
4010 else {
4011 StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
4012 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
4013 }
4015 Function *F = CGM.getIntrinsic(Intrinsic::arm_strex, StoreAddr->getType());
4016 return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
4017 }
4019 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
4020 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
4021 return Builder.CreateCall(F);
4022 }
4024 if (BuiltinID == ARM::BI__builtin_arm_sevl) {
4025 Function *F = CGM.getIntrinsic(Intrinsic::arm_sevl);
4026 return Builder.CreateCall(F);
4027 }
4030 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
4031 switch (BuiltinID) {
4032 case ARM::BI__builtin_arm_crc32b:
4033 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
4034 case ARM::BI__builtin_arm_crc32cb:
4035 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
4036 case ARM::BI__builtin_arm_crc32h:
4037 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
4038 case ARM::BI__builtin_arm_crc32ch:
4039 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
4040 case ARM::BI__builtin_arm_crc32w:
4041 case ARM::BI__builtin_arm_crc32d:
4042 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
4043 case ARM::BI__builtin_arm_crc32cw:
4044 case ARM::BI__builtin_arm_crc32cd:
4045 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
4046 }
4048 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
4049 Value *Arg0 = EmitScalarExpr(E->getArg(0));
4050 Value *Arg1 = EmitScalarExpr(E->getArg(1));
4052 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
4053 // intrinsics, hence we need different codegen for these cases.
4054 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
4055 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
4056 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
4057 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
4058 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
4059 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
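4060 // CRC the low word first, then feed that result and the high word through a second call.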
4061 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
4062 Value *Res = Builder.CreateCall2(F, Arg0, Arg1a);
4063 return Builder.CreateCall2(F, Res, Arg1b);
4064 }
4065 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
4067 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
4068 return Builder.CreateCall2(F, Arg0, Arg1);
4069 }
4072 SmallVector<Value*, 4> Ops;
4073 llvm::Value *Align = 0;
4074 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
4075 if (i == 0) {
4076 switch (BuiltinID) {
4077 case ARM::BI__builtin_neon_vld1_v:
4078 case ARM::BI__builtin_neon_vld1q_v:
4079 case ARM::BI__builtin_neon_vld1q_lane_v:
4080 case ARM::BI__builtin_neon_vld1_lane_v:
4081 case ARM::BI__builtin_neon_vld1_dup_v:
4082 case ARM::BI__builtin_neon_vld1q_dup_v:
4083 case ARM::BI__builtin_neon_vst1_v:
4084 case ARM::BI__builtin_neon_vst1q_v:
4085 case ARM::BI__builtin_neon_vst1q_lane_v:
4086 case ARM::BI__builtin_neon_vst1_lane_v:
4087 case ARM::BI__builtin_neon_vst2_v:
4088 case ARM::BI__builtin_neon_vst2q_v:
4089 case ARM::BI__builtin_neon_vst2_lane_v:
4090 case ARM::BI__builtin_neon_vst2q_lane_v:
4091 case ARM::BI__builtin_neon_vst3_v:
4092 case ARM::BI__builtin_neon_vst3q_v:
4093 case ARM::BI__builtin_neon_vst3_lane_v:
4094 case ARM::BI__builtin_neon_vst3q_lane_v:
4095 case ARM::BI__builtin_neon_vst4_v:
4096 case ARM::BI__builtin_neon_vst4q_v:
4097 case ARM::BI__builtin_neon_vst4_lane_v:
4098 case ARM::BI__builtin_neon_vst4q_lane_v:
4099 // Get the alignment for the argument in addition to the value;
4100 // we'll use it later.
4101 std::pair<llvm::Value*, unsigned> Src =
4102 EmitPointerWithAlignment(E->getArg(0));
4103 Ops.push_back(Src.first);
4104 Align = Builder.getInt32(Src.second);
4105 continue;
4106 }
4107 }
4108 if (i == 1) {
4109 switch (BuiltinID) {
4110 case ARM::BI__builtin_neon_vld2_v:
4111 case ARM::BI__builtin_neon_vld2q_v:
4112 case ARM::BI__builtin_neon_vld3_v:
4113 case ARM::BI__builtin_neon_vld3q_v:
4114 case ARM::BI__builtin_neon_vld4_v:
4115 case ARM::BI__builtin_neon_vld4q_v:
4116 case ARM::BI__builtin_neon_vld2_lane_v:
4117 case ARM::BI__builtin_neon_vld2q_lane_v:
4118 case ARM::BI__builtin_neon_vld3_lane_v:
4119 case ARM::BI__builtin_neon_vld3q_lane_v:
4120 case ARM::BI__builtin_neon_vld4_lane_v:
4121 case ARM::BI__builtin_neon_vld4q_lane_v:
4122 case ARM::BI__builtin_neon_vld2_dup_v:
4123 case ARM::BI__builtin_neon_vld3_dup_v:
4124 case ARM::BI__builtin_neon_vld4_dup_v:
4125 // Get the alignment for the argument in addition to the value;
4126 // we'll use it later.
4127 std::pair<llvm::Value*, unsigned> Src =
4128 EmitPointerWithAlignment(E->getArg(1));
4129 Ops.push_back(Src.first);
4130 Align = Builder.getInt32(Src.second);
4131 continue;
4132 }
4133 }
4134 Ops.push_back(EmitScalarExpr(E->getArg(i)));
4135 }
4137 // vget_lane and vset_lane are not overloaded and do not have an extra
4138 // argument that specifies the vector type.
4139 switch (BuiltinID) {
4140 default: break;
4141 case ARM::BI__builtin_neon_vget_lane_i8:
4142 case ARM::BI__builtin_neon_vget_lane_i16:
4143 case ARM::BI__builtin_neon_vget_lane_i32:
4144 case ARM::BI__builtin_neon_vget_lane_i64:
4145 case ARM::BI__builtin_neon_vget_lane_f32:
4146 case ARM::BI__builtin_neon_vgetq_lane_i8:
4147 case ARM::BI__builtin_neon_vgetq_lane_i16:
4148 case ARM::BI__builtin_neon_vgetq_lane_i32:
4149 case ARM::BI__builtin_neon_vgetq_lane_i64:
4150 case ARM::BI__builtin_neon_vgetq_lane_f32:
4151 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4152 "vget_lane");
4153 case ARM::BI__builtin_neon_vset_lane_i8:
4154 case ARM::BI__builtin_neon_vset_lane_i16:
4155 case ARM::BI__builtin_neon_vset_lane_i32:
4156 case ARM::BI__builtin_neon_vset_lane_i64:
4157 case ARM::BI__builtin_neon_vset_lane_f32:
4158 case ARM::BI__builtin_neon_vsetq_lane_i8:
4159 case ARM::BI__builtin_neon_vsetq_lane_i16:
4160 case ARM::BI__builtin_neon_vsetq_lane_i32:
4161 case ARM::BI__builtin_neon_vsetq_lane_i64:
4162 case ARM::BI__builtin_neon_vsetq_lane_f32:
4163 Ops.push_back(EmitScalarExpr(E->getArg(2)));
4164 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
4165 }
  // Get the last argument, which specifies the vector type.
  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;

  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = FloatTy;
    else
      Ty = DoubleTy;

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result.getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, Ty);
    return Builder.CreateCall(F, Ops, "vcvtr");
  }
  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(Result.getZExtValue());
  bool usgn = Type.isUnsigned();
  bool quad = Type.isQuad();
  bool rightShift = false;

  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default: return 0;
  case ARM::BI__builtin_neon_vbsl_v:
  case ARM::BI__builtin_neon_vbslq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty),
                        Ops, "vbsl");
  case ARM::BI__builtin_neon_vabd_v:
  case ARM::BI__builtin_neon_vabdq_v:
    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case ARM::BI__builtin_neon_vabs_v:
  case ARM::BI__builtin_neon_vabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
                        Ops, "vabs");
  case ARM::BI__builtin_neon_vaddhn_v: {
    llvm::VectorType *SrcTy =
        llvm::VectorType::getExtendedElementVectorType(VTy);

    // %sum = add <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
                                          SrcTy->getScalarSizeInBits() / 2);
    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  }
  case ARM::BI__builtin_neon_vcale_v:
    std::swap(Ops[0], Ops[1]);
    // fall through: vcale(a, b) is vcage(b, a).
  case ARM::BI__builtin_neon_vcage_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
    // fall through
  case ARM::BI__builtin_neon_vcageq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcalt_v:
    std::swap(Ops[0], Ops[1]);
    // fall through: vcalt(a, b) is vcagt(b, a).
  case ARM::BI__builtin_neon_vcagt_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
    // fall through
  case ARM::BI__builtin_neon_vcagtq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcls_v:
  case ARM::BI__builtin_neon_vclsq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
    return EmitNeonCall(F, Ops, "vcls");
  }
  case ARM::BI__builtin_neon_vclz_v:
  case ARM::BI__builtin_neon_vclzq_v: {
    // Generate the target-independent intrinsic. We also need to add a second
    // argument saying whether clz of zero is undefined; on ARM it isn't.
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    return EmitNeonCall(F, Ops, "vclz");
  }
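  // Illustrative result for a <8 x i8> vclz (assumed overload naming):
  //   %vclz = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false)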
  case ARM::BI__builtin_neon_vcnt_v:
  case ARM::BI__builtin_neon_vcntq_v: {
    // Generate the target-independent ctpop intrinsic.
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty);
    return EmitNeonCall(F, Ops, "vctpop");
  }
  case ARM::BI__builtin_neon_vcvt_f16_v: {
    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
           "unexpected vcvt_f16_v builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_f16: {
    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
           "unexpected vcvt_f32_f16 builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_v:
  case ARM::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case ARM::BI__builtin_neon_vcvt_s32_v:
  case ARM::BI__builtin_neon_vcvt_u32_v:
  case ARM::BI__builtin_neon_vcvtq_s32_v:
  case ARM::BI__builtin_neon_vcvtq_u32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_n_f32_v:
  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { FloatTy, Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
               : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vcvt_n_s32_v:
  case ARM::BI__builtin_neon_vcvt_n_u32_v:
  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, FloatTy };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
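  // e.g. vcvt_n_f32_s32(v, 3) becomes a call to the fixed-point conversion
  // intrinsic with 3 as the number of fraction bits (illustrative).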
  case ARM::BI__builtin_neon_vext_v:
  case ARM::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<Constant*, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Value *SV = llvm::ConstantVector::get(Indices);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  }
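  // e.g. vext(a, b, 3) on <8 x i8> selects elements 3..10 of the
  // concatenation a:b via a single shufflevector (illustrative).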
  case ARM::BI__builtin_neon_vhadd_v:
  case ARM::BI__builtin_neon_vhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
  case ARM::BI__builtin_neon_vhsub_v:
  case ARM::BI__builtin_neon_vhsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
  case ARM::BI__builtin_neon_vld1_v:
  case ARM::BI__builtin_neon_vld1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
                        Ops, "vld1");
  case ARM::BI__builtin_neon_vld1q_lane_v:
    // Handle 64-bit integer elements as a special case. Use shuffles of
    // one-element vectors to avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      // Extract the other lane.
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      // Load the value as a one-element vector.
      Ty = llvm::VectorType::get(VTy->getElementType(), 1);
      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
      Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
      // Combine them.
      SmallVector<Constant*, 2> Indices;
      Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
      Indices.push_back(ConstantInt::get(Int32Ty, Lane));
      SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
    }
    // fall through
  case ARM::BI__builtin_neon_vld1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  }
  case ARM::BI__builtin_neon_vld1_dup_v:
  case ARM::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_v:
  case ARM::BI__builtin_neon_vld4q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
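  // Note for the vldN cases above and below: the intrinsics return a struct
  // of N vectors, and the builtin's first operand is the sret-style result
  // pointer, so the aggregate is stored through Ops[0] instead of returned.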
  case ARM::BI__builtin_neon_vld2_lane_v:
  case ARM::BI__builtin_neon_vld2q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_lane_v:
  case ARM::BI__builtin_neon_vld3q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_lane_v:
  case ARM::BI__builtin_neon_vld4q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_dup_v:
  case ARM::BI__builtin_neon_vld3_dup_v:
  case ARM::BI__builtin_neon_vld4_dup_v: {
    // Handle 64-bit elements as a special case. There is no "dup" needed.
    if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_dup_v:
        Int = Intrinsic::arm_neon_vld2;
        break;
      case ARM::BI__builtin_neon_vld3_dup_v:
        Int = Intrinsic::arm_neon_vld3;
        break;
      case ARM::BI__builtin_neon_vld4_dup_v:
        Int = Intrinsic::arm_neon_vld4;
        break;
      default: llvm_unreachable("unknown vld_dup intrinsic?");
      }
      Function *F = CGM.getIntrinsic(Int, Ty);
      Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
      Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
      Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
      return Builder.CreateStore(Ops[1], Ops[0]);
    }
    switch (BuiltinID) {
    case ARM::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case ARM::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case ARM::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: llvm_unreachable("unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, Ty);
    llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value*, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(Align);

    Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
    // Splat lane 0 to all elements in each vector of the result.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Val = Builder.CreateExtractValue(Ops[1], i);
      Value *Elt = Builder.CreateBitCast(Val, Ty);
      Elt = EmitNeonSplat(Elt, CI);
      Elt = Builder.CreateBitCast(Elt, Val->getType());
      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
    }
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vmax_v:
  case ARM::BI__builtin_neon_vmaxq_v:
    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case ARM::BI__builtin_neon_vmin_v:
  case ARM::BI__builtin_neon_vminq_v:
    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
  case ARM::BI__builtin_neon_vmovl_v: {
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case ARM::BI__builtin_neon_vmovn_v: {
    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case ARM::BI__builtin_neon_vmul_v:
  case ARM::BI__builtin_neon_vmulq_v:
    assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
                        Ops, "vmul");
  case ARM::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until global ISel comes along that can
    // see through such movement this leads to bad CodeGen. So we need an
    // intrinsic for now.
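    // For reference, the pure-IR form would look like this for a signed
    // 4x16 -> 4x32 vmull (illustrative):
    //   %l = sext <4 x i16> %a to <4 x i32>
    //   %r = sext <4 x i16> %b to <4 x i32>
    //   %vmull = mul <4 x i32> %l, %r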
    Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case ARM::BI__builtin_neon_vfma_v:
  case ARM::BI__builtin_neon_vfmaq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  case ARM::BI__builtin_neon_vpadal_v:
  case ARM::BI__builtin_neon_vpadalq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
  }
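  // e.g. a <4 x i32> vpadal result consumes a <8 x i16> source, so the
  // intrinsic is overloaded on the { <4 x i32>, <8 x i16> } type pair
  // (illustrative).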
  case ARM::BI__builtin_neon_vpadd_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
                        Ops, "vpadd");
  case ARM::BI__builtin_neon_vpaddl_v:
  case ARM::BI__builtin_neon_vpaddlq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  }
  case ARM::BI__builtin_neon_vpmax_v:
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case ARM::BI__builtin_neon_vpmin_v:
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case ARM::BI__builtin_neon_vqabs_v:
  case ARM::BI__builtin_neon_vqabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
                        Ops, "vqabs");
  case ARM::BI__builtin_neon_vqadd_v:
  case ARM::BI__builtin_neon_vqaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
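  // vqdmlal/vqdmlsl have no single intrinsic here; they are emitted as a
  // widening vqdmull followed by a saturating add/sub with the accumulator.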
  case ARM::BI__builtin_neon_vqdmlal_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                              MulOps, "vqdmlal");

    SmallVector<Value *, 2> AddOps;
    AddOps.push_back(Ops[0]);
    AddOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqadds, Ty),
                        AddOps, "vqdmlal");
  }
  case ARM::BI__builtin_neon_vqdmlsl_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                              MulOps, "vqdmlsl");

    SmallVector<Value *, 2> SubOps;
    SubOps.push_back(Ops[0]);
    SubOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqsubs, Ty),
                        SubOps, "vqdmlsl");
  }
  case ARM::BI__builtin_neon_vqdmulh_v:
  case ARM::BI__builtin_neon_vqdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
                        Ops, "vqdmulh");
  case ARM::BI__builtin_neon_vqdmull_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                        Ops, "vqdmull");
  case ARM::BI__builtin_neon_vqmovn_v:
    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
  case ARM::BI__builtin_neon_vqmovun_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
                        Ops, "vqmovun");
  case ARM::BI__builtin_neon_vqneg_v:
  case ARM::BI__builtin_neon_vqnegq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
                        Ops, "vqneg");
  case ARM::BI__builtin_neon_vqrdmulh_v:
  case ARM::BI__builtin_neon_vqrdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
                        Ops, "vqrdmulh");
  case ARM::BI__builtin_neon_vqrshl_v:
  case ARM::BI__builtin_neon_vqrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
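  // In the shift cases below, the trailing EmitNeonCall arguments (e.g.
  // "1, true") name the index of the shift-amount operand and whether it is
  // a right shift; right-shift amounts are negated for these intrinsics
  // (assumed behavior of the EmitNeonCall helper).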
  case ARM::BI__builtin_neon_vqrshrn_n_v:
    Int =
      usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                        Ops, "vqrshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqshl_v:
  case ARM::BI__builtin_neon_vqshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
  case ARM::BI__builtin_neon_vqshl_n_v:
  case ARM::BI__builtin_neon_vqshlq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
                        1, false);
  case ARM::BI__builtin_neon_vqshlu_n_v:
  case ARM::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
                        Ops, "vqshlu", 1, false);
  case ARM::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                        Ops, "vqshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqsub_v:
  case ARM::BI__builtin_neon_vqsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
  case ARM::BI__builtin_neon_vraddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
                        Ops, "vraddhn");
  case ARM::BI__builtin_neon_vrecpe_v:
  case ARM::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
  case ARM::BI__builtin_neon_vrecps_v:
  case ARM::BI__builtin_neon_vrecpsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
                        Ops, "vrecps");
  case ARM::BI__builtin_neon_vrhadd_v:
  case ARM::BI__builtin_neon_vrhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
  case ARM::BI__builtin_neon_vrshl_v:
  case ARM::BI__builtin_neon_vrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
  case ARM::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                        Ops, "vrshrn_n", 1, true);
  case ARM::BI__builtin_neon_vrshr_n_v:
  case ARM::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
  case ARM::BI__builtin_neon_vrsqrte_v:
  case ARM::BI__builtin_neon_vrsqrteq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
                        Ops, "vrsqrte");
  case ARM::BI__builtin_neon_vrsqrts_v:
  case ARM::BI__builtin_neon_vrsqrtsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
                        Ops, "vrsqrts");
  case ARM::BI__builtin_neon_vrsra_n_v:
  case ARM::BI__builtin_neon_vrsraq_n_v:
    // vrsra is a rounding shift right followed by an accumulate.
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case ARM::BI__builtin_neon_vrsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
                        Ops, "vrsubhn");
  case ARM::BI__builtin_neon_vshl_v:
  case ARM::BI__builtin_neon_vshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
  case ARM::BI__builtin_neon_vshll_n_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
  case ARM::BI__builtin_neon_vshl_n_v:
  case ARM::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
  case ARM::BI__builtin_neon_vshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
                        Ops, "vshrn_n", 1, true);
  case ARM::BI__builtin_neon_vshr_n_v:
  case ARM::BI__builtin_neon_vshrq_n_v:
    return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, usgn, "vshr_n");
  case ARM::BI__builtin_neon_vsri_n_v:
  case ARM::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
    // fall through: vsri and vsli share the shift-insert intrinsic.
  case ARM::BI__builtin_neon_vsli_n_v:
  case ARM::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
  case ARM::BI__builtin_neon_vsra_n_v:
  case ARM::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vst1_v:
  case ARM::BI__builtin_neon_vst1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst1q_lane_v:
    // Handle 64-bit integer elements as a special case. Use a shuffle to get
    // a one-element vector and avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = Align;
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Ops[1]->getType()), Ops);
    }
    // fall through
  case ARM::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    StoreInst *St = Builder.CreateStore(Ops[1],
                                        Builder.CreateBitCast(Ops[0], Ty));
    St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return St;
  }
  case ARM::BI__builtin_neon_vst2_v:
  case ARM::BI__builtin_neon_vst2q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst2_lane_v:
  case ARM::BI__builtin_neon_vst2q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_v:
  case ARM::BI__builtin_neon_vst3q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_lane_v:
  case ARM::BI__builtin_neon_vst3q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_v:
  case ARM::BI__builtin_neon_vst4q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_lane_v:
  case ARM::BI__builtin_neon_vst4q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vsubhn_v: {
    llvm::VectorType *SrcTy =
        llvm::VectorType::getExtendedElementVectorType(VTy);

    // %diff = sub <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");

    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
                                          SrcTy->getScalarSizeInBits() / 2);
    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  }
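  // vtbl does a byte-table lookup, producing 0 for out-of-range indices;
  // vtbx is the same but leaves the destination byte unchanged for
  // out-of-range indices. Both map directly onto the ARM intrinsics.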
  case ARM::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case ARM::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case ARM::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case ARM::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case ARM::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case ARM::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case ARM::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case ARM::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
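  // Illustrative IR for vtst on <8 x i8> (tests for any common set bits):
  //   %and = and <8 x i8> %a, %b
  //   %cmp = icmp ne <8 x i8> %and, zeroinitializer
  //   %vtst = sext <8 x i1> %cmp to <8 x i8>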
  case ARM::BI__builtin_neon_vtst_v:
  case ARM::BI__builtin_neon_vtstq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtst");
  }
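  // For a <4 x i16> vtrn, the two stored shuffles below use the index
  // vectors <0, 4, 2, 6> and <1, 5, 3, 7> respectively (illustrative).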
  case ARM::BI__builtin_neon_vtrn_v:
  case ARM::BI__builtin_neon_vtrnq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(Builder.getInt32(i+vi));
        Indices.push_back(Builder.getInt32(i+e+vi));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vuzp_v:
  case ARM::BI__builtin_neon_vuzpq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vzip_v:
  case ARM::BI__builtin_neon_vzipq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
        Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  }
}

llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    SmallVector<llvm::Constant*, 16> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));

  return Result;
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");
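  // ICEArguments is a bitmask: bit i is set when argument i must be an
  // integer constant expression, in which case it is folded to a ConstantInt
  // below rather than emitted as a scalar expression.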
  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                  llvm::ConstantInt::get(Ops[1]->getType(), 0));
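  // ldmxcsr/stmxcsr only move MXCSR to and from memory, so the value is
  // bounced through a stack temporary on both paths below.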
  case X86::BI__builtin_ia32_ldmxcsr: {
    Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, Int8PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    Value *Tmp = CreateMemTemp(E->getType());
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp, Int8PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // cast val to v2i64
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // extract element 0 (storelps) or 1 (storehps)
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // cast pointer to i64 & store
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
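  // palignr concatenates its two inputs and extracts a byte-aligned window:
  // e.g. for the 64-bit form with shiftVal == 3, the shuffle selects bytes
  // 3..10 of the 16-byte pair (illustrative).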
  case X86::BI__builtin_ia32_palignr: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 9 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 8) {
      SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);

      // create the shift-amount constant and call the intrinsic
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // create an i32 shift amount and call the intrinsic
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr256: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 32> Indices;
      // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
      for (unsigned l = 0; l != 2; ++l) {
        unsigned LaneStart = l * 16;
        unsigned LaneEnd = (l+1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
          Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
        }
      }

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // create an i32 shift amount and call the intrinsic
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
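  // The movnt* builtins become ordinary stores tagged with !nontemporal
  // metadata, e.g. (illustrative):
  //   store <4 x float> %val, <4 x float>* %p, !nontemporal !0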
  case X86::BI__builtin_ia32_movntps:
  case X86::BI__builtin_ia32_movntps256:
  case X86::BI__builtin_ia32_movntpd:
  case X86::BI__builtin_ia32_movntpd256:
  case X86::BI__builtin_ia32_movntdq:
  case X86::BI__builtin_ia32_movntdq256:
  case X86::BI__builtin_ia32_movnti:
  case X86::BI__builtin_ia32_movnti64: {
    llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
                                           Builder.getInt32(1));

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(Ops[0],
                                llvm::PointerType::getUnqual(Ops[1]->getType()),
                                      "cast");
    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);

    // If the operand is an integer, we can't assume alignment. Otherwise,
    // assume natural alignment.
    QualType ArgTy = E->getArg(1)->getType();
    unsigned Align;
    if (ArgTy->isIntegerType())
      Align = 1;
    else
      Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
    SI->setAlignment(Align);
    return SI;
  }
  // 3DNow!
  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pswapdsf:
    case X86::BI__builtin_ia32_pswapdsi:
      name = "pswapd";
      ID = Intrinsic::x86_3dnowa_pswapd;
      break;
    }
    llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
    Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, name);
  }
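  // The rdrand/rdseed step builtins write the random value through their
  // pointer argument and return the carry-flag success bit, matching the
  // { value, i32 } pair produced by the intrinsics.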
  case X86::BI__builtin_ia32_rdrand16_step:
  case X86::BI__builtin_ia32_rdrand32_step:
  case X86::BI__builtin_ia32_rdrand64_step:
  case X86::BI__builtin_ia32_rdseed16_step:
  case X86::BI__builtin_ia32_rdseed32_step:
  case X86::BI__builtin_ia32_rdseed64_step: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_rdrand16_step:
      ID = Intrinsic::x86_rdrand_16;
      break;
    case X86::BI__builtin_ia32_rdrand32_step:
      ID = Intrinsic::x86_rdrand_32;
      break;
    case X86::BI__builtin_ia32_rdrand64_step:
      ID = Intrinsic::x86_rdrand_64;
      break;
    case X86::BI__builtin_ia32_rdseed16_step:
      ID = Intrinsic::x86_rdseed_16;
      break;
    case X86::BI__builtin_ia32_rdseed32_step:
      ID = Intrinsic::x86_rdseed_32;
      break;
    case X86::BI__builtin_ia32_rdseed64_step:
      ID = Intrinsic::x86_rdseed_64;
      break;
    }

    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
    Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
    return Builder.CreateExtractValue(Call, 1);
  }
  case X86::BI__builtin_ia32_vbroadcastsi256: {
    Value *VecTmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], VecTmp);
    Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
    return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
  }
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return 0;
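  // The AltiVec load/store builtins take a (byte offset, base pointer) pair;
  // the offset is folded into a single address with a GEP, and the spare
  // operand is dropped before calling the intrinsic.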
  // vec_ld, vec_lvsl, vec_lvsr
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);

    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  // vec_st, vec_ste
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");