1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the code for emitting atomic operations.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "CGCall.h"
15 #include "CGRecordLayout.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/ASTContext.h"
20 #include "clang/CodeGen/CGFunctionInfo.h"
21 #include "clang/Sema/SemaDiagnostic.h"
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/Intrinsics.h"
25 #include "llvm/IR/Operator.h"
26
27 using namespace clang;
28 using namespace CodeGen;
29
30 namespace {
31   class AtomicInfo {
32     CodeGenFunction &CGF;
33     QualType AtomicTy;
34     QualType ValueTy;
35     uint64_t AtomicSizeInBits;
36     uint64_t ValueSizeInBits;
37     CharUnits AtomicAlign;
38     CharUnits ValueAlign;
39     CharUnits LValueAlign;
40     TypeEvaluationKind EvaluationKind;
41     bool UseLibcall;
42     LValue LVal;
43     CGBitFieldInfo BFI;
44   public:
45     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
46         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
47           EvaluationKind(TEK_Scalar), UseLibcall(true) {
48       assert(!lvalue.isGlobalReg());
49       ASTContext &C = CGF.getContext();
50       if (lvalue.isSimple()) {
51         AtomicTy = lvalue.getType();
52         if (auto *ATy = AtomicTy->getAs<AtomicType>())
53           ValueTy = ATy->getValueType();
54         else
55           ValueTy = AtomicTy;
56         EvaluationKind = CGF.getEvaluationKind(ValueTy);
57
58         uint64_t ValueAlignInBits;
59         uint64_t AtomicAlignInBits;
60         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
61         ValueSizeInBits = ValueTI.Width;
62         ValueAlignInBits = ValueTI.Align;
63
64         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
65         AtomicSizeInBits = AtomicTI.Width;
66         AtomicAlignInBits = AtomicTI.Align;
67
68         assert(ValueSizeInBits <= AtomicSizeInBits);
69         assert(ValueAlignInBits <= AtomicAlignInBits);
70
71         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
72         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
73         if (lvalue.getAlignment().isZero())
74           lvalue.setAlignment(AtomicAlign);
75
76         LVal = lvalue;
77       } else if (lvalue.isBitField()) {
78         ValueTy = lvalue.getType();
79         ValueSizeInBits = C.getTypeSize(ValueTy);
80         auto &OrigBFI = lvalue.getBitFieldInfo();
81         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
82         AtomicSizeInBits = C.toBits(
83             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
84                 .alignTo(lvalue.getAlignment()));
85         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
86         auto OffsetInChars =
87             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
88             lvalue.getAlignment();
89         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
90             VoidPtrAddr, OffsetInChars.getQuantity());
91         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
92             VoidPtrAddr,
93             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
94             "atomic_bitfield_base");
95         BFI = OrigBFI;
96         BFI.Offset = Offset;
97         BFI.StorageSize = AtomicSizeInBits;
98         BFI.StorageOffset += OffsetInChars;
99         LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
100                                     BFI, lvalue.getType(), lvalue.getBaseInfo(),
101                                     lvalue.getTBAAInfo());
102         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
103         if (AtomicTy.isNull()) {
104           llvm::APInt Size(
105               /*numBits=*/32,
106               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
107           AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
108                                             /*IndexTypeQuals=*/0);
109         }
110         AtomicAlign = ValueAlign = lvalue.getAlignment();
111       } else if (lvalue.isVectorElt()) {
112         ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
113         ValueSizeInBits = C.getTypeSize(ValueTy);
114         AtomicTy = lvalue.getType();
115         AtomicSizeInBits = C.getTypeSize(AtomicTy);
116         AtomicAlign = ValueAlign = lvalue.getAlignment();
117         LVal = lvalue;
118       } else {
119         assert(lvalue.isExtVectorElt());
120         ValueTy = lvalue.getType();
121         ValueSizeInBits = C.getTypeSize(ValueTy);
122         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
123             lvalue.getType(), lvalue.getExtVectorAddress()
124                                   .getElementType()->getVectorNumElements());
125         AtomicSizeInBits = C.getTypeSize(AtomicTy);
126         AtomicAlign = ValueAlign = lvalue.getAlignment();
127         LVal = lvalue;
128       }
129       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131     }
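    // For illustration: given a bit-field l-value such as member 'b' of
    //   struct S { int a : 3; int b : 29; };
    // the constructor above does not operate on the bit-field directly; it
    // widens the access to an aligned iN storage unit covering the bit-field
    // and rebases BFI (Offset, StorageSize, StorageOffset) onto that unit, so
    // the atomic operation itself becomes an ordinary integer access.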
132
133     QualType getAtomicType() const { return AtomicTy; }
134     QualType getValueType() const { return ValueTy; }
135     CharUnits getAtomicAlignment() const { return AtomicAlign; }
136     CharUnits getValueAlignment() const { return ValueAlign; }
137     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
138     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
139     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
140     bool shouldUseLibcall() const { return UseLibcall; }
141     const LValue &getAtomicLValue() const { return LVal; }
142     llvm::Value *getAtomicPointer() const {
143       if (LVal.isSimple())
144         return LVal.getPointer();
145       else if (LVal.isBitField())
146         return LVal.getBitFieldPointer();
147       else if (LVal.isVectorElt())
148         return LVal.getVectorPointer();
149       assert(LVal.isExtVectorElt());
150       return LVal.getExtVectorPointer();
151     }
152     Address getAtomicAddress() const {
153       return Address(getAtomicPointer(), getAtomicAlignment());
154     }
155
156     Address getAtomicAddressAsAtomicIntPointer() const {
157       return emitCastToAtomicIntPointer(getAtomicAddress());
158     }
159
160     /// Is the atomic size larger than the underlying value type?
161     ///
162     /// Note that the absence of padding does not mean that atomic
163     /// objects are completely interchangeable with non-atomic
164     /// objects: we might have promoted the alignment of a type
165     /// without making it bigger.
166     bool hasPadding() const {
167       return (ValueSizeInBits != AtomicSizeInBits);
168     }
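    // For example, on typical targets an _Atomic type is promoted to a
    // power-of-two size, so for
    //   struct S { char c[3]; };      // sizeof(S) == 3
    //   _Atomic(struct S) AS;         // usually sizeof(AS) == 4
    // hasPadding() is true and the trailing byte has to be zero-initialized
    // before the object can safely participate in compare-and-exchange loops.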
169
170     bool emitMemSetZeroIfNecessary() const;
171
172     llvm::Value *getAtomicSizeValue() const {
173       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
174       return CGF.CGM.getSize(size);
175     }
176
177     /// Cast the given pointer to an integer pointer suitable for atomic
178     /// operations.
179     Address emitCastToAtomicIntPointer(Address Addr) const;
180
181     /// If Addr is compatible with the iN that will be used for an atomic
182     /// operation, bitcast it. Otherwise, create a temporary that is suitable
183     /// and copy the value across.
184     Address convertToAtomicIntPointer(Address Addr) const;
185
186     /// Turn an atomic-layout object into an r-value.
187     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
188                                      SourceLocation loc, bool AsValue) const;
189
190     /// Converts an r-value to an integer value.
191     llvm::Value *convertRValueToInt(RValue RVal) const;
192
193     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
194                                      AggValueSlot ResultSlot,
195                                      SourceLocation Loc, bool AsValue) const;
196
197     /// Copy an atomic r-value into atomic-layout memory.
198     void emitCopyIntoMemory(RValue rvalue) const;
199
200     /// Project an l-value down to the value field.
201     LValue projectValue() const {
202       assert(LVal.isSimple());
203       Address addr = getAtomicAddress();
204       if (hasPadding())
205         addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
206
207       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
208                               LVal.getBaseInfo(), LVal.getTBAAInfo());
209     }
210
211     /// Emits atomic load.
212     /// \returns Loaded value.
213     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
214                           bool AsValue, llvm::AtomicOrdering AO,
215                           bool IsVolatile);
216
217     /// Emits atomic compare-and-exchange sequence.
218     /// \param Expected Expected value.
219     /// \param Desired Desired value.
220     /// \param Success Atomic ordering for success operation.
221     /// \param Failure Atomic ordering for failed operation.
222     /// \param IsWeak true if atomic operation is weak, false otherwise.
223     /// \returns Pair of values: previous value from storage (value type) and
224     /// boolean flag (i1 type) with true if success and false otherwise.
225     std::pair<RValue, llvm::Value *>
226     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
227                               llvm::AtomicOrdering Success =
228                                   llvm::AtomicOrdering::SequentiallyConsistent,
229                               llvm::AtomicOrdering Failure =
230                                   llvm::AtomicOrdering::SequentiallyConsistent,
231                               bool IsWeak = false);
232
233     /// Emits atomic update.
234     /// \param AO Atomic ordering.
235     /// \param UpdateOp Update operation for the current lvalue.
236     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
237                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
238                           bool IsVolatile);
239     /// Emits atomic update.
240     /// \param AO Atomic ordering.
241     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
242                           bool IsVolatile);
243
244     /// Materialize an atomic r-value in atomic-layout memory.
245     Address materializeRValue(RValue rvalue) const;
246
247     /// Creates temp alloca for intermediate operations on atomic value.
248     Address CreateTempAlloca() const;
249   private:
250     bool requiresMemSetZero(llvm::Type *type) const;
251
252
253     /// Emits atomic load as a libcall.
254     void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
255                                llvm::AtomicOrdering AO, bool IsVolatile);
256     /// Emits atomic load as LLVM instruction.
257     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
258     /// Emits atomic compare-and-exchange op as a libcall.
259     llvm::Value *EmitAtomicCompareExchangeLibcall(
260         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
261         llvm::AtomicOrdering Success =
262             llvm::AtomicOrdering::SequentiallyConsistent,
263         llvm::AtomicOrdering Failure =
264             llvm::AtomicOrdering::SequentiallyConsistent);
265     /// Emits atomic compare-and-exchange op as LLVM instruction.
266     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
267         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
268         llvm::AtomicOrdering Success =
269             llvm::AtomicOrdering::SequentiallyConsistent,
270         llvm::AtomicOrdering Failure =
271             llvm::AtomicOrdering::SequentiallyConsistent,
272         bool IsWeak = false);
273     /// Emit atomic update as libcalls.
274     void
275     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
276                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
277                             bool IsVolatile);
278     /// Emit atomic update as LLVM instructions.
279     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
280                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
281                             bool IsVolatile);
282     /// Emit atomic update as libcalls.
283     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
284                                  bool IsVolatile);
285     /// Emit atomic update as LLVM instructions.
286     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
287                             bool IsVolatile);
288   };
289 }
290
291 Address AtomicInfo::CreateTempAlloca() const {
292   Address TempAlloca = CGF.CreateMemTemp(
293       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
294                                                                 : AtomicTy,
295       getAtomicAlignment(),
296       "atomic-temp");
297   // Cast to pointer to value type for bitfields.
298   if (LVal.isBitField())
299     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
300         TempAlloca, getAtomicAddress().getType());
301   return TempAlloca;
302 }
303
304 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
305                                 StringRef fnName,
306                                 QualType resultType,
307                                 CallArgList &args) {
308   const CGFunctionInfo &fnInfo =
309     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
310   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
311   llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
312   auto callee = CGCallee::forDirect(fn);
313   return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
314 }
315
316 /// Does a store of the given IR type modify the full expected width?
317 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
318                            uint64_t expectedSize) {
319   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
320 }
321
322 /// Does the atomic type require memsetting to zero before initialization?
323 ///
324 /// The IR type is provided as a way of making certain queries faster.
325 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
326   // If the atomic type has size padding, we definitely need a memset.
327   if (hasPadding()) return true;
328
329   // Otherwise, do some simple heuristics to try to avoid it:
330   switch (getEvaluationKind()) {
331   // For scalars and complexes, check whether the store size of the
332   // type uses the full size.
333   case TEK_Scalar:
334     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
335   case TEK_Complex:
336     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
337                            AtomicSizeInBits / 2);
338
339   // Padding in structs has an undefined bit pattern.  User beware.
340   case TEK_Aggregate:
341     return false;
342   }
343   llvm_unreachable("bad evaluation kind");
344 }
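// For example, on x86-64 an _Atomic(long double) occupies 16 bytes, but a
// store of the underlying x86_fp80 value only writes 10 of them, so
// requiresMemSetZero() returns true and initialization zero-fills the object
// first (see emitMemSetZeroIfNecessary below).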
345
346 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
347   assert(LVal.isSimple());
348   llvm::Value *addr = LVal.getPointer();
349   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
350     return false;
351
352   CGF.Builder.CreateMemSet(
353       addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
354       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
355       LVal.getAlignment().getQuantity());
356   return true;
357 }
358
359 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
360                               Address Dest, Address Ptr,
361                               Address Val1, Address Val2,
362                               uint64_t Size,
363                               llvm::AtomicOrdering SuccessOrder,
364                               llvm::AtomicOrdering FailureOrder,
365                               llvm::SyncScope::ID Scope) {
366   // The weak flag is not a CreateAtomicCmpXchg parameter; it is set below.
367   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
368   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
369
370   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
371       Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
372       Scope);
373   Pair->setVolatile(E->isVolatile());
374   Pair->setWeak(IsWeak);
375
376   // Cmp holds the result of the compare-exchange operation: true on success,
377   // false on failure.
378   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
379   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
380
381   // This basic block is used to hold the store instruction if the operation
382   // failed.
383   llvm::BasicBlock *StoreExpectedBB =
384       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
385
386   // This basic block is the exit point of the operation, we should end up
387   // here regardless of whether or not the operation succeeded.
388   llvm::BasicBlock *ContinueBB =
389       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
390
391   // Update Expected if Expected isn't equal to Old, otherwise branch to the
392   // exit point.
393   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
394
395   CGF.Builder.SetInsertPoint(StoreExpectedBB);
396   // Update the memory at Expected with Old's value.
397   CGF.Builder.CreateStore(Old, Val1);
398   // Finally, branch to the exit point.
399   CGF.Builder.CreateBr(ContinueBB);
400
401   CGF.Builder.SetInsertPoint(ContinueBB);
402   // Update the memory at Dest with Cmp's value.
403   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
404 }
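// Roughly, for a 32-bit object the sequence built above looks like:
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
// On failure %old is stored back over the expected value; the i1 result is
// always stored to Dest.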
405
406 /// Given an ordering required on success, emit all possible cmpxchg
407 /// instructions to cope with the provided (but possibly only dynamically known)
408 /// FailureOrder.
409 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
410                                         bool IsWeak, Address Dest, Address Ptr,
411                                         Address Val1, Address Val2,
412                                         llvm::Value *FailureOrderVal,
413                                         uint64_t Size,
414                                         llvm::AtomicOrdering SuccessOrder,
415                                         llvm::SyncScope::ID Scope) {
416   llvm::AtomicOrdering FailureOrder;
417   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
418     auto FOS = FO->getSExtValue();
419     if (!llvm::isValidAtomicOrderingCABI(FOS))
420       FailureOrder = llvm::AtomicOrdering::Monotonic;
421     else
422       switch ((llvm::AtomicOrderingCABI)FOS) {
423       case llvm::AtomicOrderingCABI::relaxed:
424       case llvm::AtomicOrderingCABI::release:
425       case llvm::AtomicOrderingCABI::acq_rel:
426         FailureOrder = llvm::AtomicOrdering::Monotonic;
427         break;
428       case llvm::AtomicOrderingCABI::consume:
429       case llvm::AtomicOrderingCABI::acquire:
430         FailureOrder = llvm::AtomicOrdering::Acquire;
431         break;
432       case llvm::AtomicOrderingCABI::seq_cst:
433         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
434         break;
435       }
436     if (isStrongerThan(FailureOrder, SuccessOrder)) {
437       // Don't assert on undefined behavior "failure argument shall be no
438       // stronger than the success argument".
439       FailureOrder =
440           llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
441     }
442     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
443                       FailureOrder, Scope);
444     return;
445   }
446
447   // Create all the relevant BB's
448   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
449                    *SeqCstBB = nullptr;
450   MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
451   if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
452       SuccessOrder != llvm::AtomicOrdering::Release)
453     AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
454   if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
455     SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
456
457   llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
458
459   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
460
461   // Emit all the different atomics
462
463   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
464   // doesn't matter unless someone is crazy enough to use something that
465   // doesn't fold to a constant for the ordering.
466   CGF.Builder.SetInsertPoint(MonotonicBB);
467   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
468                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
469   CGF.Builder.CreateBr(ContBB);
470
471   if (AcquireBB) {
472     CGF.Builder.SetInsertPoint(AcquireBB);
473     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474                       Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
475     CGF.Builder.CreateBr(ContBB);
476     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
477                 AcquireBB);
478     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
479                 AcquireBB);
480   }
481   if (SeqCstBB) {
482     CGF.Builder.SetInsertPoint(SeqCstBB);
483     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
484                       llvm::AtomicOrdering::SequentiallyConsistent, Scope);
485     CGF.Builder.CreateBr(ContBB);
486     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
487                 SeqCstBB);
488   }
489
490   CGF.Builder.SetInsertPoint(ContBB);
491 }
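// For example, a call such as
//   atomic_compare_exchange_strong_explicit(&A, &E, D, memory_order_acq_rel,
//                                           Fail);
// where Fail is only known at run time reaches the switch above, which selects
// between the monotonic_fail and acquire_fail blocks (seqcst_fail is created
// only when the success ordering is seq_cst).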
492
493 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
494                          Address Ptr, Address Val1, Address Val2,
495                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
496                          uint64_t Size, llvm::AtomicOrdering Order,
497                          llvm::SyncScope::ID Scope) {
498   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
499   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
500
501   switch (E->getOp()) {
502   case AtomicExpr::AO__c11_atomic_init:
503   case AtomicExpr::AO__opencl_atomic_init:
504     llvm_unreachable("Already handled!");
505
506   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
507   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
508     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
509                                 FailureOrder, Size, Order, Scope);
510     return;
511   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
512   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
513     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
514                                 FailureOrder, Size, Order, Scope);
515     return;
516   case AtomicExpr::AO__atomic_compare_exchange:
517   case AtomicExpr::AO__atomic_compare_exchange_n: {
518     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
519       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
520                                   Val1, Val2, FailureOrder, Size, Order, Scope);
521     } else {
522       // Create all the relevant BB's
523       llvm::BasicBlock *StrongBB =
524           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
525       llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
526       llvm::BasicBlock *ContBB =
527           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
528
529       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
530       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
531
532       CGF.Builder.SetInsertPoint(StrongBB);
533       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
534                                   FailureOrder, Size, Order, Scope);
535       CGF.Builder.CreateBr(ContBB);
536
537       CGF.Builder.SetInsertPoint(WeakBB);
538       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
539                                   FailureOrder, Size, Order, Scope);
540       CGF.Builder.CreateBr(ContBB);
541
542       CGF.Builder.SetInsertPoint(ContBB);
543     }
544     return;
545   }
546   case AtomicExpr::AO__c11_atomic_load:
547   case AtomicExpr::AO__opencl_atomic_load:
548   case AtomicExpr::AO__atomic_load_n:
549   case AtomicExpr::AO__atomic_load: {
550     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
551     Load->setAtomic(Order, Scope);
552     Load->setVolatile(E->isVolatile());
553     CGF.Builder.CreateStore(Load, Dest);
554     return;
555   }
556
557   case AtomicExpr::AO__c11_atomic_store:
558   case AtomicExpr::AO__opencl_atomic_store:
559   case AtomicExpr::AO__atomic_store:
560   case AtomicExpr::AO__atomic_store_n: {
561     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
562     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
563     Store->setAtomic(Order, Scope);
564     Store->setVolatile(E->isVolatile());
565     return;
566   }
567
568   case AtomicExpr::AO__c11_atomic_exchange:
569   case AtomicExpr::AO__opencl_atomic_exchange:
570   case AtomicExpr::AO__atomic_exchange_n:
571   case AtomicExpr::AO__atomic_exchange:
572     Op = llvm::AtomicRMWInst::Xchg;
573     break;
574
575   case AtomicExpr::AO__atomic_add_fetch:
576     PostOp = llvm::Instruction::Add;
577     LLVM_FALLTHROUGH;
578   case AtomicExpr::AO__c11_atomic_fetch_add:
579   case AtomicExpr::AO__opencl_atomic_fetch_add:
580   case AtomicExpr::AO__atomic_fetch_add:
581     Op = llvm::AtomicRMWInst::Add;
582     break;
583
584   case AtomicExpr::AO__atomic_sub_fetch:
585     PostOp = llvm::Instruction::Sub;
586     LLVM_FALLTHROUGH;
587   case AtomicExpr::AO__c11_atomic_fetch_sub:
588   case AtomicExpr::AO__opencl_atomic_fetch_sub:
589   case AtomicExpr::AO__atomic_fetch_sub:
590     Op = llvm::AtomicRMWInst::Sub;
591     break;
592
593   case AtomicExpr::AO__opencl_atomic_fetch_min:
594   case AtomicExpr::AO__atomic_fetch_min:
595     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
596                                                   : llvm::AtomicRMWInst::UMin;
597     break;
598
599   case AtomicExpr::AO__opencl_atomic_fetch_max:
600   case AtomicExpr::AO__atomic_fetch_max:
601     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
602                                                   : llvm::AtomicRMWInst::UMax;
603     break;
604
605   case AtomicExpr::AO__atomic_and_fetch:
606     PostOp = llvm::Instruction::And;
607     LLVM_FALLTHROUGH;
608   case AtomicExpr::AO__c11_atomic_fetch_and:
609   case AtomicExpr::AO__opencl_atomic_fetch_and:
610   case AtomicExpr::AO__atomic_fetch_and:
611     Op = llvm::AtomicRMWInst::And;
612     break;
613
614   case AtomicExpr::AO__atomic_or_fetch:
615     PostOp = llvm::Instruction::Or;
616     LLVM_FALLTHROUGH;
617   case AtomicExpr::AO__c11_atomic_fetch_or:
618   case AtomicExpr::AO__opencl_atomic_fetch_or:
619   case AtomicExpr::AO__atomic_fetch_or:
620     Op = llvm::AtomicRMWInst::Or;
621     break;
622
623   case AtomicExpr::AO__atomic_xor_fetch:
624     PostOp = llvm::Instruction::Xor;
625     LLVM_FALLTHROUGH;
626   case AtomicExpr::AO__c11_atomic_fetch_xor:
627   case AtomicExpr::AO__opencl_atomic_fetch_xor:
628   case AtomicExpr::AO__atomic_fetch_xor:
629     Op = llvm::AtomicRMWInst::Xor;
630     break;
631
632   case AtomicExpr::AO__atomic_nand_fetch:
633     PostOp = llvm::Instruction::And; // the NOT is special cased below
634     LLVM_FALLTHROUGH;
635   case AtomicExpr::AO__atomic_fetch_nand:
636     Op = llvm::AtomicRMWInst::Nand;
637     break;
638   }
639
640   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
641   llvm::AtomicRMWInst *RMWI =
642       CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
643   RMWI->setVolatile(E->isVolatile());
644
645   // For __atomic_*_fetch operations, perform the operation again to
646   // determine the value which was written.
647   llvm::Value *Result = RMWI;
648   if (PostOp)
649     Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
650   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
651     Result = CGF.Builder.CreateNot(Result);
652   CGF.Builder.CreateStore(Result, Dest);
653 }
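// For the __atomic_*_fetch forms the atomicrmw result is the old value, so the
// new value is recomputed above. E.g. __atomic_nand_fetch(&X, M, order)
// becomes, roughly:
//   %old = atomicrmw nand i32* %X, i32 %M seq_cst
//   %tmp = and i32 %old, %M
//   %new = xor i32 %tmp, -1      ; the CreateNot special case
// and %new is what gets stored to Dest.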
654
655 // This function emits any expression (scalar, complex, or aggregate)
656 // into a temporary alloca.
657 static Address
658 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
659   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
660   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
661                        /*Init*/ true);
662   return DeclPtr;
663 }
664
665 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
666                          Address Ptr, Address Val1, Address Val2,
667                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
668                          uint64_t Size, llvm::AtomicOrdering Order,
669                          llvm::Value *Scope) {
670   auto ScopeModel = Expr->getScopeModel();
671
672   // LLVM atomic instructions always have a sync scope. If the clang atomic
673   // expression has no scope operand, use the default LLVM sync scope.
674   if (!ScopeModel) {
675     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
676                  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
677     return;
678   }
679
680   // Handle constant scope.
681   if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
682     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
683         ScopeModel->map(SC->getZExtValue()), CGF.CGM.getLLVMContext());
684     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
685                  Order, SCID);
686     return;
687   }
688
689   // Handle non-constant scope.
690   auto &Builder = CGF.Builder;
691   auto Scopes = ScopeModel->getRuntimeValues();
692   llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
693   for (auto S : Scopes)
694     BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
695
696   llvm::BasicBlock *ContBB =
697       CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
698
699   auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
700   // If an unsupported sync scope is encountered at run time, assume a
701   // fallback sync scope value.
702   auto FallBack = ScopeModel->getFallBackValue();
703   llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
704   for (auto S : Scopes) {
705     auto *B = BB[S];
706     if (S != FallBack)
707       SI->addCase(Builder.getInt32(S), B);
708
709     Builder.SetInsertPoint(B);
710     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
711                  Order,
712                  CGF.getTargetHooks().getLLVMSyncScopeID(ScopeModel->map(S),
713                                                          CGF.getLLVMContext()));
714     Builder.CreateBr(ContBB);
715   }
716
717   Builder.SetInsertPoint(ContBB);
718 }
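// For example, an OpenCL call such as
//   __opencl_atomic_fetch_add(P, 1, memory_order_seq_cst, Scope);
// with a run-time Scope value is lowered by the code above into a switch with
// one basic block per supported memory scope, each emitting the operation with
// the corresponding target sync scope ID; unknown values fall back to the
// scope model's fallback scope.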
719
720 static void
721 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
722                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
723                   SourceLocation Loc, CharUnits SizeInChars) {
724   if (UseOptimizedLibcall) {
725     // Load value and pass it to the function directly.
726     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
727     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
728     ValTy =
729         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
730     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
731                                                 SizeInBits)->getPointerTo();
732     Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
733     Val = CGF.EmitLoadOfScalar(Ptr, false,
734                                CGF.getContext().getPointerType(ValTy),
735                                Loc);
736     // Coerce the value into an appropriately sized integer type.
737     Args.add(RValue::get(Val), ValTy);
738   } else {
739     // Non-optimized functions always take a reference.
740     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
741                          CGF.getContext().VoidPtrTy);
742   }
743 }
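// For example, the addend of a 4-byte __atomic_fetch_add is reloaded as an i32
// and passed by value when the optimized __atomic_fetch_add_4 libcall is used,
// whereas the generic __atomic_* entry points always take the value by address
// as a void *.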
744
745 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
746   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
747   QualType MemTy = AtomicTy;
748   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
749     MemTy = AT->getValueType();
750   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
751
752   Address Val1 = Address::invalid();
753   Address Val2 = Address::invalid();
754   Address Dest = Address::invalid();
755   Address Ptr = EmitPointerWithAlignment(E->getPtr());
756
757   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
758       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
759     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
760     EmitAtomicInit(E->getVal1(), lvalue);
761     return RValue::get(nullptr);
762   }
763
764   CharUnits sizeChars, alignChars;
765   std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
766   uint64_t Size = sizeChars.getQuantity();
767   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
768   bool UseLibcall = ((Ptr.getAlignment() % sizeChars) != 0 ||
769                      getContext().toBits(sizeChars) > MaxInlineWidthInBits);
770
771   if (UseLibcall)
772     CGM.getDiags().Report(E->getLocStart(), diag::warn_atomic_op_misaligned);
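  // For instance, with a 64-bit MaxInlineWidthInBits a 16-byte _Atomic struct,
  // or an atomic object whose alignment is smaller than its size (e.g. inside
  // a packed struct), takes the libcall path below instead of inline atomics.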
773
774   llvm::Value *Order = EmitScalarExpr(E->getOrder());
775   llvm::Value *Scope =
776       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
777
778   switch (E->getOp()) {
779   case AtomicExpr::AO__c11_atomic_init:
780   case AtomicExpr::AO__opencl_atomic_init:
781     llvm_unreachable("Already handled above with EmitAtomicInit!");
782
783   case AtomicExpr::AO__c11_atomic_load:
784   case AtomicExpr::AO__opencl_atomic_load:
785   case AtomicExpr::AO__atomic_load_n:
786     break;
787
788   case AtomicExpr::AO__atomic_load:
789     Dest = EmitPointerWithAlignment(E->getVal1());
790     break;
791
792   case AtomicExpr::AO__atomic_store:
793     Val1 = EmitPointerWithAlignment(E->getVal1());
794     break;
795
796   case AtomicExpr::AO__atomic_exchange:
797     Val1 = EmitPointerWithAlignment(E->getVal1());
798     Dest = EmitPointerWithAlignment(E->getVal2());
799     break;
800
801   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
802   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
803   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
804   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
805   case AtomicExpr::AO__atomic_compare_exchange_n:
806   case AtomicExpr::AO__atomic_compare_exchange:
807     Val1 = EmitPointerWithAlignment(E->getVal1());
808     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
809       Val2 = EmitPointerWithAlignment(E->getVal2());
810     else
811       Val2 = EmitValToTemp(*this, E->getVal2());
812     OrderFail = EmitScalarExpr(E->getOrderFail());
813     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
814         E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
815       IsWeak = EmitScalarExpr(E->getWeak());
816     break;
817
818   case AtomicExpr::AO__c11_atomic_fetch_add:
819   case AtomicExpr::AO__c11_atomic_fetch_sub:
820   case AtomicExpr::AO__opencl_atomic_fetch_add:
821   case AtomicExpr::AO__opencl_atomic_fetch_sub:
822     if (MemTy->isPointerType()) {
823       // For pointer arithmetic, we're required to do a bit of math:
824       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
825       // ... but only for the C11 builtins. The GNU builtins expect the
826       // user to multiply by sizeof(T).
827       QualType Val1Ty = E->getVal1()->getType();
828       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
829       CharUnits PointeeIncAmt =
830           getContext().getTypeSizeInChars(MemTy->getPointeeType());
831       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
832       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
833       Val1 = Temp;
834       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
835       break;
836     }
837       LLVM_FALLTHROUGH;
838   case AtomicExpr::AO__atomic_fetch_add:
839   case AtomicExpr::AO__atomic_fetch_sub:
840   case AtomicExpr::AO__atomic_add_fetch:
841   case AtomicExpr::AO__atomic_sub_fetch:
842   case AtomicExpr::AO__c11_atomic_store:
843   case AtomicExpr::AO__c11_atomic_exchange:
844   case AtomicExpr::AO__opencl_atomic_store:
845   case AtomicExpr::AO__opencl_atomic_exchange:
846   case AtomicExpr::AO__atomic_store_n:
847   case AtomicExpr::AO__atomic_exchange_n:
848   case AtomicExpr::AO__c11_atomic_fetch_and:
849   case AtomicExpr::AO__c11_atomic_fetch_or:
850   case AtomicExpr::AO__c11_atomic_fetch_xor:
851   case AtomicExpr::AO__opencl_atomic_fetch_and:
852   case AtomicExpr::AO__opencl_atomic_fetch_or:
853   case AtomicExpr::AO__opencl_atomic_fetch_xor:
854   case AtomicExpr::AO__opencl_atomic_fetch_min:
855   case AtomicExpr::AO__opencl_atomic_fetch_max:
856   case AtomicExpr::AO__atomic_fetch_and:
857   case AtomicExpr::AO__atomic_fetch_or:
858   case AtomicExpr::AO__atomic_fetch_xor:
859   case AtomicExpr::AO__atomic_fetch_nand:
860   case AtomicExpr::AO__atomic_and_fetch:
861   case AtomicExpr::AO__atomic_or_fetch:
862   case AtomicExpr::AO__atomic_xor_fetch:
863   case AtomicExpr::AO__atomic_nand_fetch:
864   case AtomicExpr::AO__atomic_fetch_min:
865   case AtomicExpr::AO__atomic_fetch_max:
866     Val1 = EmitValToTemp(*this, E->getVal1());
867     break;
868   }
869
870   QualType RValTy = E->getType().getUnqualifiedType();
871
872   // The inlined atomics only function on iN types, where N is a power of 2. We
873   // need to make sure (via temporaries if necessary) that all incoming values
874   // are compatible.
875   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
876   AtomicInfo Atomics(*this, AtomicVal);
877
878   Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
879   if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
880   if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
881   if (Dest.isValid())
882     Dest = Atomics.emitCastToAtomicIntPointer(Dest);
883   else if (E->isCmpXChg())
884     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
885   else if (!RValTy->isVoidType())
886     Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
887
888   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
889   if (UseLibcall) {
890     bool UseOptimizedLibcall = false;
891     switch (E->getOp()) {
892     case AtomicExpr::AO__c11_atomic_init:
893     case AtomicExpr::AO__opencl_atomic_init:
894       llvm_unreachable("Already handled above with EmitAtomicInit!");
895
896     case AtomicExpr::AO__c11_atomic_fetch_add:
897     case AtomicExpr::AO__opencl_atomic_fetch_add:
898     case AtomicExpr::AO__atomic_fetch_add:
899     case AtomicExpr::AO__c11_atomic_fetch_and:
900     case AtomicExpr::AO__opencl_atomic_fetch_and:
901     case AtomicExpr::AO__atomic_fetch_and:
902     case AtomicExpr::AO__c11_atomic_fetch_or:
903     case AtomicExpr::AO__opencl_atomic_fetch_or:
904     case AtomicExpr::AO__atomic_fetch_or:
905     case AtomicExpr::AO__atomic_fetch_nand:
906     case AtomicExpr::AO__c11_atomic_fetch_sub:
907     case AtomicExpr::AO__opencl_atomic_fetch_sub:
908     case AtomicExpr::AO__atomic_fetch_sub:
909     case AtomicExpr::AO__c11_atomic_fetch_xor:
910     case AtomicExpr::AO__opencl_atomic_fetch_xor:
911     case AtomicExpr::AO__opencl_atomic_fetch_min:
912     case AtomicExpr::AO__opencl_atomic_fetch_max:
913     case AtomicExpr::AO__atomic_fetch_xor:
914     case AtomicExpr::AO__atomic_add_fetch:
915     case AtomicExpr::AO__atomic_and_fetch:
916     case AtomicExpr::AO__atomic_nand_fetch:
917     case AtomicExpr::AO__atomic_or_fetch:
918     case AtomicExpr::AO__atomic_sub_fetch:
919     case AtomicExpr::AO__atomic_xor_fetch:
920     case AtomicExpr::AO__atomic_fetch_min:
921     case AtomicExpr::AO__atomic_fetch_max:
922       // For these, only library calls for certain sizes exist.
923       UseOptimizedLibcall = true;
924       break;
925
926     case AtomicExpr::AO__c11_atomic_load:
927     case AtomicExpr::AO__c11_atomic_store:
928     case AtomicExpr::AO__c11_atomic_exchange:
929     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
930     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
931     case AtomicExpr::AO__opencl_atomic_load:
932     case AtomicExpr::AO__opencl_atomic_store:
933     case AtomicExpr::AO__opencl_atomic_exchange:
934     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
935     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
936     case AtomicExpr::AO__atomic_load_n:
937     case AtomicExpr::AO__atomic_load:
938     case AtomicExpr::AO__atomic_store_n:
939     case AtomicExpr::AO__atomic_store:
940     case AtomicExpr::AO__atomic_exchange_n:
941     case AtomicExpr::AO__atomic_exchange:
942     case AtomicExpr::AO__atomic_compare_exchange_n:
943     case AtomicExpr::AO__atomic_compare_exchange:
944       // Only use optimized library calls for sizes for which they exist.
945       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
946         UseOptimizedLibcall = true;
947       break;
948     }
949
950     CallArgList Args;
951     if (!UseOptimizedLibcall) {
952       // For non-optimized library calls, the size is the first parameter
953       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
954                getContext().getSizeType());
955     }
956     // Atomic address is the first or second parameter
957     // The OpenCL atomic library functions only accept pointer arguments to
958     // the generic address space.
959     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
960       if (!E->isOpenCL())
961         return V;
962       auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
963       if (AS == LangAS::opencl_generic)
964         return V;
965       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
966       auto T = V->getType();
967       auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
968
969       return getTargetHooks().performAddrSpaceCast(
970           *this, V, AS, LangAS::opencl_generic, DestType, false);
971     };
972
973     Args.add(RValue::get(CastToGenericAddrSpace(
974                  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
975              getContext().VoidPtrTy);
976
977     std::string LibCallName;
978     QualType LoweredMemTy =
979       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
980     QualType RetTy;
981     bool HaveRetTy = false;
982     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
983     switch (E->getOp()) {
984     case AtomicExpr::AO__c11_atomic_init:
985     case AtomicExpr::AO__opencl_atomic_init:
986       llvm_unreachable("Already handled!");
987
988     // There is only one libcall for compare and exchange, because there is no
989     // optimization benefit possible from a libcall version of a weak compare
990     // and exchange.
991     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
992     //                                void *desired, int success, int failure)
993     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
994     //                                  int success, int failure)
995     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
996     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
997     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
998     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
999     case AtomicExpr::AO__atomic_compare_exchange:
1000     case AtomicExpr::AO__atomic_compare_exchange_n:
1001       LibCallName = "__atomic_compare_exchange";
1002       RetTy = getContext().BoolTy;
1003       HaveRetTy = true;
1004       Args.add(
1005           RValue::get(CastToGenericAddrSpace(
1006               EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1007           getContext().VoidPtrTy);
1008       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1009                         MemTy, E->getExprLoc(), sizeChars);
1010       Args.add(RValue::get(Order), getContext().IntTy);
1011       Order = OrderFail;
1012       break;
1013     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1014     //                        int order)
1015     // T __atomic_exchange_N(T *mem, T val, int order)
1016     case AtomicExpr::AO__c11_atomic_exchange:
1017     case AtomicExpr::AO__opencl_atomic_exchange:
1018     case AtomicExpr::AO__atomic_exchange_n:
1019     case AtomicExpr::AO__atomic_exchange:
1020       LibCallName = "__atomic_exchange";
1021       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1022                         MemTy, E->getExprLoc(), sizeChars);
1023       break;
1024     // void __atomic_store(size_t size, void *mem, void *val, int order)
1025     // void __atomic_store_N(T *mem, T val, int order)
1026     case AtomicExpr::AO__c11_atomic_store:
1027     case AtomicExpr::AO__opencl_atomic_store:
1028     case AtomicExpr::AO__atomic_store:
1029     case AtomicExpr::AO__atomic_store_n:
1030       LibCallName = "__atomic_store";
1031       RetTy = getContext().VoidTy;
1032       HaveRetTy = true;
1033       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1034                         MemTy, E->getExprLoc(), sizeChars);
1035       break;
1036     // void __atomic_load(size_t size, void *mem, void *return, int order)
1037     // T __atomic_load_N(T *mem, int order)
1038     case AtomicExpr::AO__c11_atomic_load:
1039     case AtomicExpr::AO__opencl_atomic_load:
1040     case AtomicExpr::AO__atomic_load:
1041     case AtomicExpr::AO__atomic_load_n:
1042       LibCallName = "__atomic_load";
1043       break;
1044     // T __atomic_add_fetch_N(T *mem, T val, int order)
1045     // T __atomic_fetch_add_N(T *mem, T val, int order)
1046     case AtomicExpr::AO__atomic_add_fetch:
1047       PostOp = llvm::Instruction::Add;
1048       LLVM_FALLTHROUGH;
1049     case AtomicExpr::AO__c11_atomic_fetch_add:
1050     case AtomicExpr::AO__opencl_atomic_fetch_add:
1051     case AtomicExpr::AO__atomic_fetch_add:
1052       LibCallName = "__atomic_fetch_add";
1053       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1054                         LoweredMemTy, E->getExprLoc(), sizeChars);
1055       break;
1056     // T __atomic_and_fetch_N(T *mem, T val, int order)
1057     // T __atomic_fetch_and_N(T *mem, T val, int order)
1058     case AtomicExpr::AO__atomic_and_fetch:
1059       PostOp = llvm::Instruction::And;
1060       LLVM_FALLTHROUGH;
1061     case AtomicExpr::AO__c11_atomic_fetch_and:
1062     case AtomicExpr::AO__opencl_atomic_fetch_and:
1063     case AtomicExpr::AO__atomic_fetch_and:
1064       LibCallName = "__atomic_fetch_and";
1065       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1066                         MemTy, E->getExprLoc(), sizeChars);
1067       break;
1068     // T __atomic_or_fetch_N(T *mem, T val, int order)
1069     // T __atomic_fetch_or_N(T *mem, T val, int order)
1070     case AtomicExpr::AO__atomic_or_fetch:
1071       PostOp = llvm::Instruction::Or;
1072       LLVM_FALLTHROUGH;
1073     case AtomicExpr::AO__c11_atomic_fetch_or:
1074     case AtomicExpr::AO__opencl_atomic_fetch_or:
1075     case AtomicExpr::AO__atomic_fetch_or:
1076       LibCallName = "__atomic_fetch_or";
1077       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1078                         MemTy, E->getExprLoc(), sizeChars);
1079       break;
1080     // T __atomic_sub_fetch_N(T *mem, T val, int order)
1081     // T __atomic_fetch_sub_N(T *mem, T val, int order)
1082     case AtomicExpr::AO__atomic_sub_fetch:
1083       PostOp = llvm::Instruction::Sub;
1084       LLVM_FALLTHROUGH;
1085     case AtomicExpr::AO__c11_atomic_fetch_sub:
1086     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1087     case AtomicExpr::AO__atomic_fetch_sub:
1088       LibCallName = "__atomic_fetch_sub";
1089       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1090                         LoweredMemTy, E->getExprLoc(), sizeChars);
1091       break;
1092     // T __atomic_xor_fetch_N(T *mem, T val, int order)
1093     // T __atomic_fetch_xor_N(T *mem, T val, int order)
1094     case AtomicExpr::AO__atomic_xor_fetch:
1095       PostOp = llvm::Instruction::Xor;
1096       LLVM_FALLTHROUGH;
1097     case AtomicExpr::AO__c11_atomic_fetch_xor:
1098     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1099     case AtomicExpr::AO__atomic_fetch_xor:
1100       LibCallName = "__atomic_fetch_xor";
1101       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1102                         MemTy, E->getExprLoc(), sizeChars);
1103       break;
1104     case AtomicExpr::AO__atomic_fetch_min:
1105     case AtomicExpr::AO__opencl_atomic_fetch_min:
1106       LibCallName = E->getValueType()->isSignedIntegerType()
1107                         ? "__atomic_fetch_min"
1108                         : "__atomic_fetch_umin";
1109       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1110                         LoweredMemTy, E->getExprLoc(), sizeChars);
1111       break;
1112     case AtomicExpr::AO__atomic_fetch_max:
1113     case AtomicExpr::AO__opencl_atomic_fetch_max:
1114       LibCallName = E->getValueType()->isSignedIntegerType()
1115                         ? "__atomic_fetch_max"
1116                         : "__atomic_fetch_umax";
1117       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1118                         LoweredMemTy, E->getExprLoc(), sizeChars);
1119       break;
1120     // T __atomic_nand_fetch_N(T *mem, T val, int order)
1121     // T __atomic_fetch_nand_N(T *mem, T val, int order)
1122     case AtomicExpr::AO__atomic_nand_fetch:
1123       PostOp = llvm::Instruction::And; // the NOT is special cased below
1124       LLVM_FALLTHROUGH;
1125     case AtomicExpr::AO__atomic_fetch_nand:
1126       LibCallName = "__atomic_fetch_nand";
1127       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1128                         MemTy, E->getExprLoc(), sizeChars);
1129       break;
1130     }
1131
1132     if (E->isOpenCL()) {
1133       LibCallName = std::string("__opencl") +
1134           StringRef(LibCallName).drop_front(1).str();
1135
1136     }
1137     // Optimized functions have the size in their name.
1138     if (UseOptimizedLibcall)
1139       LibCallName += "_" + llvm::utostr(Size);
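    // E.g. a naturally aligned 4-byte __c11_atomic_fetch_add lowers to a call
    // like
    //   i32 @__atomic_fetch_add_4(i8* %obj, i32 %val, i32 %order)
    // whereas operations without a fixed-size variant fall back to the
    // generic, size-taking forms such as
    //   void @__atomic_load(i64 %size, i8* %obj, i8* %ret, i32 %order)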
1140     // By default, assume we return a value of the atomic type.
1141     if (!HaveRetTy) {
1142       if (UseOptimizedLibcall) {
1143         // Value is returned directly.
1144         // The function returns an appropriately sized integer type.
1145         RetTy = getContext().getIntTypeForBitwidth(
1146             getContext().toBits(sizeChars), /*Signed=*/false);
1147       } else {
1148         // Value is returned through a parameter before the order argument.
1149         RetTy = getContext().VoidTy;
1150         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1151                  getContext().VoidPtrTy);
1152       }
1153     }
1154     // The order is always the last parameter, except for the OpenCL scope.
1155     Args.add(RValue::get(Order),
1156              getContext().IntTy);
1157     if (E->isOpenCL())
1158       Args.add(RValue::get(Scope), getContext().IntTy);
1159
1160     // PostOp is only needed for the atomic_*_fetch operations, and
1161     // thus is only needed for and implemented in the
1162     // UseOptimizedLibcall codepath.
1163     assert(UseOptimizedLibcall || !PostOp);
1164
1165     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1166     // The value is returned directly from the libcall.
1167     if (E->isCmpXChg())
1168       return Res;
1169
1170     // The value is returned directly for optimized libcalls but the expr
1171     // provided an out-param.
1172     if (UseOptimizedLibcall && Res.getScalarVal()) {
1173       llvm::Value *ResVal = Res.getScalarVal();
1174       if (PostOp) {
1175         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1176         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1177       }
1178       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1179         ResVal = Builder.CreateNot(ResVal);
1180
1181       Builder.CreateStore(
1182           ResVal,
1183           Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1184     }
1185
1186     if (RValTy->isVoidType())
1187       return RValue::get(nullptr);
1188
1189     return convertTempToRValue(
1190         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1191         RValTy, E->getExprLoc());
1192   }
1193
1194   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1195                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1196                  E->getOp() == AtomicExpr::AO__atomic_store ||
1197                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1198   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1199                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1200                 E->getOp() == AtomicExpr::AO__atomic_load ||
1201                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1202
1203   if (isa<llvm::ConstantInt>(Order)) {
1204     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1205     // We should not ever get to a case where the ordering isn't a valid C ABI
1206     // value, but it's hard to enforce that in general.
1207     if (llvm::isValidAtomicOrderingCABI(ord))
1208       switch ((llvm::AtomicOrderingCABI)ord) {
1209       case llvm::AtomicOrderingCABI::relaxed:
1210         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1211                      llvm::AtomicOrdering::Monotonic, Scope);
1212         break;
1213       case llvm::AtomicOrderingCABI::consume:
1214       case llvm::AtomicOrderingCABI::acquire:
1215         if (IsStore)
1216           break; // Avoid crashing on code with undefined behavior
1217         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1218                      llvm::AtomicOrdering::Acquire, Scope);
1219         break;
1220       case llvm::AtomicOrderingCABI::release:
1221         if (IsLoad)
1222           break; // Avoid crashing on code with undefined behavior
1223         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1224                      llvm::AtomicOrdering::Release, Scope);
1225         break;
1226       case llvm::AtomicOrderingCABI::acq_rel:
1227         if (IsLoad || IsStore)
1228           break; // Avoid crashing on code with undefined behavior
1229         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1230                      llvm::AtomicOrdering::AcquireRelease, Scope);
1231         break;
1232       case llvm::AtomicOrderingCABI::seq_cst:
1233         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1234                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1235         break;
1236       }
1237     if (RValTy->isVoidType())
1238       return RValue::get(nullptr);
1239
1240     return convertTempToRValue(
1241         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1242                                         Dest.getAddressSpace())),
1243         RValTy, E->getExprLoc());
1244   }
1245
1246   // Long case, when Order isn't obviously constant.
1247
1248   // Create all the relevant basic blocks.
1249   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1250                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1251                    *SeqCstBB = nullptr;
1252   MonotonicBB = createBasicBlock("monotonic", CurFn);
1253   if (!IsStore)
1254     AcquireBB = createBasicBlock("acquire", CurFn);
1255   if (!IsLoad)
1256     ReleaseBB = createBasicBlock("release", CurFn);
1257   if (!IsLoad && !IsStore)
1258     AcqRelBB = createBasicBlock("acqrel", CurFn);
1259   SeqCstBB = createBasicBlock("seqcst", CurFn);
1260   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1261
1262   // Create the switch for the split
1263   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1264   // doesn't matter unless someone is crazy enough to use something that
1265   // doesn't fold to a constant for the ordering.
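  // Rough sketch (an assumption about the usual output, not verbatim IR) of
  // the control flow emitted below for a non-constant ordering:
  //   switch i32 %order, label %monotonic [ i32 1, label %acquire
  //                                          i32 2, label %acquire
  //                                          i32 3, label %release ... ]
  // where each case block performs the same atomic operation at a fixed
  // ordering and then branches to %atomic.continue.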
1266   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1267   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1268
1269   // Emit all the different atomics
1270   Builder.SetInsertPoint(MonotonicBB);
1271   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1272                llvm::AtomicOrdering::Monotonic, Scope);
1273   Builder.CreateBr(ContBB);
1274   if (!IsStore) {
1275     Builder.SetInsertPoint(AcquireBB);
1276     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1277                  llvm::AtomicOrdering::Acquire, Scope);
1278     Builder.CreateBr(ContBB);
1279     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1280                 AcquireBB);
1281     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1282                 AcquireBB);
1283   }
1284   if (!IsLoad) {
1285     Builder.SetInsertPoint(ReleaseBB);
1286     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1287                  llvm::AtomicOrdering::Release, Scope);
1288     Builder.CreateBr(ContBB);
1289     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1290                 ReleaseBB);
1291   }
1292   if (!IsLoad && !IsStore) {
1293     Builder.SetInsertPoint(AcqRelBB);
1294     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1295                  llvm::AtomicOrdering::AcquireRelease, Scope);
1296     Builder.CreateBr(ContBB);
1297     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1298                 AcqRelBB);
1299   }
1300   Builder.SetInsertPoint(SeqCstBB);
1301   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1302                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1303   Builder.CreateBr(ContBB);
1304   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1305               SeqCstBB);
1306
1307   // Cleanup and return
1308   Builder.SetInsertPoint(ContBB);
1309   if (RValTy->isVoidType())
1310     return RValue::get(nullptr);
1311
1312   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1313   return convertTempToRValue(
1314       Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1315                                       Dest.getAddressSpace())),
1316       RValTy, E->getExprLoc());
1317 }
1318
1319 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1320   unsigned addrspace =
1321     cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1322   llvm::IntegerType *ty =
1323     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1324   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1325 }
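// Illustrative example (assumption): for an _Atomic(float) object in address
// space 0, the cast above produces an i32* view of the same storage, since
// AtomicSizeInBits is 32 for that type.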
1326
1327 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1328   llvm::Type *Ty = Addr.getElementType();
1329   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1330   if (SourceSizeInBits != AtomicSizeInBits) {
1331     Address Tmp = CreateTempAlloca();
1332     CGF.Builder.CreateMemCpy(Tmp, Addr,
1333                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1334     Addr = Tmp;
1335   }
1336
1337   return emitCastToAtomicIntPointer(Addr);
1338 }
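// Illustrative example (assumption): loading a 6-byte value through an 8-byte
// atomic integer first memcpys the 6 bytes into a full-size temporary, then
// reinterprets that temporary as an i64*.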
1339
1340 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1341                                              AggValueSlot resultSlot,
1342                                              SourceLocation loc,
1343                                              bool asValue) const {
1344   if (LVal.isSimple()) {
1345     if (EvaluationKind == TEK_Aggregate)
1346       return resultSlot.asRValue();
1347
1348     // Drill into the padding structure if we have one.
1349     if (hasPadding())
1350       addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
1351
1352     // Otherwise, just convert the temporary to an r-value using the
1353     // normal conversion routine.
1354     return CGF.convertTempToRValue(addr, getValueType(), loc);
1355   }
1356   if (!asValue)
1357     // Get RValue from temp memory as atomic for non-simple lvalues
1358     return RValue::get(CGF.Builder.CreateLoad(addr));
1359   if (LVal.isBitField())
1360     return CGF.EmitLoadOfBitfieldLValue(
1361         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1362                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1363   if (LVal.isVectorElt())
1364     return CGF.EmitLoadOfLValue(
1365         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1366                               LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1367   assert(LVal.isExtVectorElt());
1368   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1369       addr, LVal.getExtVectorElts(), LVal.getType(),
1370       LVal.getBaseInfo(), TBAAAccessInfo()));
1371 }
1372
1373 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1374                                              AggValueSlot ResultSlot,
1375                                              SourceLocation Loc,
1376                                              bool AsValue) const {
1377   // Try to avoid going through memory in some easy cases.
1378   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1379   if (getEvaluationKind() == TEK_Scalar &&
1380       (((!LVal.isBitField() ||
1381          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1382         !hasPadding()) ||
1383        !AsValue)) {
1384     auto *ValTy = AsValue
1385                       ? CGF.ConvertTypeForMem(ValueTy)
1386                       : getAtomicAddress().getType()->getPointerElementType();
1387     if (ValTy->isIntegerTy()) {
1388       assert(IntVal->getType() == ValTy && "Different integer types.");
1389       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1390     } else if (ValTy->isPointerTy())
1391       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1392     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1393       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1394   }
1395
1396   // Create a temporary.  This needs to be big enough to hold the
1397   // atomic integer.
1398   Address Temp = Address::invalid();
1399   bool TempIsVolatile = false;
1400   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1401     assert(!ResultSlot.isIgnored());
1402     Temp = ResultSlot.getAddress();
1403     TempIsVolatile = ResultSlot.isVolatile();
1404   } else {
1405     Temp = CreateTempAlloca();
1406   }
1407
1408   // Slam the integer into the temporary.
1409   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1410   CGF.Builder.CreateStore(IntVal, CastTemp)
1411       ->setVolatile(TempIsVolatile);
1412
1413   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1414 }
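// Example (assumption): for _Atomic(float), the i32 returned by an atomic
// load is bitcast back to float in the fast path above; for _Atomic(int *),
// the integer is converted back with inttoptr.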
1415
1416 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1417                                        llvm::AtomicOrdering AO, bool) {
1418   // void __atomic_load(size_t size, void *mem, void *ret, int order);
1419   CallArgList Args;
1420   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1421   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1422            CGF.getContext().VoidPtrTy);
1423   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1424            CGF.getContext().VoidPtrTy);
1425   Args.add(
1426       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1427       CGF.getContext().IntTy);
1428   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1429 }
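// A hedged sketch of the libcall emitted above for a 16-byte atomic object
// loaded seq_cst (operand names are illustrative):
//   call void @__atomic_load(i64 16, i8* %obj, i8* %dest, i32 5)
// 5 being the C ABI encoding of memory_order_seq_cst.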
1430
1431 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1432                                           bool IsVolatile) {
1433   // Okay, we're doing this natively.
1434   Address Addr = getAtomicAddressAsAtomicIntPointer();
1435   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1436   Load->setAtomic(AO);
1437
1438   // Other decoration.
1439   if (IsVolatile)
1440     Load->setVolatile(true);
1441   CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1442   return Load;
1443 }
1444
1445 /// An LValue is a candidate for having its loads and stores be made atomic if
1446 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1447 /// such an operation can be performed without a libcall.
1448 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1449   if (!CGM.getCodeGenOpts().MSVolatile) return false;
1450   AtomicInfo AI(*this, LV);
1451   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1452   // An atomic is inline if we don't need to use a libcall.
1453   bool AtomicIsInline = !AI.shouldUseLibcall();
1454   // MSVC doesn't seem to do this for types wider than a pointer.
1455   if (getContext().getTypeSize(LV.getType()) >
1456       getContext().getTypeSize(getContext().getIntPtrType()))
1457     return false;
1458   return IsVolatile && AtomicIsInline;
1459 }
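// Illustrative consequence (assumption): under /volatile:ms, a read of a
// pointer-sized 'volatile' field becomes an acquire atomic load and a write
// becomes a release atomic store, provided no libcall would be needed.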
1460
1461 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1462                                        AggValueSlot Slot) {
1463   llvm::AtomicOrdering AO;
1464   bool IsVolatile = LV.isVolatileQualified();
1465   if (LV.getType()->isAtomicType()) {
1466     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1467   } else {
1468     AO = llvm::AtomicOrdering::Acquire;
1469     IsVolatile = true;
1470   }
1471   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1472 }
1473
1474 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1475                                   bool AsValue, llvm::AtomicOrdering AO,
1476                                   bool IsVolatile) {
1477   // Check whether we should use a library call.
1478   if (shouldUseLibcall()) {
1479     Address TempAddr = Address::invalid();
1480     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1481       assert(getEvaluationKind() == TEK_Aggregate);
1482       TempAddr = ResultSlot.getAddress();
1483     } else
1484       TempAddr = CreateTempAlloca();
1485
1486     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1487
1488     // Okay, turn that back into the original value or whole atomic (for
1489     // non-simple lvalues) type.
1490     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1491   }
1492
1493   // Okay, we're doing this natively.
1494   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1495
1496   // If we're ignoring an aggregate return, don't do anything.
1497   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1498     return RValue::getAggregate(Address::invalid(), false);
1499
1500   // Okay, turn that back into the original value or atomic (for non-simple
1501   // lvalues) type.
1502   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1503 }
1504
1505 /// Emit a load from an l-value of atomic type.  Note that the r-value
1506 /// we produce is an r-value of the atomic *value* type.
1507 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1508                                        llvm::AtomicOrdering AO, bool IsVolatile,
1509                                        AggValueSlot resultSlot) {
1510   AtomicInfo Atomics(*this, src);
1511   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1512                                 IsVolatile);
1513 }
1514
1515 /// Copy an r-value into memory as part of storing to an atomic type.
1516 /// This needs to create a bit-pattern suitable for atomic operations.
1517 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1518   assert(LVal.isSimple());
1519   // If we have an aggregate r-value, it should be of the atomic type,
1520   // which means that the caller is responsible for having zeroed
1521   // any padding.  Just do an aggregate copy of that type.
1522   if (rvalue.isAggregate()) {
1523     LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1524     LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1525                                     getAtomicType());
1526     bool IsVolatile = rvalue.isVolatileQualified() ||
1527                       LVal.isVolatileQualified();
1528     CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1529                           AggValueSlot::DoesNotOverlap, IsVolatile);
1530     return;
1531   }
1532
1533   // Okay, otherwise we're copying a scalar or complex value.
1534
1535   // Zero out the buffer if necessary.
1536   emitMemSetZeroIfNecessary();
1537
1538   // Drill past the padding if present.
1539   LValue TempLVal = projectValue();
1540
1541   // Okay, store the rvalue in.
1542   if (rvalue.isScalar()) {
1543     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1544   } else {
1545     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1546   }
1547 }
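// Illustrative note: when the value is narrower than the atomic object (Clang
// rounds atomic sizes up, so padding can exist), zeroing the buffer first
// gives cmpxchg-based updates a deterministic bit pattern to compare against.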
1548
1549
1550 /// Materialize an r-value into memory for the purposes of storing it
1551 /// to an atomic type.
1552 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1553   // Aggregate r-values are already in memory, and EmitAtomicStore
1554   // requires them to be values of the atomic type.
1555   if (rvalue.isAggregate())
1556     return rvalue.getAggregateAddress();
1557
1558   // Otherwise, make a temporary and materialize into it.
1559   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1560   AtomicInfo Atomics(CGF, TempLV);
1561   Atomics.emitCopyIntoMemory(rvalue);
1562   return TempLV.getAddress();
1563 }
1564
1565 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1566   // If we've got a scalar value of the right size, try to avoid going
1567   // through memory.
1568   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1569     llvm::Value *Value = RVal.getScalarVal();
1570     if (isa<llvm::IntegerType>(Value->getType()))
1571       return CGF.EmitToMemory(Value, ValueTy);
1572     else {
1573       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1574           CGF.getLLVMContext(),
1575           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1576       if (isa<llvm::PointerType>(Value->getType()))
1577         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1578       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1579         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1580     }
1581   }
1582   // Otherwise, we need to go through memory.
1583   // Put the r-value in memory.
1584   Address Addr = materializeRValue(RVal);
1585
1586   // Cast the temporary to the atomic int type and pull a value out.
1587   Addr = emitCastToAtomicIntPointer(Addr);
1588   return CGF.Builder.CreateLoad(Addr);
1589 }
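// Example (assumption): exchanging an _Atomic(int *) converts the new pointer
// to an integer with ptrtoint in the fast path above; wider or padded values
// take the materialize-and-reload path through memory instead.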
1590
1591 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1592     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1593     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1594   // Do the atomic compare-and-exchange.
1595   Address Addr = getAtomicAddressAsAtomicIntPointer();
1596   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1597                                                ExpectedVal, DesiredVal,
1598                                                Success, Failure);
1599   // Other decoration.
1600   Inst->setVolatile(LVal.isVolatileQualified());
1601   Inst->setWeak(IsWeak);
1602
1603   // Okay, turn that back into the original value type.
1604   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1605   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1606   return std::make_pair(PreviousVal, SuccessFailureVal);
1607 }
1608
1609 llvm::Value *
1610 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1611                                              llvm::Value *DesiredAddr,
1612                                              llvm::AtomicOrdering Success,
1613                                              llvm::AtomicOrdering Failure) {
1614   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1615   // void *desired, int success, int failure);
1616   CallArgList Args;
1617   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1618   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1619            CGF.getContext().VoidPtrTy);
1620   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1621            CGF.getContext().VoidPtrTy);
1622   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1623            CGF.getContext().VoidPtrTy);
1624   Args.add(RValue::get(
1625                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1626            CGF.getContext().IntTy);
1627   Args.add(RValue::get(
1628                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1629            CGF.getContext().IntTy);
1630   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1631                                               CGF.getContext().BoolTy, Args);
1632
1633   return SuccessFailureRVal.getScalarVal();
1634 }
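// A hedged sketch of the call emitted above (operand names illustrative):
//   %ok = call i1 @__atomic_compare_exchange(i64 %size, i8* %obj,
//             i8* %expected, i8* %desired, i32 %success, i32 %failure)
// The runtime writes the observed value back through %expected on failure.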
1635
1636 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1637     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1638     llvm::AtomicOrdering Failure, bool IsWeak) {
1639   if (isStrongerThan(Failure, Success))
1640     // Don't assert on undefined behavior "failure argument shall be no stronger
1641     // than the success argument".
1642     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
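  // Example (illustrative): a caller passing Success=release, Failure=seq_cst
  // (undefined behavior per the standard) has Failure clamped to monotonic
  // here rather than tripping an assertion later.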
1643
1644   // Check whether we should use a library call.
1645   if (shouldUseLibcall()) {
1646     // Produce a source address.
1647     Address ExpectedAddr = materializeRValue(Expected);
1648     Address DesiredAddr = materializeRValue(Desired);
1649     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1650                                                  DesiredAddr.getPointer(),
1651                                                  Success, Failure);
1652     return std::make_pair(
1653         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1654                                   SourceLocation(), /*AsValue=*/false),
1655         Res);
1656   }
1657
1658   // If we've got a scalar value of the right size, try to avoid going
1659   // through memory.
1660   auto *ExpectedVal = convertRValueToInt(Expected);
1661   auto *DesiredVal = convertRValueToInt(Desired);
1662   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1663                                          Failure, IsWeak);
1664   return std::make_pair(
1665       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1666                                 SourceLocation(), /*AsValue=*/false),
1667       Res.second);
1668 }
1669
1670 static void
1671 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1672                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1673                       Address DesiredAddr) {
1674   RValue UpRVal;
1675   LValue AtomicLVal = Atomics.getAtomicLValue();
1676   LValue DesiredLVal;
1677   if (AtomicLVal.isSimple()) {
1678     UpRVal = OldRVal;
1679     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1680   } else {
1681     // Build a new lvalue for the temp address.
1682     Address Ptr = Atomics.materializeRValue(OldRVal);
1683     LValue UpdateLVal;
1684     if (AtomicLVal.isBitField()) {
1685       UpdateLVal =
1686           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1687                                AtomicLVal.getType(),
1688                                AtomicLVal.getBaseInfo(),
1689                                AtomicLVal.getTBAAInfo());
1690       DesiredLVal =
1691           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1692                                AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1693                                AtomicLVal.getTBAAInfo());
1694     } else if (AtomicLVal.isVectorElt()) {
1695       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1696                                          AtomicLVal.getType(),
1697                                          AtomicLVal.getBaseInfo(),
1698                                          AtomicLVal.getTBAAInfo());
1699       DesiredLVal = LValue::MakeVectorElt(
1700           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1701           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1702     } else {
1703       assert(AtomicLVal.isExtVectorElt());
1704       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1705                                             AtomicLVal.getType(),
1706                                             AtomicLVal.getBaseInfo(),
1707                                             AtomicLVal.getTBAAInfo());
1708       DesiredLVal = LValue::MakeExtVectorElt(
1709           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1710           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1711     }
1712     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1713   }
1714   // Store the new value in the corresponding memory area.
1715   RValue NewRVal = UpdateOp(UpRVal);
1716   if (NewRVal.isScalar()) {
1717     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1718   } else {
1719     assert(NewRVal.isComplex());
1720     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1721                            /*isInit=*/false);
1722   }
1723 }
1724
1725 void AtomicInfo::EmitAtomicUpdateLibcall(
1726     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1727     bool IsVolatile) {
1728   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1729
1730   Address ExpectedAddr = CreateTempAlloca();
1731
1732   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1733   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1734   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1735   CGF.EmitBlock(ContBB);
1736   Address DesiredAddr = CreateTempAlloca();
1737   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1738       requiresMemSetZero(getAtomicAddress().getElementType())) {
1739     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1740     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1741   }
1742   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1743                                            AggValueSlot::ignored(),
1744                                            SourceLocation(), /*AsValue=*/false);
1745   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1746   auto *Res =
1747       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1748                                        DesiredAddr.getPointer(),
1749                                        AO, Failure);
1750   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1751   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1752 }
1753
1754 void AtomicInfo::EmitAtomicUpdateOp(
1755     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1756     bool IsVolatile) {
1757   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1758
1759   // Do the atomic load.
1760   auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1761   // For non-simple lvalues, perform a compare-and-swap loop.
1762   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1763   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1764   auto *CurBB = CGF.Builder.GetInsertBlock();
1765   CGF.EmitBlock(ContBB);
1766   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1767                                              /*NumReservedValues=*/2);
1768   PHI->addIncoming(OldVal, CurBB);
1769   Address NewAtomicAddr = CreateTempAlloca();
1770   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1771   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1772       requiresMemSetZero(getAtomicAddress().getElementType())) {
1773     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1774   }
1775   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1776                                            SourceLocation(), /*AsValue=*/false);
1777   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1778   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1779   // Try to write the new value using a cmpxchg operation.
1780   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1781   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1782   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1783   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1784 }
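// Rough shape of the loop emitted above (a simplified sketch, not verbatim):
//   atomic_cont:
//     %old = phi [ %initial.load, %entry ], [ %cmpxchg.old, %atomic_cont ]
//     ; apply UpdateOp to %old and store the result into a temporary
//     %pair = cmpxchg ... %old, %desired ...
//     br i1 %pair.success, label %atomic_exit, label %atomic_cont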
1785
1786 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1787                                   RValue UpdateRVal, Address DesiredAddr) {
1788   LValue AtomicLVal = Atomics.getAtomicLValue();
1789   LValue DesiredLVal;
1790   // Build a new lvalue for the temp address.
1791   if (AtomicLVal.isBitField()) {
1792     DesiredLVal =
1793         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1794                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1795                              AtomicLVal.getTBAAInfo());
1796   } else if (AtomicLVal.isVectorElt()) {
1797     DesiredLVal =
1798         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1799                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1800                               AtomicLVal.getTBAAInfo());
1801   } else {
1802     assert(AtomicLVal.isExtVectorElt());
1803     DesiredLVal = LValue::MakeExtVectorElt(
1804         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1805         AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1806   }
1807   // Store the new value in the corresponding memory area.
1808   assert(UpdateRVal.isScalar());
1809   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1810 }
1811
1812 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1813                                          RValue UpdateRVal, bool IsVolatile) {
1814   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1815
1816   Address ExpectedAddr = CreateTempAlloca();
1817
1818   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1819   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1820   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1821   CGF.EmitBlock(ContBB);
1822   Address DesiredAddr = CreateTempAlloca();
1823   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1824       requiresMemSetZero(getAtomicAddress().getElementType())) {
1825     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1826     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1827   }
1828   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1829   auto *Res =
1830       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1831                                        DesiredAddr.getPointer(),
1832                                        AO, Failure);
1833   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1834   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1835 }
1836
1837 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1838                                     bool IsVolatile) {
1839   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1840
1841   // Do the atomic load.
1842   auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1843   // For non-simple lvalues, perform a compare-and-swap loop.
1844   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1845   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1846   auto *CurBB = CGF.Builder.GetInsertBlock();
1847   CGF.EmitBlock(ContBB);
1848   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1849                                              /*NumReservedValues=*/2);
1850   PHI->addIncoming(OldVal, CurBB);
1851   Address NewAtomicAddr = CreateTempAlloca();
1852   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1853   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1854       requiresMemSetZero(getAtomicAddress().getElementType())) {
1855     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1856   }
1857   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1858   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1859   // Try to write the new value using a cmpxchg operation.
1860   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1861   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1862   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1863   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1864 }
1865
1866 void AtomicInfo::EmitAtomicUpdate(
1867     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1868     bool IsVolatile) {
1869   if (shouldUseLibcall()) {
1870     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1871   } else {
1872     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1873   }
1874 }
1875
1876 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1877                                   bool IsVolatile) {
1878   if (shouldUseLibcall()) {
1879     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1880   } else {
1881     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1882   }
1883 }
1884
1885 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1886                                       bool isInit) {
1887   bool IsVolatile = lvalue.isVolatileQualified();
1888   llvm::AtomicOrdering AO;
1889   if (lvalue.getType()->isAtomicType()) {
1890     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1891   } else {
1892     AO = llvm::AtomicOrdering::Release;
1893     IsVolatile = true;
1894   }
1895   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1896 }
1897
1898 /// Emit a store to an l-value of atomic type.
1899 ///
1900 /// Note that the r-value is expected to be an r-value *of the atomic
1901 /// type*; this means that for aggregate r-values, it should include
1902 /// storage for any padding that was necessary.
1903 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1904                                       llvm::AtomicOrdering AO, bool IsVolatile,
1905                                       bool isInit) {
1906   // If this is an aggregate r-value, it should agree in type except
1907   // maybe for address-space qualification.
1908   assert(!rvalue.isAggregate() ||
1909          rvalue.getAggregateAddress().getElementType()
1910            == dest.getAddress().getElementType());
1911
1912   AtomicInfo atomics(*this, dest);
1913   LValue LVal = atomics.getAtomicLValue();
1914
1915   // If this is an initialization, just put the value there normally.
1916   if (LVal.isSimple()) {
1917     if (isInit) {
1918       atomics.emitCopyIntoMemory(rvalue);
1919       return;
1920     }
1921
1922     // Check whether we should use a library call.
1923     if (atomics.shouldUseLibcall()) {
1924       // Produce a source address.
1925       Address srcAddr = atomics.materializeRValue(rvalue);
1926
1927       // void __atomic_store(size_t size, void *mem, void *val, int order)
1928       CallArgList args;
1929       args.add(RValue::get(atomics.getAtomicSizeValue()),
1930                getContext().getSizeType());
1931       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1932                getContext().VoidPtrTy);
1933       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
1934                getContext().VoidPtrTy);
1935       args.add(
1936           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1937           getContext().IntTy);
1938       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1939       return;
1940     }
1941
1942     // Okay, we're doing this natively.
1943     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1944
1945     // Do the atomic store.
1946     Address addr =
1947         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1948     intValue = Builder.CreateIntCast(
1949         intValue, addr.getElementType(), /*isSigned=*/false);
1950     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1951
1952     // Initializations don't need to be atomic.
1953     if (!isInit)
1954       store->setAtomic(AO);
1955
1956     // Other decoration.
1957     if (IsVolatile)
1958       store->setVolatile(true);
1959     CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
1960     return;
1961   }
1962
1963   // Emit simple atomic update operation.
1964   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1965 }
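// Illustrative example (assumption): storing to an _Atomic(long long) on a
// 32-bit target without 8-byte atomic support lowers to
//   call void @__atomic_store(i32 8, i8* %obj, i8* %tmp, i32 5)
// whereas a lock-free target gets a single 'store atomic ... seq_cst'.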
1966
1967 /// Emit a compare-and-exchange op for an atomic type.
1968 ///
1969 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
1970     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1971     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1972     AggValueSlot Slot) {
1973   // If this is an aggregate r-value, it should agree in type except
1974   // maybe for address-space qualification.
1975   assert(!Expected.isAggregate() ||
1976          Expected.getAggregateAddress().getElementType() ==
1977              Obj.getAddress().getElementType());
1978   assert(!Desired.isAggregate() ||
1979          Desired.getAggregateAddress().getElementType() ==
1980              Obj.getAddress().getElementType());
1981   AtomicInfo Atomics(*this, Obj);
1982
1983   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1984                                            IsWeak);
1985 }
1986
1987 void CodeGenFunction::EmitAtomicUpdate(
1988     LValue LVal, llvm::AtomicOrdering AO,
1989     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
1990   AtomicInfo Atomics(*this, LVal);
1991   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
1992 }
1993
1994 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
1995   AtomicInfo atomics(*this, dest);
1996
1997   switch (atomics.getEvaluationKind()) {
1998   case TEK_Scalar: {
1999     llvm::Value *value = EmitScalarExpr(init);
2000     atomics.emitCopyIntoMemory(RValue::get(value));
2001     return;
2002   }
2003
2004   case TEK_Complex: {
2005     ComplexPairTy value = EmitComplexExpr(init);
2006     atomics.emitCopyIntoMemory(RValue::getComplex(value));
2007     return;
2008   }
2009
2010   case TEK_Aggregate: {
2011     // Fix up the destination if the initializer isn't an expression
2012     // of atomic type.
2013     bool Zeroed = false;
2014     if (!init->getType()->isAtomicType()) {
2015       Zeroed = atomics.emitMemSetZeroIfNecessary();
2016       dest = atomics.projectValue();
2017     }
2018
2019     // Evaluate the expression directly into the destination.
2020     AggValueSlot slot = AggValueSlot::forLValue(dest,
2021                                         AggValueSlot::IsNotDestructed,
2022                                         AggValueSlot::DoesNotNeedGCBarriers,
2023                                         AggValueSlot::IsNotAliased,
2024                                         AggValueSlot::DoesNotOverlap,
2025                                         Zeroed ? AggValueSlot::IsZeroed :
2026                                                  AggValueSlot::IsNotZeroed);
2027
2028     EmitAggExpr(init, slot);
2029     return;
2030   }
2031   }
2032   llvm_unreachable("bad evaluation kind");
2033 }
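// Illustrative example (assumption): for '_Atomic(struct S) s = (struct S){0};'
// the initializer has non-atomic type 'struct S', so any padding in the atomic
// buffer is zeroed first and the aggregate is evaluated into the projected
// value lvalue.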