//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/FrontendDiagnostic.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;
namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
                                    BFI, lvalue.getType(), lvalue.getBaseInfo(),
                                    lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy =
              C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
                                     /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), cast<llvm::FixedVectorType>(
                                  lvalue.getExtVectorAddress().getElementType())
                                  ->getNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
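      // Illustrative note (editorial, not from the original source): the
      // libcall decision below asks the target whether a lock-free atomic of
      // this size and alignment exists; e.g. a 128-bit object on a target
      // whose maximum inline atomic width is 64 bits fails hasBuiltinAtomic()
      // and takes the libcall path.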
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }
    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.getPointer(CGF);
      else if (LVal.isBitField())
        return LVal.getBitFieldPointer();
      else if (LVal.isVectorElt())
        return LVal.getVectorPointer();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorPointer();
    }
    Address getAtomicAddress() const {
      return Address(getAtomicPointer(), getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }
    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }
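    // Illustrative example (editorial): on a typical 64-bit target, a struct
    // of three chars has ValueSizeInBits == 24 while its _Atomic counterpart
    // is widened to a 32-bit storage unit, so hasPadding() returns true and
    // the tail byte must be zeroed before value comparisons such as cmpxchg.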
    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }
    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations.
    Address emitCastToAtomicIntPointer(Address Addr) const;
    /// If Addr is compatible with the iN that will be used for an atomic
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
    /// and copy the value across.
    Address convertToAtomicIntPointer(Address Addr) const;
    /// Turn an atomic-layout object into an r-value.
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                     SourceLocation loc, bool AsValue) const;

    /// Converts an r-value to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;
    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
    }
    /// Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);
    /// Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for success operation.
    /// \param Failure Atomic ordering for failed operation.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    /// boolean flag (i1 type) with true if success and false otherwise.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);
    /// Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    Address materializeRValue(RValue rvalue) const;

    /// Creates temp alloca for intermediate operations on atomic value.
    Address CreateTempAlloca() const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    /// Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    /// Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
} // namespace
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType());
  return TempAlloca;
}
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::AttrBuilder fnAttrB;
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  auto callee = CGCallee::forDirect(fn);
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}
/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
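// Illustrative example (editorial): on x86, long double lowers to x86_fp80,
// whose store size is 10 bytes, while the corresponding _Atomic object is
// padded to 16 bytes; a plain store of the IR type therefore does not cover
// the full atomic width, and the buffer must be zeroed first.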
/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getPointer(CGF);
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr,
                              Address Val1, Address Val2,
                              uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
      Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
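// Illustrative note (editorial): the emitted CFG mirrors the C semantics of
// compare-exchange. On failure, control flows through cmpxchg.store_expected,
// which writes the observed value back into 'expected'; both paths join at
// cmpxchg.continue, where the i1 success flag is stored to Dest.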
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // 31.7.2.18: "The failure argument shall not be memory_order_release
      // nor memory_order_acq_rel". Fallback to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }
  // Create all the relevant BB's
  auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // Implemented as acquire, since it's the closest in LLVM.
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned,
                                         llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
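// Illustrative note (editorial): atomicrmw returns the value *before* the
// operation, so for __atomic_max_fetch the select above recomputes
// max(old, rhs), which is exactly the value that was stored and the value
// the *_fetch builtin must return.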
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;
  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    PostOpMinMax = true;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                  : llvm::AtomicRMWInst::UMin;
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    PostOpMinMax = true;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
                                                  : llvm::AtomicRMWInst::UMax;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }
  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
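// Illustrative example (editorial): __atomic_add_fetch(p, n, order) lowers to
// 'atomicrmw add' followed by a plain IR add of the same operand, because
// atomicrmw yields the old value while the *_fetch builtins must return the
// new one; for nand, the extra NOT above completes ~(old & val).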
// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have synch scope. If clang atomic
  // expression has no scope operand, use default LLVM synch scope.
  if (!ScopeModel) {
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
        Order, CGF.CGM.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If unsupported synch scope is encountered at run time, assume a fallback
  // synch scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                         ScopeModel->map(S),
                                                         Order,
                                                         CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
    Val = CGF.EmitLoadOfScalar(Ptr, false,
                               CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}
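// Illustrative example (editorial): for a 4-byte float passed to an optimized
// libcall such as __atomic_exchange_4, the bytes are reloaded through an i32*
// and passed by value as an i32; the generic __atomic_exchange instead
// receives a void* to the original storage.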
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  bool UseLibcall = Misaligned | Oversized;
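  // Illustrative example (editorial): on a target whose maximum inline atomic
  // width is 64 bits, a 16-byte _Atomic struct is Oversized; an int accessed
  // through a misaligned pointer is Misaligned even though its size is
  // supported. Either condition forces the libcall path.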
  bool ShouldCastToIntPtrTy = true;

  CharUnits MaxInlineWidth =
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);

  DiagnosticsEngine &Diags = CGM.getDiags();

  if (Misaligned) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << (int)TInfo.Width.getQuantity()
        << (int)Ptr.getAlignment().getQuantity();
  }

  if (Oversized) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());
  llvm::Value *Scope =
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
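      // Illustrative example (editorial): for __c11_atomic_fetch_add on an
      // int* (with sizeof(int) == 4), the multiply above converts an operand
      // of 1 into a byte increment of 4; the GNU __atomic_fetch_add instead
      // takes the byte count directly from the user.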
      break;
    }
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
    LLVM_FALLTHROUGH;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  }
  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/Library .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled above with EmitAtomicInit!");

    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;

    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_compare_exchange:
      // Use the generic version if we don't know that the operand will be
      // suitably aligned for the optimized version.
      if (Misaligned)
        break;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      // Only use optimized library calls for sizes for which they exist.
      // FIXME: Size == 16 optimized library functions exist too.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    // The OpenCL atomic library functions only accept pointer arguments to
    // generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto T = V->getType();
      auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);

      return getTargetHooks().performAddrSpaceCast(
          *this, V, AS, LangAS::opencl_generic, DestType, false);
    };

    Args.add(RValue::get(CastToGenericAddrSpace(
                 EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
             getContext().VoidPtrTy);
    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
    bool PostOpMinMax = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(
          RValue::get(CastToGenericAddrSpace(
              EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
          getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_add_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_and_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_and_fetch:
      PostOp = llvm::Instruction::And;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_or_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_or_fetch:
      PostOp = llvm::Instruction::Or;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_sub_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_sub_fetch:
      PostOp = llvm::Instruction::Sub;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_xor_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_xor_fetch:
      PostOp = llvm::Instruction::Xor;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    case AtomicExpr::AO__atomic_min_fetch:
      PostOpMinMax = true;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_min"
                        : "__atomic_fetch_umin";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;

    case AtomicExpr::AO__atomic_max_fetch:
      PostOpMinMax = true;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_max"
                        : "__atomic_fetch_umax";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_nand_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__atomic_fetch_nand:
      LibCallName = "__atomic_fetch_nand";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;
    }
    if (E->isOpenCL()) {
      LibCallName =
          std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
    }
    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(TInfo.Width), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
                 getContext().VoidPtrTy);
      }
    }
    // Order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);
    if (E->isOpenCL())
      Args.add(RValue::get(Scope), getContext().IntTy);

    // PostOp is only needed for the atomic_*_fetch operations, and
    // thus is only needed for and implemented in the
    // UseOptimizedLibcall codepath.
    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    // The value is returned directly for optimized libcalls but the expr
    // provided an out-param.
    if (UseOptimizedLibcall && Res.getScalarVal()) {
      llvm::Value *ResVal = Res.getScalarVal();
      if (PostOpMinMax) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
                                      E->getValueType()->isSignedIntegerType(),
                                      ResVal, LoadVal1);
      } else if (PostOp) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
      }
      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
        ResVal = Builder.CreateNot(ResVal);

      Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
    }

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
        RValTy, E->getExprLoc());
  }
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid C ABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                        Dest.getAddressSpace())),
        RValTy, E->getExprLoc());
  }
  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                      Dest.getAddressSpace())),
      RValTy, E->getExprLoc());
}
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return emitCastToAtomicIntPointer(Addr);
}
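// Illustrative example (editorial): if the incoming value is smaller than the
// atomic slot (say a 3-byte source feeding a 32-bit atomic), the bytes are
// first memcpy'd into a temporary of the atomic's size and alignment so the
// operation can then be performed through a correctly sized iN pointer.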
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!asValue)
    // Get RValue from temp memory as atomic for non-simple lvalues
    return RValue::get(CGF.Builder.CreateLoad(addr));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(),
      LVal.getBaseInfo(), TBAAAccessInfo()));
}
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try to avoid a temporary in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }
  // Create a temporary. This needs to be big enough to hold the
  // atomic value.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// the operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

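// Example: under /volatile:ms (cc1 -fms-volatile), a load of 'volatile int'
// through a simple l-value qualifies here and is emitted as an atomic
// acquire load, while a volatile object wider than a pointer (say, a 16-byte
// struct on a 64-bit target) fails the width check above and keeps plain
// volatile semantics. This mirrors MSVC's observed behavior rather than any
// documented guarantee.
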
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

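// Usage sketch: for '_Atomic(float) f; float x = f;' the native path loads
// the 32-bit representation as an integer and ConvertIntToValueOrAtomic then
// bitcasts it back to float, so the r-value produced here has the value type
// 'float', not '_Atomic(float)'.
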
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an aggregate r-value, it should already be of the atomic
  // type, which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
                                    getAtomicType());
    bool IsVolatile = rvalue.isVolatileQualified() ||
                      LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
                          AggValueSlot::DoesNotOverlap, IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

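// Padding example: for x86's '_Atomic(long double)', the value occupies 80
// bits of a 16-byte atomic slot, so the scalar path above first zeroes the
// whole buffer (emitMemSetZeroIfNecessary) and then stores the value through
// projectValue(). Keeping the padding bits in a known state matters because
// cmpxchg-based operations compare full atomic-width words.
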
/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress(CGF);
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}

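// For instance, a 'float' r-value destined for a 32-bit atomic slot is
// bitcast directly to i32, and a pointer value goes through ptrtoint;
// anything that cannot be reinterpreted in-register (e.g. a padded scalar on
// a simple l-value, or an aggregate) takes the materializeRValue path
// through a temporary alloca instead.
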
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

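// The IR produced is roughly the following (a sketch for a 32-bit atomic):
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
// where %old is handed back for conversion to the value type and %ok drives
// the caller's control flow.
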
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  // void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}

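// Per the __atomic_* libcall contract, when the exchange fails the buffer
// behind 'expected' is updated with the value actually seen in the object;
// the libcall-based update loops below rely on this by re-reading
// ExpectedAddr on each iteration.
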
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}

static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build new lvalue for temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getBaseInfo(),
                                         AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getBaseInfo(),
                                            AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    }
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

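// Note the non-simple cases above: for a bitfield or vector-element l-value
// the update is applied to a temporary copy of the whole atomic-width word,
// so a read-modify-write of one bitfield leaves its neighbors in the same
// storage unit intact when the word is written back via compare-exchange.
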
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

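// The control flow built above is a classic compare-and-swap loop, roughly:
//
//   entry:        %old = <atomic load>
//                 br label %atomic_cont
//   atomic_cont:  %phi = phi [ %old, %entry ], [ %prev, %atomic_cont ]
//                 <apply UpdateOp to %phi, producing %desired>
//                 %pair = cmpxchg %ptr, %phi, %desired
//                 br %pair.success, label %atomic_exit, label %atomic_cont
//   atomic_exit:
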
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress(*this).getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

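// Note the ordering adjustment above: acquire and acquire-release are not
// legal orderings for a store, so they are downgraded to monotonic and
// release respectively. For a release store to a naturally aligned
// _Atomic(int), the native path boils down to roughly:
//   store atomic i32 %v, i32* %x release, align 4
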
/// Emit a compare-and-exchange op for atomic type.
///
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress(*this).getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress(*this).getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, *this, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
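
// Initialization example: '_Atomic(int) x = 42;' takes the TEK_Scalar case
// above and becomes an ordinary non-atomic store of 42; no other thread can
// legitimately observe an object while it is still being initialized, so no
// atomic ordering is needed here.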