1 ; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
2 ; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
4 ; Ensures that atomic loads count as MemoryDefs
; CHECK-LABEL: define i32 @foo
define i32 @foo(i32* %a, i32* %b) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 4
  store i32 4, i32* %a, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %1 = load atomic i32
  %1 = load atomic i32, i32* %b acquire, align 4
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a, align 4
  %3 = add i32 %1, %2
  ret i32 %3
}
; CHECK-LABEL: define void @bar
define void @bar(i32* %a) {
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
  load atomic i32, i32* %a unordered, align 4
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: load atomic i32, i32* %a monotonic, align 4
  load atomic i32, i32* %a monotonic, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: load atomic i32, i32* %a acquire, align 4
  load atomic i32, i32* %a acquire, align 4
; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: load atomic i32, i32* %a seq_cst, align 4
  load atomic i32, i32* %a seq_cst, align 4
  ret void
}
; CHECK-LABEL: define void @baz
define void @baz(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %1 = load atomic i32
  %1 = load atomic i32, i32* %a acquire, align 4
; CHECK: MemoryUse(1)
; CHECK-NEXT: %2 = load atomic i32, i32* %a unordered, align 4
  %2 = load atomic i32, i32* %a unordered, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %3 = load atomic i32, i32* %a monotonic, align 4
  %3 = load atomic i32, i32* %a monotonic, align 4
  ret void
}
; CHECK-LABEL: define void @fences
define void @fences(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: fence acquire
  fence acquire
; CHECK: MemoryUse(1)
; CHECK-NEXT: %1 = load i32, i32* %a
  %1 = load i32, i32* %a

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: fence release
  fence release
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32, i32* %a
  %2 = load i32, i32* %a

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: fence acq_rel
  fence acq_rel
; CHECK: MemoryUse(3)
; CHECK-NEXT: %3 = load i32, i32* %a
  %3 = load i32, i32* %a

; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: fence seq_cst
  fence seq_cst
; CHECK: MemoryUse(4)
; CHECK-NEXT: %4 = load i32, i32* %a
  %4 = load i32, i32* %a
  ret void
}
; CHECK-LABEL: define void @seq_cst_clobber
define void @seq_cst_clobber(i32* noalias %a, i32* noalias %b) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %1 = load atomic i32, i32* %a monotonic, align 4
  load atomic i32, i32* %a monotonic, align 4

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %2 = load atomic i32, i32* %a seq_cst, align 4
  load atomic i32, i32* %a seq_cst, align 4

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: load atomic i32, i32* %a monotonic, align 4
  load atomic i32, i32* %a monotonic, align 4

  ret void
}
; Ensure that AA hands us MRI_Mod on unreorderable atomic ops.
;
; This test is a bit implementation-specific. In particular, it depends on that
; we pass cmpxchg-load queries to AA, without trying to reason about them on
; our own.
;
; If AA gets more aggressive, we can find another way.
109 ; CHECK-LABEL: define void @check_aa_is_sane
110 define void @check_aa_is_sane(i32* noalias %a, i32* noalias %b) {
111 ; CHECK: 1 = MemoryDef(liveOnEntry)
112 ; CHECK-NEXT: cmpxchg i32* %a, i32 0, i32 1 acquire acquire
113 cmpxchg i32* %a, i32 0, i32 1 acquire acquire
114 ; CHECK: MemoryUse(1)
115 ; CHECK-NEXT: load i32, i32* %b, align 4
116 load i32, i32* %b, align 4