/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019, 2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
30 #ifndef _SYS_SMR_TYPES_H_
31 #define _SYS_SMR_TYPES_H_
/*
 * SMR Accessors are meant to provide safe access to SMR protected
 * pointers and prevent misuse and accidental access.
 *
 * Accessors are grouped by type:
 * entered      - Use while in a read section (between smr_enter/smr_exit())
 * serialized   - Use while holding a lock that serializes writers.  Updates
 *                are synchronized with readers via included barriers.
 * unserialized - Use after the memory is out of scope and not visible to
 *                readers.
 *
 * All accesses include a parameter for an assert to verify the required
 * synchronization.  For example, a writer might use:
 *
 * smr_serialized_store(pointer, value, mtx_owned(&writelock));
 *
 * These are only enabled in INVARIANTS kernels.
 */
/*
 * Type restricting pointer access to force smr accessors.  Wrapping the
 * pointer in an anonymous struct makes a plain dereference a compile-time
 * error, so callers must go through the smr_*_load/store accessors below.
 */
#define	SMR_POINTER(type)						\
struct {								\
	type	__ptr;		/* Do not access directly */		\
}
/*
 * Read from an SMR protected pointer while in a read section.
 * 'smr' is the smr_t the caller entered; SMR_ASSERT verifies the
 * section is active in INVARIANTS kernels.  The acquire load pairs
 * with the release store/swap on the writer side.
 */
#define	smr_entered_load(p, smr) ({					\
	SMR_ASSERT(SMR_ENTERED((smr)), "smr_entered_load");		\
	(__typeof((p)->__ptr))atomic_load_acq_ptr((uintptr_t *)&(p)->__ptr); \
})
/*
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.  'ex' should contain an assert that the
 * external mechanism is held.  i.e. mtx_owned()
 */
#define	smr_serialized_load(p, ex) ({					\
	SMR_ASSERT(ex, "smr_serialized_load");				\
	(__typeof((p)->__ptr))atomic_load_ptr(&(p)->__ptr);		\
})
/*
 * Store 'v' to an SMR protected pointer while serialized by an
 * external mechanism.  'ex' should contain an assert that the
 * external mechanism is held.  i.e. mtx_owned()
 *
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.  The
 * release store makes the pointed-to contents visible before the
 * pointer itself becomes visible to readers.
 */
#define	smr_serialized_store(p, v, ex) do {				\
	SMR_ASSERT(ex, "smr_serialized_store");				\
	__typeof((p)->__ptr) _v = (v);					\
	atomic_store_rel_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v);	\
} while (0)
/*
 * swap 'v' with an SMR protected pointer and return the old value
 * while serialized by an external mechanism.  'ex' should contain
 * an assert that the external mechanism is provided.  i.e. mtx_owned()
 *
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define	smr_serialized_swap(p, v, ex) ({				\
	SMR_ASSERT(ex, "smr_serialized_swap");				\
	__typeof((p)->__ptr) _v = (v);					\
	/* Release barrier guarantees contents are visible to reader */ \
	atomic_thread_fence_rel();					\
	(__typeof((p)->__ptr))atomic_swap_ptr(				\
	    (uintptr_t *)&(p)->__ptr, (uintptr_t)_v);			\
})
/*
 * Read from an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define	smr_unserialized_load(p, ex) ({					\
	SMR_ASSERT(ex, "smr_unserialized_load");			\
	(__typeof((p)->__ptr))atomic_load_ptr(&(p)->__ptr);		\
})
/*
 * Store to an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define	smr_unserialized_store(p, v, ex) do {				\
	SMR_ASSERT(ex, "smr_unserialized_store");			\
	__typeof((p)->__ptr) _v = (v);					\
	atomic_store_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v);	\
} while (0)
#ifndef _KERNEL

/*
 * Load an SMR protected pointer when accessing kernel data structures through
 * libkvm (userland only; the atomic accessors above are kernel-side).
 */
#define	smr_kvm_load(p) ((p)->__ptr)

#endif /* !_KERNEL */
138 #endif /* !_SYS_SMR_TYPES_H_ */