/*-
 * Copyright (c) 2014 Mateusz Guzik <mjg@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
30 #include <sys/systm.h>
32 #include <sys/types.h>
/*
 * seqc_t may be included in structs visible to userspace
 */
37 #include <sys/_seqc.h>
/* A hack to get MPASS macro */
44 #include <machine/cpu.h>
/*
 * Low bit of the counter flags a modification in progress: writers bump the
 * counter (making it odd) on entry and again (making it even) on exit.
 */
#define	SEQC_MOD	1

/*
 * Predicts from inline functions are not honored by clang.
 *
 * seqc_in_modify(): true when the snapshot 'seqc' has SEQC_MOD set, i.e. a
 * writer is in the middle of an update.  Statement-expression macro so that
 * __predict_false() survives into the caller.
 */
#define	seqc_in_modify(seqc)	({				\
	seqc_t __seqc = (seqc);					\
								\
	__predict_false(__seqc & SEQC_MOD);			\
})
58 seqc_write_begin(seqc_t *seqcp)
62 MPASS(!seqc_in_modify(*seqcp));
64 atomic_thread_fence_rel();
68 seqc_write_end(seqc_t *seqcp)
71 atomic_thread_fence_rel();
73 MPASS(!seqc_in_modify(*seqcp));
77 static __inline seqc_t
78 seqc_read_any(const seqc_t *seqcp)
81 return (atomic_load_acq_int(__DECONST(seqc_t *, seqcp)));
84 static __inline seqc_t
85 seqc_read_notmodify(const seqc_t *seqcp)
88 return (atomic_load_acq_int(__DECONST(seqc_t *, seqcp)) & ~SEQC_MOD);
91 static __inline seqc_t
92 seqc_read(const seqc_t *seqcp)
97 ret = seqc_read_any(seqcp);
98 if (seqc_in_modify(ret)) {
/*
 * seqc_consistent_nomb(): true if *seqcp still equals the snapshot
 * 'oldseqc', i.e. no writer ran since the snapshot was taken.  "nomb" = no
 * memory barrier; the caller must order the protected loads itself (or use
 * seqc_consistent() below).  'oldseqc' must not have SEQC_MOD set.
 */
#define	seqc_consistent_nomb(seqcp, oldseqc)	({	\
	const seqc_t *__seqcp = (seqcp);		\
	seqc_t __oldseqc = (oldseqc);			\
							\
	MPASS(!(seqc_in_modify(__oldseqc)));		\
	__predict_true(*__seqcp == __oldseqc);		\
})
/*
 * seqc_consistent(): like seqc_consistent_nomb(), but first issues an
 * acquire fence so the protected loads cannot be reordered past the counter
 * re-check.  This is the variant readers normally want.
 */
#define	seqc_consistent(seqcp, oldseqc)		({	\
	atomic_thread_fence_acq();			\
	seqc_consistent_nomb(seqcp, oldseqc);		\
})
122 * Variant which does not critical enter/exit.
125 seqc_sleepable_write_begin(seqc_t *seqcp)
128 MPASS(!seqc_in_modify(*seqcp));
130 atomic_thread_fence_rel();
134 seqc_sleepable_write_end(seqc_t *seqcp)
137 atomic_thread_fence_rel();
139 MPASS(!seqc_in_modify(*seqcp));
143 #endif /* _SYS_SEQC_H_ */