2 * Copyright (c) 2014 Mateusz Guzik <mjg@FreeBSD.org>
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/systm.h>
34 #include <sys/types.h>
37 * seqc_t may be included in structs visible to userspace
39 #include <sys/_seqc.h>
43 /* A hack to get MPASS macro */
46 #include <machine/cpu.h>
/*
 * Predicts from inline functions are not honored by clang.
 */
#define	seqc_in_modify(seqc)	({			\
	seqc_t __seqc = (seqc);				\
							\
	__predict_false(__seqc & SEQC_MOD);		\
})
60 seqc_write_begin(seqc_t *seqcp)
64 MPASS(!seqc_in_modify(*seqcp));
66 atomic_thread_fence_rel();
70 seqc_write_end(seqc_t *seqcp)
73 atomic_thread_fence_rel();
75 MPASS(!seqc_in_modify(*seqcp));
79 static __inline seqc_t
80 seqc_read_any(const seqc_t *seqcp)
83 return (atomic_load_acq_int(__DECONST(seqc_t *, seqcp)));
86 static __inline seqc_t
87 seqc_read_notmodify(const seqc_t *seqcp)
90 return (atomic_load_acq_int(__DECONST(seqc_t *, seqcp)) & ~SEQC_MOD);
93 static __inline seqc_t
94 seqc_read(const seqc_t *seqcp)
99 ret = seqc_read_any(seqcp);
100 if (seqc_in_modify(ret)) {
/*
 * Check that the counter still equals the previously read value, without
 * issuing a memory barrier; oldseqc must have been read outside a write
 * section (SEQC_MOD clear).
 */
#define	seqc_consistent_nomb(seqcp, oldseqc)	({	\
	const seqc_t *__seqcp = (seqcp);		\
	seqc_t __oldseqc = (oldseqc);			\
							\
	MPASS(!(seqc_in_modify(__oldseqc)));		\
	__predict_true(*__seqcp == __oldseqc);		\
})
/*
 * Like seqc_consistent_nomb(), but with an acquire fence first so reads
 * performed before the check cannot be reordered past it.
 */
#define	seqc_consistent(seqcp, oldseqc)		({	\
	atomic_thread_fence_acq();			\
	seqc_consistent_nomb(seqcp, oldseqc);		\
})
124 * Variant which does not critical enter/exit.
127 seqc_sleepable_write_begin(seqc_t *seqcp)
130 MPASS(!seqc_in_modify(*seqcp));
132 atomic_thread_fence_rel();
136 seqc_sleepable_write_end(seqc_t *seqcp)
139 atomic_thread_fence_rel();
141 MPASS(!seqc_in_modify(*seqcp));
145 #endif /* _SYS_SEQC_H_ */