/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Justin Hibbits
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>
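
/*
 * Emulation of 64-bit atomic operations for platforms that lack them in
 * hardware: every operation is serialized, on SMP through a pool of
 * mutexes hashed by the target address, and on UP by briefly disabling
 * interrupts.
 */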
#ifdef SMP

#define	A64_POOL_SIZE	MAXCPU
/* Estimated size of a cacheline */
#define	CACHE_ALIGN	CACHE_LINE_SIZE
static struct mtx a64_mtx_pool[A64_POOL_SIZE];
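
/*
 * Hash the target's physical address (obtained with pmap_kextract()) down
 * to one of the pool mutexes.  Dividing by the cacheline size first keeps
 * every 64-bit variable within a given cacheline on the same mutex.
 */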
#define	GET_MUTEX(p) \
    (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])

#define	LOCK_A64()			\
    struct mtx *_amtx = GET_MUTEX(p);	\
    if (smp_started) mtx_lock(_amtx)

#define	UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)
#else	/* !SMP */

/* On uniprocessor kernels, disabling interrupts is sufficient. */
#define	LOCK_A64()	{ register_t s = intr_disable()
#define	UNLOCK_A64()	intr_restore(s); }

#endif	/* SMP */
#define	ATOMIC64_EMU_UN(op, rt, block, ret)		\
    rt atomic_##op##_64(volatile u_int64_t *p) {	\
	u_int64_t tmp __unused;				\
	LOCK_A64(); block; UNLOCK_A64();		\
	ret; } struct hack

#define	ATOMIC64_EMU_BIN(op, rt, block, ret)		\
    rt atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) { \
	u_int64_t tmp __unused;				\
	LOCK_A64(); block; UNLOCK_A64();		\
	ret; } struct hack
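
/*
 * Each instantiation below emits one real function whose read-modify-write
 * "block" executes entirely inside the critical section, making it atomic
 * with respect to every other emulated 64-bit operation on that address.
 */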
ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
ATOMIC64_EMU_BIN(fetchadd, u_int64_t, (*p = *p + v, v = *p - v), return (v));
ATOMIC64_EMU_UN(load, u_int64_t, (tmp = *p), return (tmp));
ATOMIC64_EMU_BIN(set, void, *p |= v, return);
ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return);
ATOMIC64_EMU_BIN(store, void, *p = v, return);
ATOMIC64_EMU_BIN(swap, u_int64_t, tmp = *p; *p = v; v = tmp, return (v));
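
/*
 * cmpset and fcmpset need conditional stores inside the critical section,
 * which the expression-style macros above cannot express, so both are
 * written out by hand.
 */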
int atomic_cmpset_64(volatile u_int64_t *p, u_int64_t old, u_int64_t new)
{
	u_int64_t tmp;

	LOCK_A64();
	tmp = *p;
	if (tmp == old)
		*p = new;
	UNLOCK_A64();

	return (tmp == old);
}

/*
 * fcmpset additionally writes the observed value back through *old on
 * failure, so callers can retry without issuing a separate load.
 */
int atomic_fcmpset_64(volatile u_int64_t *p, u_int64_t *old, u_int64_t new)
{
	u_int64_t tmp, tmp_old;

	LOCK_A64();
	tmp = *p;
	tmp_old = *old;
	if (tmp == tmp_old)
		*p = new;
	else
		*old = tmp;
	UNLOCK_A64();

	return (tmp == tmp_old);
}
#ifdef SMP
static void
atomic64_mtxinit(void *x __unused)
{
	int i;

	for (i = 0; i < A64_POOL_SIZE; i++)
		mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF);
}

/*
 * Initialize the pool early, at SI_SUB_LOCK; until smp_started is set,
 * LOCK_A64() falls through without taking a mutex.
 */
SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);
#endif	/* SMP */
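
/*
 * Usage sketch (hypothetical consumer, not part of this file): code built
 * against this emulation calls the 64-bit atomics exactly as it would on a
 * platform with native support.  atomic_fetchadd_64() returns the value
 * observed before the addition, so a 64-bit event counter could look like:
 *
 *	static volatile u_int64_t ev_count;
 *
 *	void
 *	ev_record(void)
 *	{
 *		u_int64_t prev;
 *
 *		prev = atomic_fetchadd_64(&ev_count, 1);
 *		if (prev == 0)
 *			printf("first event recorded\n");
 *	}
 *
 * ev_count and ev_record() are illustrative names only.
 */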