/* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019-2020 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef SYS_SUPPORT_H_
#define SYS_SUPPORT_H_

#ifdef __LOCORE
#include <machine/asm.h>
#define SYM_FUNC_START	ENTRY
#define SYM_FUNC_END	END

#else	/* !__LOCORE: the C side of the compatibility shim */
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/endian.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/proc.h>	/* curthread, used by need_resched() below */
#include <sys/lock.h>
#include <sys/rwlock.h>	/* rw_wlock() et al., wrapped below */
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/fpu.h>
#endif
#include <crypto/siphash/siphash.h>
#define COMPAT_ZINC_IS_A_MODULE
MALLOC_DECLARE(M_WG);
#define BUILD_BUG_ON(x)		CTASSERT(!(x))
#define BIT(nr)			(1UL << (nr))
#define BIT_ULL(nr)		(1ULL << (nr))
#ifdef __LP64__
#define BITS_PER_LONG		64
#else
#define BITS_PER_LONG		32
#endif
#define rw_enter_write	rw_wlock
#define rw_exit_write	rw_wunlock
#define rw_enter_read	rw_rlock
#define rw_exit_read	rw_runlock
#define rw_exit		rw_unlock

#define ASSERT(x)	MPASS(x)

#define ___PASTE(a,b)		a##b
#define __PASTE(a,b)		___PASTE(a,b)
#define __UNIQUE_ID(prefix)	__PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#define typeof(x)	__typeof__(x)

#define min_t(t, a, b)	({ t __a = (a); t __b = (b); __a > __b ? __b : __a; })
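
/*
 * Usage sketch (not part of the original header): min_t() mirrors the
 * Linux helper; the explicit type parameter converts both operands
 * before comparing, avoiding mixed signed/unsigned comparisons.  The
 * variables below are hypothetical.
 *
 *	u8 buf[64];
 *	size_t n = min_t(size_t, len, sizeof(buf));
 */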
typedef uint8_t		u8;
typedef uint16_t	u16;
typedef uint32_t	u32;
typedef uint32_t	__le32;
typedef uint64_t	u64;
typedef uint64_t	__le64;
#define __must_check	__attribute__((__warn_unused_result__))

#define __ro_after_init	__read_mostly

#define get_unaligned_le32(x)	le32dec(x)
#define get_unaligned_le64(x)	le64dec(x)

#define cpu_to_le64(x)	htole64(x)
#define cpu_to_le32(x)	htole32(x)
#define letoh64(x)	le64toh(x)

#define need_resched() \
	((curthread->td_flags & (TDF_NEEDRESCHED|TDF_ASTPENDING)) || \
	 curthread->td_owepreempt)

#define CONTAINER_OF(a, b, c)	__containerof((a), b, c)
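
/*
 * Usage sketch (illustrative, not from the original file): recover a
 * pointer to the enclosing structure from a pointer to one of its
 * members, as with Linux's container_of().  The structure and callback
 * here are hypothetical.
 *
 *	struct noise_keypair_ex {
 *		int			nk_id;
 *		struct epoch_context	nk_epoch;
 *	};
 *
 *	static void
 *	keypair_free_cb(struct epoch_context *ctx)
 *	{
 *		struct noise_keypair_ex *kp =
 *		    CONTAINER_OF(ctx, struct noise_keypair_ex, nk_epoch);
 *		kfree(kp);
 *	}
 */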
/* SipHashX() consumes 16 key bytes; two 64-bit halves cover that. */
typedef struct {
	uint64_t	k0;
	uint64_t	k1;
} SIPHASH_KEY;

static inline uint64_t
siphash24(const SIPHASH_KEY *key, const void *src, size_t len)
{
	SIPHASH_CTX ctx;

	return (SipHashX(&ctx, 2, 4, (const uint8_t *)key, src, len));
}
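
/*
 * Usage sketch (not in the original header): siphash24() computes
 * SipHash-2-4 over a buffer with a 128-bit key.  Key and data below
 * are made up for illustration.
 *
 *	SIPHASH_KEY key = { .k0 = 0x0706050403020100ULL,
 *			    .k1 = 0x0f0e0d0c0b0a0908ULL };
 *	u8 buf[16] = { 0 };
 *	u64 tag = siphash24(&key, buf, sizeof(buf));
 */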
static inline void
put_unaligned_le32(u32 val, void *p)
{
	*((__le32 *)p) = cpu_to_le32(val);
}
#define rol32(i32, n)	((i32) << (n) | (i32) >> (32 - (n)))

#define memzero_explicit(p, s)	explicit_bzero(p, s)

#define EXPORT_SYMBOL(x)

#define U32_MAX		((u32)~0U)
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#define kfpu_begin(ctx) {						\
	if (ctx->sc_fpu_ctx == NULL) {					\
		ctx->sc_fpu_ctx = fpu_kern_alloc_ctx(0);		\
	}								\
	fpu_kern_enter(curthread, ctx->sc_fpu_ctx, FPU_KERN_NORMAL);	\
}

#define kfpu_end(ctx) {							\
	MPASS(ctx->sc_fpu_ctx != NULL);					\
	fpu_kern_leave(curthread, ctx->sc_fpu_ctx);			\
}
#else
#define kfpu_begin(ctx)
#define kfpu_end(ctx)
#define fpu_kern_free_ctx(p)
#endif
typedef enum {
	HAVE_NO_SIMD = 1 << 0,
	HAVE_FULL_SIMD = 1 << 1,
	HAVE_SIMD_IN_USE = 1 << 31
} simd_context_state_t;

typedef struct {
	simd_context_state_t	sc_state;
	struct fpu_kern_ctx	*sc_fpu_ctx;
} simd_context_t;

#define DONT_USE_SIMD	NULL
static __must_check inline bool
may_use_simd(void)
{
#if defined(__amd64__)
	return true;
#else
	return false;
#endif
}
static inline void
simd_get(simd_context_t *ctx)
{
	ctx->sc_state = may_use_simd() ? HAVE_FULL_SIMD : HAVE_NO_SIMD;
}
static inline void
simd_put(simd_context_t *ctx)
{
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
	if (is_fpu_kern_thread(0))
		return;
#endif
	if (ctx->sc_state & HAVE_SIMD_IN_USE)
		kfpu_end(ctx);
	ctx->sc_state = HAVE_NO_SIMD;
}
static __must_check inline bool
simd_use(simd_context_t *ctx)
{
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
	if (is_fpu_kern_thread(0))
		return true;
#endif
	if (ctx == NULL)
		return false;
	if (!(ctx->sc_state & HAVE_FULL_SIMD))
		return false;
	if (ctx->sc_state & HAVE_SIMD_IN_USE)
		return true;
	kfpu_begin(ctx);
	ctx->sc_state |= HAVE_SIMD_IN_USE;
	return true;
}
static inline bool
simd_relax(simd_context_t *ctx)
{
	if ((ctx->sc_state & HAVE_SIMD_IN_USE) && need_resched()) {
		simd_put(ctx);
		return simd_use(ctx);
	}
	return (ctx->sc_state & HAVE_FULL_SIMD);
}
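
/*
 * Usage sketch (illustrative only): the intended calling pattern for
 * the SIMD context API above.  A caller brackets a batch of crypto
 * work with simd_get()/simd_put(), branches on simd_use() before
 * taking a vectorized path, and calls simd_relax() between chunks so
 * a pending reschedule can temporarily drop the FPU section.  The
 * *_simd and *_scalar workers are hypothetical.
 *
 *	simd_context_t simd;
 *
 *	simd_get(&simd);
 *	while (more_chunks) {
 *		if (simd_use(&simd))
 *			chacha20_chunk_simd(...);
 *		else
 *			chacha20_chunk_scalar(...);
 *		simd_relax(&simd);
 *	}
 *	simd_put(&simd);
 */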
#define unlikely(x)	__predict_false(x)
#define likely(x)	__predict_true(x)

/* Generic path for arbitrary size */
static inline unsigned long
__crypto_memneq_generic(const void *a, const void *b, size_t size)
{
	unsigned long neq = 0;

	while (size >= sizeof(unsigned long)) {
		neq |= *(const unsigned long *)a ^ *(const unsigned long *)b;
		a = ((const char *)a + sizeof(unsigned long));
		b = ((const char *)b + sizeof(unsigned long));
		size -= sizeof(unsigned long);
	}
	while (size > 0) {
		neq |= *(const unsigned char *)a ^ *(const unsigned char *)b;
		a = (const char *)a + 1;
		b = (const char *)b + 1;
		size -= 1;
	}
	return neq;
}

#define crypto_memneq(a, b, c)	__crypto_memneq_generic((a), (b), (c))
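
/*
 * Usage sketch (not in the original header): unlike memcmp(),
 * crypto_memneq() touches every byte regardless of where the buffers
 * first differ, so its timing does not leak the mismatch position.
 * That makes it the right primitive for MAC/tag checks; "mac" and
 * "computed" below are hypothetical 16-byte tags.
 *
 *	if (crypto_memneq(mac, computed, 16))
 *		return (EINVAL);
 */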
static inline void
__cpu_to_le32s(uint32_t *buf)
{
	*buf = htole32(*buf);
}
static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
	while (words--) {
		__cpu_to_le32s(buf);
		buf++;
	}
}
#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1

void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len);
static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
    unsigned int size)
{
	if (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		const unsigned long *s1 = (const unsigned long *)src1;
		const unsigned long *s2 = (const unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
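
/*
 * Usage sketch (illustrative): XOR a keystream into plaintext in a
 * single pass.  When size is a compile-time constant multiple of a
 * long, the word-at-a-time branch above is selected statically; the
 * buffers here are hypothetical.
 *
 *	u8 ct[64], ks[64], pt[64];
 *	crypto_xor_cpy(ct, ks, pt, sizeof(ct));
 */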
#include <sys/kernel.h>

#define module_init(fn)						\
static void							\
wrap_ ## fn(void *dummy __unused)				\
{								\
	fn();							\
}								\
SYSINIT(if_wg_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)

#define module_exit(fn)						\
static void							\
wrap_ ## fn(void *dummy __unused)				\
{								\
	fn();							\
}								\
SYSUNINIT(if_wg_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)
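
/*
 * Usage sketch (not part of the original file): a Linux-style
 * init/exit pair wired into SYSINIT/SYSUNINIT through the macros
 * above.  mod_init/mod_deinit are hypothetical.
 *
 *	static void mod_init(void) { ... }
 *	static void mod_deinit(void) { ... }
 *
 *	module_init(mod_init);
 *	module_exit(mod_deinit);
 */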
#define module_param(a, b, c)
#define MODULE_LICENSE(x)
#define MODULE_DESCRIPTION(x)
#define MODULE_AUTHOR(x)

#define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))

#define BUG()	panic("%s:%d bug hit!\n", __FILE__, __LINE__)
#define WARN_ON(cond) ({						\
	bool __ret = (cond);						\
	if (__ret) {							\
		printf("WARNING %s failed at %s:%d\n",			\
		    __stringify(cond), __FILE__, __LINE__);		\
	}								\
	unlikely(__ret);						\
})
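
/*
 * Usage sketch (illustrative): WARN_ON() evaluates to its condition,
 * so it can both log and gate an early return, mirroring the Linux
 * idiom.  MAX_QUEUE_LEN is hypothetical.
 *
 *	if (WARN_ON(qlen > MAX_QUEUE_LEN))
 *		return (ENOBUFS);
 */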
#define pr_err		printf
#define pr_info		printf
#define IS_ENABLED(x)	0
#define ___stringify(...)	#__VA_ARGS__
#define __stringify(...)	___stringify(__VA_ARGS__)

/* The flag argument is ignored: these shims always allocate M_WAITOK. */
#define kmalloc(size, flag)	malloc((size), M_WG, M_WAITOK)
#define kfree(p)		free(p, M_WG)
#define vzalloc(size)		malloc((size), M_WG, M_WAITOK|M_ZERO)
#define vfree(p)		free(p, M_WG)
#endif	/* !__LOCORE */
#endif	/* SYS_SUPPORT_H_ */