2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
5 * Copyright (c) 2021 Dmitry Chagin <dchagin@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/*
 * Linux vDSO symbol-name prefix: aarch64 exports __kernel_* entry points,
 * other architectures export __vdso_*.
 * NOTE(review): the #else between lines 30 and 32 is elided from this view.
 */
29 #if defined(__aarch64__)
30 #define __VDSO_PREFIX __kernel
32 #define __VDSO_PREFIX __vdso
35 #define __vdsoN(x) __CONCAT(__CONCAT(__VDSO_PREFIX,_),x)
/*
 * Find-last-set via count-leading-zeros: for nonzero mask,
 * __builtin_clz(mask) ^ 0x1f == 31 - clz(mask), i.e. the index of the
 * most significant set bit; +1 gives the conventional 1-based fls result.
 * NOTE(review): the enclosing function header is elided from this view.
 */
43 return ((__builtin_clz(mask) ^ 0x1f) + 1);
/*
 * Portable bit scan: shift mask right until only bit 0 remains, counting
 * iterations to get the 1-based position of the highest set bit.
 * Presumably the fallback flsl() body (the unsigned long cast suggests
 * so) — the surrounding declaration is elided from this view.
 */
54 for (bit = 1; mask != 1; bit++)
55 mask = (unsigned long)mask >> 1;
/*
 * Same bit-scan fallback for 64-bit values; presumably the flsll() body
 * (the unsigned long long cast suggests so) — the surrounding declaration
 * is elided from this view.
 */
66 for (bit = 1; mask != 1; bit++)
67 mask = (unsigned long long)mask >> 1;
/*
 * Convert a native struct timespec to the 32-bit Linux l_timespec.
 * Fails with LINUX_EOVERFLOW when tv_sec does not fit in an int
 * (Linux 32-bit time_t range); otherwise copies both fields.
 */
73 __vdso_native_to_linux_timespec(struct l_timespec *lts,
78 if (nts->tv_sec > INT_MAX || nts->tv_sec < INT_MIN)
79 return (LINUX_EOVERFLOW);
81 lts->tv_sec = nts->tv_sec;
82 lts->tv_nsec = nts->tv_nsec;
/*
 * Convert a native struct timeval to the Linux l_timeval.
 * Fails with LINUX_EOVERFLOW when tv_sec does not fit in an int;
 * otherwise copies both fields.
 */
87 __vdso_native_to_linux_timeval(l_timeval *ltv,
92 if (ntv->tv_sec > INT_MAX || ntv->tv_sec < INT_MIN)
93 return (LINUX_EOVERFLOW);
95 ltv->tv_sec = ntv->tv_sec;
96 ltv->tv_usec = ntv->tv_usec;
/*
 * 32-bit Linux ABI only (i386, or amd64 with COMPAT_LINUX32): convert a
 * native timespec to Linux's 64-bit-time l_timespec64. Unlike the
 * l_timespec variant above, no overflow check is visible here — the
 * 64-bit seconds field can represent any native tv_sec.
 */
101 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
103 __vdso_native_to_linux_timespec64(struct l_timespec64 *lts,
104 struct timespec *nts)
107 lts->tv_sec = nts->tv_sec;
108 lts->tv_nsec = nts->tv_nsec;
/*
 * Map a Linux clock id to the corresponding native (FreeBSD) clock id
 * through *n. Linux's COARSE clocks map to the native *_FAST clocks;
 * MONOTONIC_RAW is also served by CLOCK_MONOTONIC_FAST. Unknown ids
 * fail with LINUX_EINVAL.
 * NOTE(review): the targets for LINUX_CLOCK_REALTIME and
 * LINUX_CLOCK_BOOTTIME, and the break statements, are elided from this
 * view — confirm against the full file.
 */
114 __vdso_linux_to_native_clockid(clockid_t *n, clockid_t l)
118 case LINUX_CLOCK_REALTIME:
121 case LINUX_CLOCK_MONOTONIC:
122 *n = CLOCK_MONOTONIC;
124 case LINUX_CLOCK_REALTIME_COARSE:
125 *n = CLOCK_REALTIME_FAST;
127 case LINUX_CLOCK_MONOTONIC_COARSE:
128 case LINUX_CLOCK_MONOTONIC_RAW:
129 *n = CLOCK_MONOTONIC_FAST;
131 case LINUX_CLOCK_BOOTTIME:
135 return (LINUX_EINVAL);
141 * The code below adapted from
142 * lib/libc/sys/__vdso_gettimeofday.c
/*
 * Return through *tk a pointer to the vdso_timekeep structure exported
 * by the kernel, located at kern_timekeep_base (the shared page).
 */
146 __vdso_gettimekeep(struct vdso_timekeep **tk)
149 *tk = (struct vdso_timekeep *)kern_timekeep_base;
/*
 * Read the hardware timecounter via the machine-specific __vdso_gettc()
 * and store in *delta the timecounter advance since the timehands were
 * published: the difference from th_offset_count, wrapped by the
 * counter's mask.
 */
153 tc_delta(const struct vdso_timehands *th, u_int *delta)
158 error = __vdso_gettc(th, &tc);
160 *delta = (tc - th->th_offset_count) & th->th_counter_mask;
165 * Calculate the absolute or boot-relative time from the
166 * machine-specific fast timecounter and the published timehands
167 * structure read from the shared page.
169 * The lockless reading scheme is similar to the one used to read the
170 * in-kernel timehands, see sys/kern/kern_tc.c:binuptime(). This code
171 * is based on the kernel implementation.
174 freebsd_binuptime(struct bintime *bt, struct vdso_timekeep *tk, bool abs)
176 struct vdso_timehands *th;
179 u_int delta, scale_bits;
/* Retry loop: re-read until a consistent timehands generation is seen. */
186 curr = atomic_load_acq_32(&tk->tk_current);
187 th = &tk->tk_th[curr];
188 gen = atomic_load_acq_32(&th->th_gen);
190 error = tc_delta(th, &delta);
195 scale = th->th_scale;
/* flsl() vs flsll() for the 64-bit scale — presumably one branch per
 * ABI; the selecting #if/#else is elided from this view. */
197 scale_bits = flsl(scale);
199 scale_bits = flsll(scale);
/*
 * If scale * delta could overflow 64 bits, split the multiplication
 * into the high and low 32-bit halves of scale.
 */
201 if (__predict_false(scale_bits + fls(delta) > 63)) {
202 x = (scale >> 32) * delta;
205 bintime_addx(bt, x << 32);
207 bintime_addx(bt, scale * delta);
/* Add boottime to yield absolute time — presumably guarded by the
 * 'abs' argument; the guard is elided from this view. */
209 bintime_add(bt, &th->th_boottime);
212 * Ensure that the load of th_offset is completed
213 * before the load of th_gen.
215 atomic_thread_fence_acq();
216 } while (curr != tk->tk_current || gen == 0 || gen != th->th_gen);
/*
 * Coarse boot-relative time: return the value precomputed in the
 * current timehands without reading the hardware timecounter, using the
 * same lockless generation-check retry loop as freebsd_binuptime().
 * NOTE(review): the copy of the timehands value into *bt is elided from
 * this view.
 */
221 freebsd_getnanouptime(struct bintime *bt, struct vdso_timekeep *tk)
223 struct vdso_timehands *th;
230 curr = atomic_load_acq_32(&tk->tk_current);
231 th = &tk->tk_th[curr];
232 gen = atomic_load_acq_32(&th->th_gen);
236 * Ensure that the load of th_offset is completed
237 * before the load of th_gen.
239 atomic_thread_fence_acq();
240 } while (curr != tk->tk_current || gen == 0 || gen != th->th_gen);
/*
 * Shared-page gettimeofday: locate the timekeep structure, verify its
 * version against VDSO_TK_VER_CURR, compute absolute time via
 * freebsd_binuptime(..., true) and convert it to a timeval.
 * NOTE(review): error paths and any tz handling are elided from this
 * view.
 */
245 freebsd_gettimeofday(struct timeval *tv, struct timezone *tz)
247 struct vdso_timekeep *tk;
253 __vdso_gettimekeep(&tk);
256 if (tk->tk_ver != VDSO_TK_VER_CURR)
258 error = freebsd_binuptime(&bt, tk, true);
260 bintime2timeval(&bt, tv);
/*
 * Shared-page clock_gettime for native clock ids. After validating the
 * timekeep version, dispatch on clock_id:
 *   - realtime clocks (PRECISE/FAST): absolute binuptime;
 *   - monotonic/uptime precise clocks: boot-relative binuptime;
 *   - monotonic/uptime FAST clocks: coarse getnanouptime path.
 * The resulting bintime is converted to *ts.
 * NOTE(review): the switch scaffolding, break statements, and default
 * case are elided from this view.
 */
265 freebsd_clock_gettime(clockid_t clock_id, struct timespec *ts)
267 struct vdso_timekeep *tk;
271 __vdso_gettimekeep(&tk);
274 if (tk->tk_ver != VDSO_TK_VER_CURR)
278 case CLOCK_REALTIME_PRECISE:
279 case CLOCK_REALTIME_FAST:
280 error = freebsd_binuptime(&bt, tk, true);
282 case CLOCK_MONOTONIC:
283 case CLOCK_MONOTONIC_PRECISE:
285 case CLOCK_UPTIME_PRECISE:
286 error = freebsd_binuptime(&bt, tk, false);
288 case CLOCK_MONOTONIC_FAST:
289 case CLOCK_UPTIME_FAST:
290 error = freebsd_getnanouptime(&bt, tk);
297 bintime2timespec(&bt, ts);
302 * Linux vDSO interfaces
/*
 * Linux vDSO clock_gettime entry point (__vdso_/__kernel_ prefixed).
 * Map the Linux clock id to a native one, read the time through
 * freebsd_clock_gettime(), and convert the result to l_timespec.
 * Linux expects negative errno values, hence the negated conversion
 * result; any failure falls back to the real clock_gettime syscall.
 */
306 __vdsoN(clock_gettime)(clockid_t clock_id, struct l_timespec *lts)
312 error = __vdso_linux_to_native_clockid(&which, clock_id);
314 return (__vdso_clock_gettime_fallback(clock_id, lts));
315 error = freebsd_clock_gettime(which, &ts);
317 return (-__vdso_native_to_linux_timespec(lts, &ts));
319 return (__vdso_clock_gettime_fallback(clock_id, lts));
/*
 * Linux vDSO gettimeofday entry point: read the time via the shared
 * page and convert to l_timeval (negated to yield a Linux-style
 * negative errno); on failure, fall back to the gettimeofday syscall.
 */
323 __vdsoN(gettimeofday)(l_timeval *ltv, struct timezone *tz)
328 error = freebsd_gettimeofday(&tv, tz);
330 return (__vdso_gettimeofday_fallback(ltv, tz));
331 return (-__vdso_native_to_linux_timeval(ltv, &tv));
/*
 * Linux vDSO clock_getres entry point. As visible here it simply
 * forwards to the clock_getres syscall fallback — no fast path is
 * implemented in the vDSO.
 */
335 __vdsoN(clock_getres)(clockid_t clock_id, struct l_timespec *lts)
338 return (__vdso_clock_getres_fallback(clock_id, lts));
/*
 * 32-bit Linux ABI only: clock_gettime64, the 64-bit-time variant of
 * clock_gettime. Same structure as __vdsoN(clock_gettime) but converts
 * into l_timespec64 and falls back to the clock_gettime64 syscall.
 * Exported to Linux userland as "clock_gettime64" via the weak alias
 * below.
 */
341 #if defined(__i386__) || defined(COMPAT_LINUX32)
343 __vdso_clock_gettime64(clockid_t clock_id, struct l_timespec64 *lts)
349 error = __vdso_linux_to_native_clockid(&which, clock_id);
351 return (__vdso_clock_gettime64_fallback(clock_id, lts));
352 error = freebsd_clock_gettime(which, &ts);
354 return(-__vdso_native_to_linux_timespec64(lts, &ts));
356 return(__vdso_clock_gettime64_fallback(clock_id, lts));
359 int clock_gettime64(clockid_t clock_id, struct l_timespec64 *lts)
360 __attribute__((weak, alias("__vdso_clock_gettime64")));
/*
 * x86 only: Linux vDSO getcpu. Attempt the fast in-vDSO lookup via
 * __vdso_getcpu_try(); on failure (two fallback returns are visible —
 * presumably one for "unsupported" and one for a failed try), punt to
 * the getcpu syscall fallback.
 * NOTE(review): the result decoding into *cpu/*node is elided from this
 * view.
 */
363 #if defined(__i386__) || defined(__amd64__)
365 __vdso_getcpu(uint32_t *cpu, uint32_t *node, void *cache)
370 return (__vdso_getcpu_fallback(cpu, node, cache));
371 ret = __vdso_getcpu_try();
373 return (__vdso_getcpu_fallback(cpu, node, cache));
/*
 * x86 only: Linux vDSO time(2). Implemented on top of
 * freebsd_gettimeofday(); on error, fall back to the time syscall.
 * NOTE(review): the function continues past the end of this view
 * (storing/returning tv_sec is not visible here).
 */
379 #if defined(__i386__) || defined(__amd64__)
381 __vdso_time(long *tm)
386 error = freebsd_gettimeofday(&tv, NULL);
388 return (__vdso_time_fallback(tm));