2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
24 * Solaris Porting Layer (SPL) Condition Variables Implementation.
27 #include <sys/condvar.h>
29 #include <linux/hrtimer.h>
/*
 * __cv_init - initialize a condition variable.
 *
 * Only CV_DEFAULT condvars are supported; the 'name' and 'arg'
 * parameters exist for Solaris API compatibility and appear unused
 * here.  One self-reference is taken in cv_refs; it is dropped again
 * by __cv_destroy().
 */
32 __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
36 ASSERT(type == CV_DEFAULT);
/* Magic value lets later calls sanity check the cv was initialized. */
39 cvp->cv_magic = CV_MAGIC;
40 init_waitqueue_head(&cvp->cv_event);
/* Separate queue used only by __cv_destroy() to wait for waiters to drain. */
41 init_waitqueue_head(&cvp->cv_destroy);
42 atomic_set(&cvp->cv_waiters, 0);
/* Initial reference held on behalf of the cv itself. */
43 atomic_set(&cvp->cv_refs, 1);
46 EXPORT_SYMBOL(__cv_init);
/*
 * cv_destroy_wakeup - test whether the cv has fully quiesced.
 *
 * The cv is quiescent once there are no remaining waiters and no
 * remaining references; at that point cv_mutex must already have been
 * cleared and the event queue must be empty.  Used by __cv_destroy()
 * both as a direct check and as a wait_event_timeout() condition.
 * (Return statements are outside this view; presumably non-zero when
 * quiescent — TODO confirm against the full source.)
 */
49 cv_destroy_wakeup(kcondvar_t *cvp)
51 if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
52 ASSERT(cvp->cv_mutex == NULL);
53 ASSERT(!waitqueue_active(&cvp->cv_event));
/*
 * __cv_destroy - tear down a condition variable.
 *
 * Marks the cv destroyed, drops the self-reference taken by
 * __cv_init(), then blocks until every waiter has woken and every
 * reference has been released before the memory may be reused.
 */
61 __cv_destroy(kcondvar_t *cvp)
64 ASSERT(cvp->cv_magic == CV_MAGIC);
/* Flip the magic so late users of a destroyed cv trip the ASSERTs. */
66 cvp->cv_magic = CV_DESTROY;
/* Drop the reference held since __cv_init(). */
67 atomic_dec(&cvp->cv_refs);
69 /* Block until all waiters are woken and references dropped. */
/* The 1-jiffy timeout guards against a missed wake_up(&cv_destroy). */
70 while (cv_destroy_wakeup(cvp) == 0)
71 wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
73 ASSERT3P(cvp->cv_mutex, ==, NULL);
74 ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
75 ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
76 ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
78 EXPORT_SYMBOL(__cv_destroy);
/*
 * cv_wait_common - untimed wait shared by the cv_wait*() variants.
 *
 * 'state' selects TASK_UNINTERRUPTIBLE vs TASK_INTERRUPTIBLE and 'io'
 * selects the io_schedule() path (the schedule call itself is outside
 * this view).  Caller must hold 'mp'; it is dropped while sleeping and
 * reacquired before return.  A cv_refs reference is held across the
 * sleep so a concurrent __cv_destroy() cannot complete under us.
 */
81 cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
88 ASSERT(cvp->cv_magic == CV_MAGIC);
89 ASSERT(mutex_owned(mp));
90 atomic_inc(&cvp->cv_refs);
/* Record the associated mutex for debug; racy but best-effort only. */
92 m = ACCESS_ONCE(cvp->cv_mutex);
94 m = xchg(&cvp->cv_mutex, mp);
95 /* Ensure the same mutex is used by all callers */
96 ASSERT(m == NULL || m == mp);
/* Exclusive waiters so cv_signal() wakes exactly one task. */
98 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
99 atomic_inc(&cvp->cv_waiters);
102 * Mutex should be dropped after prepare_to_wait(); this
103 * ensures we're linked in to the waiters list and avoids the
104 * race where 'cvp->cv_waiters > 0' but the list is empty.
112 /* No more waiters; a different mutex could now be used */
113 if (atomic_dec_and_test(&cvp->cv_waiters)) {
115 * This is set without any lock, so it's racy. But this is
116 * just for debug anyway, so make it best-effort
118 cvp->cv_mutex = NULL;
/* Let a blocked __cv_destroy() re-check its quiescence condition. */
119 wake_up(&cvp->cv_destroy);
122 finish_wait(&cvp->cv_event, &wait);
123 atomic_dec(&cvp->cv_refs);
126 * Hold mutex after we release the cvp, otherwise we could deadlock
127 * with a thread holding the mutex and calling cv_destroy().
/* Uninterruptible, non-I/O wait; thin wrapper over cv_wait_common(). */
133 __cv_wait(kcondvar_t *cvp, kmutex_t *mp)
135 cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
137 EXPORT_SYMBOL(__cv_wait);
/* Uninterruptible wait accounted as I/O (io=1) for iowait statistics. */
140 __cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
142 cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
144 EXPORT_SYMBOL(__cv_wait_io);
/* Interruptible wait; a pending signal may wake the sleeping task. */
147 __cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
149 cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
151 EXPORT_SYMBOL(__cv_wait_sig);
/*
 * Compatibility shim for io_schedule_timeout().  When the running
 * kernel exports it, use it directly; otherwise emulate it with a
 * timer that wakes the current task after 'time_left' jiffies
 * (presumably around a plain io_schedule() call outside this view —
 * TODO confirm).
 */
153 #if defined(HAVE_IO_SCHEDULE_TIMEOUT)
154 #define spl_io_schedule_timeout(t) io_schedule_timeout(t)
/* Timer callback: 'data' carries the task_struct of the sleeper. */
157 __cv_wakeup(unsigned long data)
159 wake_up_process((struct task_struct *)data);
/* Fallback: returns jiffies remaining, clamped to 0 on timeout. */
163 spl_io_schedule_timeout(long time_left)
165 long expire_time = jiffies + time_left;
166 struct timer_list timer;
169 setup_timer(&timer, __cv_wakeup, (unsigned long)current);
170 timer.expires = expire_time;
/* del_timer_sync() guarantees the callback is not still running. */
175 del_timer_sync(&timer);
176 time_left = expire_time - jiffies;
178 return (time_left < 0 ? 0 : time_left);
183 * 'expire_time' argument is an absolute wall clock time in jiffies.
184 * Return value is time left (expire_time - now) or -1 if timeout occurred.
/*
 * Timed variant of cv_wait_common(); same reference counting, mutex
 * bookkeeping and waiter accounting, but bounded by 'expire_time'.
 */
187 __cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
196 ASSERT(cvp->cv_magic == CV_MAGIC);
197 ASSERT(mutex_owned(mp));
199 /* XXX - Does not handle jiffie wrap properly */
/* Convert the absolute deadline to a relative jiffy count. */
200 time_left = expire_time - jiffies;
204 atomic_inc(&cvp->cv_refs);
/* Record the associated mutex for debug; racy but best-effort only. */
205 m = ACCESS_ONCE(cvp->cv_mutex);
207 m = xchg(&cvp->cv_mutex, mp);
208 /* Ensure the same mutex is used by all callers */
209 ASSERT(m == NULL || m == mp);
/* Exclusive waiters so cv_signal() wakes exactly one task. */
211 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
212 atomic_inc(&cvp->cv_waiters);
215 * Mutex should be dropped after prepare_to_wait(); this
216 * ensures we're linked in to the waiters list and avoids the
217 * race where 'cvp->cv_waiters > 0' but the list is empty.
/* io selects the iowait-accounted sleep. */
221 time_left = spl_io_schedule_timeout(time_left);
223 time_left = schedule_timeout(time_left);
225 /* No more waiters; a different mutex could now be used */
226 if (atomic_dec_and_test(&cvp->cv_waiters)) {
228 * This is set without any lock, so it's racy. But this is
229 * just for debug anyway, so make it best-effort
231 cvp->cv_mutex = NULL;
/* Let a blocked __cv_destroy() re-check its quiescence condition. */
232 wake_up(&cvp->cv_destroy);
235 finish_wait(&cvp->cv_event, &wait);
236 atomic_dec(&cvp->cv_refs);
239 * Hold mutex after we release the cvp, otherwise we could dead lock
240 * with a thread holding the mutex and call cv_destroy.
/* -1 signals timeout to the caller, per the Solaris cv_timedwait contract. */
243 return (time_left > 0 ? time_left : -1);
/* Uninterruptible timed wait; deadline 'exp_time' is absolute jiffies. */
247 __cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
249 return (__cv_timedwait_common(cvp, mp, exp_time,
250 TASK_UNINTERRUPTIBLE, 0));
252 EXPORT_SYMBOL(__cv_timedwait);
/* Uninterruptible timed wait accounted as I/O (io=1). */
255 __cv_timedwait_io(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
257 return (__cv_timedwait_common(cvp, mp, exp_time,
258 TASK_UNINTERRUPTIBLE, 1));
260 EXPORT_SYMBOL(__cv_timedwait_io);
/* Interruptible timed wait; a pending signal may wake the task early. */
263 __cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
265 return (__cv_timedwait_common(cvp, mp, exp_time,
266 TASK_INTERRUPTIBLE, 0));
268 EXPORT_SYMBOL(__cv_timedwait_sig);
271 * 'expire_time' argument is an absolute clock time in nanoseconds.
272 * Return value is time left (expire_time - now) or -1 if timeout occurred.
/*
 * High-resolution timed wait built on schedule_hrtimeout_range();
 * mirrors __cv_timedwait_common() but counts in nanoseconds and
 * returns the remaining time converted to ticks.
 */
275 __cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
285 ASSERT(cvp->cv_magic == CV_MAGIC);
286 ASSERT(mutex_owned(mp));
/* Remaining nanoseconds until the absolute deadline. */
288 time_left = expire_time - gethrtime();
292 atomic_inc(&cvp->cv_refs);
/* Record the associated mutex for debug; racy but best-effort only. */
293 m = ACCESS_ONCE(cvp->cv_mutex);
295 m = xchg(&cvp->cv_mutex, mp);
296 /* Ensure the same mutex is used by all callers */
297 ASSERT(m == NULL || m == mp);
/* Exclusive waiters so cv_signal() wakes exactly one task. */
299 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
300 atomic_inc(&cvp->cv_waiters);
303 * Mutex should be dropped after prepare_to_wait(); this
304 * ensures we're linked in to the waiters list and avoids the
305 * race where 'cvp->cv_waiters > 0' but the list is empty.
309 * Allow a 100 us range to give kernel an opportunity to coalesce
/* hrtimer wakeups; trades a little precision for fewer interrupts. */
312 ktime_left = ktime_set(0, time_left);
313 schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
316 /* No more waiters; a different mutex could now be used */
317 if (atomic_dec_and_test(&cvp->cv_waiters)) {
319 * This is set without any lock, so it's racy. But this is
320 * just for debug anyway, so make it best-effort
322 cvp->cv_mutex = NULL;
/* Let a blocked __cv_destroy() re-check its quiescence condition. */
323 wake_up(&cvp->cv_destroy);
326 finish_wait(&cvp->cv_event, &wait);
327 atomic_dec(&cvp->cv_refs);
/* Recompute remaining time; convert ns to ticks, or -1 on timeout. */
330 time_left = expire_time - gethrtime();
331 return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
335 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
/*
 * Normalizes 'tim' per the Illumos callout flags before delegating to
 * __cv_timedwait_hires().  'res' is the timer resolution to align to.
 */
338 cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
339 hrtime_t res, int flag, int state)
343 * Align expiration to the specified resolution.
/* NOTE(review): only the round-down arm is visible here; the
 * CALLOUT_FLAG_ROUNDUP arm is outside this view — confirm it
 * rounds up before truncating. */
345 if (flag & CALLOUT_FLAG_ROUNDUP)
347 tim = (tim / res) * res;
/* Without CALLOUT_FLAG_ABSOLUTE, 'tim' is relative; presumably the
 * current time is added here (line outside this view — confirm). */
350 if (!(flag & CALLOUT_FLAG_ABSOLUTE))
353 return (__cv_timedwait_hires(cvp, mp, tim, state));
/* Illumos-compatible hires timed wait, uninterruptible. */
357 cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
360 return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
361 TASK_UNINTERRUPTIBLE));
363 EXPORT_SYMBOL(cv_timedwait_hires);
/* Illumos-compatible hires timed wait, interruptible by signals. */
366 cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
367 hrtime_t res, int flag)
369 return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
370 TASK_INTERRUPTIBLE));
372 EXPORT_SYMBOL(cv_timedwait_sig_hires);
/*
 * __cv_signal - wake at most one waiter.
 *
 * A temporary cv_refs reference is held across the wake-up so the cv
 * cannot be destroyed out from under us.
 */
375 __cv_signal(kcondvar_t *cvp)
378 ASSERT(cvp->cv_magic == CV_MAGIC);
379 atomic_inc(&cvp->cv_refs);
382 * All waiters are added with WQ_FLAG_EXCLUSIVE so only one
383 * waiter will be set runnable with each call to wake_up().
384 * Additionally wake_up() holds a spin_lock associated with
385 * the wait queue to ensure we don't race waking up processes.
/* Skip the wake-up entirely when nobody is waiting. */
387 if (atomic_read(&cvp->cv_waiters) > 0)
388 wake_up(&cvp->cv_event);
390 atomic_dec(&cvp->cv_refs);
392 EXPORT_SYMBOL(__cv_signal);
/*
 * __cv_broadcast - wake every waiter.
 *
 * A temporary cv_refs reference is held across the wake-up so the cv
 * cannot be destroyed out from under us.
 */
395 __cv_broadcast(kcondvar_t *cvp)
398 ASSERT(cvp->cv_magic == CV_MAGIC);
399 atomic_inc(&cvp->cv_refs);
402 * wake_up_all() will wake up all waiters even those which
403 * have the WQ_FLAG_EXCLUSIVE flag set.
/* Skip the wake-up entirely when nobody is waiting. */
405 if (atomic_read(&cvp->cv_waiters) > 0)
406 wake_up_all(&cvp->cv_event);
408 atomic_dec(&cvp->cv_refs);
410 EXPORT_SYMBOL(__cv_broadcast);