/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");

/*
 * TODO: allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

struct callout_cpu {
        struct mtx              cc_lock;
        struct callout          *cc_callout;
        struct callout_tailq    *cc_callwheel;
        struct callout_list     cc_callfree;
        struct callout          *cc_next;
        struct callout          *cc_curr;
        void                    *cc_cookie;
        int                     cc_softticks;
        int                     cc_cancel;
        int                     cc_waiting;
};

#ifdef SMP
struct callout_cpu cc_cpu[MAXCPU];
#define CC_CPU(cpu)     (&cc_cpu[(cpu)])
#define CC_SELF()       CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define CC_CPU(cpu)     &cc_cpu
#define CC_SELF()       &cc_cpu
#endif
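
/*
 * For example, CC_CPU(2) resolves to &cc_cpu[2] on an SMP kernel and to
 * the single shared cc_cpu otherwise, so callers need not know the
 * topology.
 */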

#define CC_LOCK(cc)     mtx_lock_spin(&(cc)->cc_lock)
#define CC_UNLOCK(cc)   mtx_unlock_spin(&(cc)->cc_lock)

static int timeout_cpu;

MALLOC_DEFINE(M_CALLOUT, "callout", "Callout data structures");

/**
 * Locked by cc_lock:
 *   cc_curr    - The callout currently in progress, if any.
 *                If cc_curr is non-NULL, threads waiting in
 *                callout_drain() will be woken up as soon as the
 *                relevant callout completes.
 *   cc_cancel  - Changing to 1 with both callout_lock and c_lock held
 *                guarantees that the current callout will not run.
 *                The softclock() function sets this to 0 before it
 *                drops callout_lock to acquire c_lock, and it calls
 *                the handler only if cc_cancel is still 0 after
 *                c_lock is successfully acquired.
 *   cc_waiting - If a thread is waiting in callout_drain(), then
 *                cc_waiting is nonzero.  Set only when
 *                cc_curr is non-NULL.
 */
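
/*
 * In outline, the resulting cancellation handshake in softclock() looks
 * like this (sketch, not verbatim):
 *
 *	cc->cc_curr = c;
 *	cc->cc_cancel = 0;
 *	CC_UNLOCK(cc);
 *	class->lc_lock(c_lock, sharedlock);
 *	if (cc->cc_cancel) {
 *		class->lc_unlock(c_lock);	(a canceller won the race)
 *		goto skip;			(the handler is not called)
 *	}
 *	cc->cc_cancel = 1;			(too late to cancel now)
 */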

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
        struct callout_cpu *cc;

        timeout_cpu = PCPU_GET(cpuid);
        cc = CC_CPU(timeout_cpu);
        /*
         * Calculate the callout wheel size: the smallest power of two
         * greater than or equal to ncallout.
         */
        for (callwheelsize = 1, callwheelbits = 0;
             callwheelsize < ncallout;
             callwheelsize <<= 1, ++callwheelbits)
                ;
        callwheelmask = callwheelsize - 1;
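
        /*
         * For example, with a hypothetical ncallout of 1000 the loop above
         * leaves callwheelsize = 1024, callwheelbits = 10 and
         * callwheelmask = 0x3ff, so "tick & callwheelmask" picks a bucket.
         */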
153 cc->cc_callout = (struct callout *)v;
154 v = (caddr_t)(cc->cc_callout + ncallout);
155 cc->cc_callwheel = (struct callout_tailq *)v;
156 v = (caddr_t)(cc->cc_callwheel + callwheelsize);

static void
callout_cpu_init(struct callout_cpu *cc)
{
        struct callout *c;
        int i;

        mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
        SLIST_INIT(&cc->cc_callfree);
        for (i = 0; i < callwheelsize; i++) {
                TAILQ_INIT(&cc->cc_callwheel[i]);
        }
        if (cc->cc_callout == NULL)
                return;
        for (i = 0; i < ncallout; i++) {
                c = &cc->cc_callout[i];
                callout_init(c, 0);
                c->c_flags = CALLOUT_LOCAL_ALLOC;
                SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
        }
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
        callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
void *softclock_ih;

static void
start_softclock(void *dummy)
{
        struct callout_cpu *cc;
#ifdef SMP
        int cpu;
#endif

        cc = CC_CPU(timeout_cpu);
        if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
            INTR_MPSAFE, &softclock_ih))
                panic("died while creating standard software ithreads");
        cc->cc_cookie = softclock_ih;
#ifdef SMP
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                if (cpu == timeout_cpu)
                        continue;
                if (CPU_ABSENT(cpu))
                        continue;
                cc = CC_CPU(cpu);
                if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
                    INTR_MPSAFE, &cc->cc_cookie))
                        panic("died while creating standard software ithreads");
                cc->cc_callout = NULL;  /* Only cpu0 handles timeout(). */
                cc->cc_callwheel = malloc(
                    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
                    M_WAITOK);
                callout_cpu_init(cc);
        }
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
        struct callout_cpu *cc;
        int need_softclock;
        int bucket;

        /*
         * Process callouts at a very low cpu priority, so we don't keep the
         * relatively high clock interrupt priority any longer than necessary.
         */
        need_softclock = 0;
        cc = CC_SELF();
        mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
        for (; (cc->cc_softticks - ticks) < 0; cc->cc_softticks++) {
                bucket = cc->cc_softticks & callwheelmask;
                if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
                        need_softclock = 1;
                        break;
                }
        }
        mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
        /*
         * swi_sched acquires the thread lock, so we don't want to call it
         * with cc_lock held; incorrect locking order.
         */
        if (need_softclock)
                swi_sched(cc->cc_cookie, 0);
}

/*
 * Lock the callout_cpu for the callout's current CPU, re-checking after
 * the lock is taken in case the callout migrated meanwhile.
 */
static struct callout_cpu *
callout_lock(struct callout *c)
{
        struct callout_cpu *cc;
        int cpu;

        for (;;) {
                cpu = c->c_cpu;
                cc = CC_CPU(cpu);
                CC_LOCK(cc);
                if (cpu == c->c_cpu)
                        break;
                CC_UNLOCK(cc);
        }
        return (cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
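
/*
 * Concretely, a callout due at tick t hangs off bucket
 * cc_callwheel[t & callwheelmask].  softclock() visits one bucket per
 * tick and runs only the entries whose c_time matches the current tick,
 * so insertion and removal are O(1) no matter how far away t is.
 */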

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
        struct callout_cpu *cc;
        struct callout *c;
        struct callout_tailq *bucket;
        int curticks;
        int steps;      /* #steps since we last allowed interrupts */
        int depth;
        int mpcalls;
        int lockcalls;
        int gcalls;
#ifdef DIAGNOSTIC
        struct bintime bt1, bt2;
        struct timespec ts2;
        static uint64_t maxdt = 36893488147419102LL;    /* 2 msec */
        static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

        mpcalls = 0;
        lockcalls = 0;
        gcalls = 0;
        depth = 0;
        steps = 0;
        cc = (struct callout_cpu *)arg;
        CC_LOCK(cc);
        while (cc->cc_softticks != ticks) {
                /*
                 * cc_softticks may be modified by hard clock, so cache
                 * it while we work on a given bucket.
                 */
                curticks = cc->cc_softticks;
                cc->cc_softticks++;
                bucket = &cc->cc_callwheel[curticks & callwheelmask];
                c = TAILQ_FIRST(bucket);
                while (c) {
                        depth++;
                        if (c->c_time != curticks) {
                                c = TAILQ_NEXT(c, c_links.tqe);
                                ++steps;
                                if (steps >= MAX_SOFTCLOCK_STEPS) {
                                        cc->cc_next = c;
                                        /* Give interrupts a chance. */
                                        CC_UNLOCK(cc);
                                        ;       /* nothing */
                                        CC_LOCK(cc);
                                        c = cc->cc_next;
                                        steps = 0;
                                }
                        } else {
                                void (*c_func)(void *);
                                void *c_arg;
                                struct lock_class *class;
                                struct lock_object *c_lock;
                                int c_flags, sharedlock;

                                cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
                                TAILQ_REMOVE(bucket, c, c_links.tqe);
                                class = (c->c_lock != NULL) ?
                                    LOCK_CLASS(c->c_lock) : NULL;
                                sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
                                    0 : 1;
                                c_lock = c->c_lock;
                                c_func = c->c_func;
                                c_arg = c->c_arg;
                                c_flags = c->c_flags;
                                if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
                                        c->c_flags = CALLOUT_LOCAL_ALLOC;
                                } else {
                                        c->c_flags =
                                            (c->c_flags & ~CALLOUT_PENDING);
                                }
                                cc->cc_curr = c;
                                cc->cc_cancel = 0;
                                CC_UNLOCK(cc);
                                if (c_lock != NULL) {
                                        class->lc_lock(c_lock, sharedlock);
                                        /*
                                         * The callout may have been cancelled
                                         * while we switched locks.
                                         */
                                        if (cc->cc_cancel) {
                                                class->lc_unlock(c_lock);
                                                goto skip;
                                        }
                                        /* The callout cannot be stopped now. */
                                        cc->cc_cancel = 1;

                                        if (c_lock == &Giant.lock_object) {
                                                gcalls++;
                                                CTR3(KTR_CALLOUT,
                                                    "callout %p func %p arg %p",
                                                    c, c_func, c_arg);
                                        } else {
                                                lockcalls++;
                                                CTR3(KTR_CALLOUT, "callout lock"
                                                    " %p func %p arg %p",
                                                    c, c_func, c_arg);
                                        }
                                } else {
                                        mpcalls++;
                                        CTR3(KTR_CALLOUT,
                                            "callout mpsafe %p func %p arg %p",
                                            c, c_func, c_arg);
                                }
#ifdef DIAGNOSTIC
                                binuptime(&bt1);
#endif
                                THREAD_NO_SLEEPING();
                                SDT_PROBE(callout_execute, kernel, ,
                                    callout_start, c, 0, 0, 0, 0);
                                c_func(c_arg);
                                SDT_PROBE(callout_execute, kernel, ,
                                    callout_end, c, 0, 0, 0, 0);
                                THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
                                binuptime(&bt2);
                                bintime_sub(&bt2, &bt1);
                                if (bt2.frac > maxdt) {
                                        if (lastfunc != c_func ||
                                            bt2.frac > maxdt * 2) {
                                                bintime2timespec(&bt2, &ts2);
                                                printf(
                        "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
                                                    c_func, c_arg,
                                                    (intmax_t)ts2.tv_sec,
                                                    ts2.tv_nsec);
                                        }
                                        maxdt = bt2.frac;
                                        lastfunc = c_func;
                                }
#endif
                                CTR1(KTR_CALLOUT, "callout %p finished", c);
                                if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
                                        class->lc_unlock(c_lock);
                        skip:
                                CC_LOCK(cc);
                                /*
                                 * If the current callout is locally
                                 * allocated (from timeout(9))
                                 * then put it on the freelist.
                                 *
                                 * Note: we need to check the cached
                                 * copy of c_flags because if it was not
                                 * local, then it's not safe to deref the
                                 * callout pointer.
                                 */
                                if (c_flags & CALLOUT_LOCAL_ALLOC) {
                                        KASSERT(c->c_flags ==
                                            CALLOUT_LOCAL_ALLOC,
                                            ("corrupted callout"));
                                        c->c_flags = CALLOUT_LOCAL_ALLOC;
                                        SLIST_INSERT_HEAD(&cc->cc_callfree, c,
                                            c_links.sle);
                                }
                                cc->cc_curr = NULL;
456 if (cc->cc_waiting) {
458 * There is someone waiting
459 * for the callout to complete.
463 wakeup(&cc->cc_waiting);
        avg_depth += (depth * 1000 - avg_depth) >> 8;
        avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
        avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
        avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
        cc->cc_next = NULL;
        CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
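
/*
 * Usage sketch (illustrative; foo_expired and sc are hypothetical):
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(foo_expired, sc, hz / 2);	(fire in half a second)
 *	...
 *	untimeout(foo_expired, sc, h);		(cancel if still pending)
 */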

struct callout_handle
timeout(timeout_t *ftn, void *arg, int to_ticks)
{
        struct callout_cpu *cc;
        struct callout *new;
        struct callout_handle handle;

        cc = CC_CPU(timeout_cpu);
        CC_LOCK(cc);
        /* Fill in the next free callout structure. */
        new = SLIST_FIRST(&cc->cc_callfree);
        if (new == NULL)
                /* XXX Attempt to malloc first */
                panic("timeout table full");
        SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
        callout_reset(new, to_ticks, ftn, arg);
        handle.callout = new;
        CC_UNLOCK(cc);

        return (handle);
}

void
untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
{
        struct callout_cpu *cc;

        /*
         * Check for a handle that was initialized
         * by callout_handle_init, but never used
         * for a real timeout.
         */
        if (handle.callout == NULL)
                return;

        cc = callout_lock(handle.callout);
        if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
                callout_stop(handle.callout);
        CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
        handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
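
/*
 * Usage sketch (illustrative; the foo_softc driver below is hypothetical):
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		(do periodic work; sc->foo_mtx is held on entry)
 *		callout_reset(&sc->foo_callout, hz, foo_tick, sc);
 *	}
 *
 *	callout_init_mtx(&sc->foo_callout, &sc->foo_mtx, 0);
 *	callout_reset(&sc->foo_callout, hz, foo_tick, sc);
 *	...
 *	callout_drain(&sc->foo_callout);	(foo_mtx must not be held)
 */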

int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
        struct callout_cpu *cc;
        int cancelled = 0;

        /*
         * Don't allow migration of pre-allocated callouts lest they
         * become homeless.
         */
        if (c->c_flags & CALLOUT_LOCAL_ALLOC)
                cpu = c->c_cpu;
retry:
        cc = callout_lock(c);
579 if (cc->cc_curr == c) {
581 * We're being asked to reschedule a callout which is
582 * currently in progress. If there is a lock then we
583 * can cancel the callout if it has not really started.
585 if (c->c_lock != NULL && !cc->cc_cancel)
586 cancelled = cc->cc_cancel = 1;
587 if (cc->cc_waiting) {
589 * Someone has called callout_drain to kill this
590 * callout. Don't reschedule.
592 CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
593 cancelled ? "cancelled" : "failed to cancel",
594 c, c->c_func, c->c_arg);
599 if (c->c_flags & CALLOUT_PENDING) {
600 if (cc->cc_next == c) {
601 cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
603 TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
607 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
        /*
         * If the callout must migrate we have to check the state again as
         * we can't hold both the new and old locks simultaneously.
         */
        if (c->c_cpu != cpu) {
                c->c_cpu = cpu;
                CC_UNLOCK(cc);
                goto retry;
        }

        c->c_arg = arg;
        c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
        c->c_func = ftn;
        c->c_time = ticks + to_ticks;
        TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
            c, c_links.tqe);
        CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
            cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
        CC_UNLOCK(cc);

        return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
        return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
        return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(struct callout *c, int safe)
{
        struct callout_cpu *cc;
        struct lock_class *class;
        int use_lock, sq_locked;

        /*
         * Some old subsystems don't hold Giant while running a callout_stop(),
         * so just discard this check for the moment.
         */
        if (!safe && c->c_lock != NULL) {
                if (c->c_lock == &Giant.lock_object)
                        use_lock = mtx_owned(&Giant);
                else {
                        use_lock = 1;
                        class = LOCK_CLASS(c->c_lock);
                        class->lc_assert(c->c_lock, LA_XLOCKED);
                }
        } else
                use_lock = 0;

        sq_locked = 0;
again:
        cc = callout_lock(c);
        /*
         * If the callout isn't pending, it's not on the queue, so
         * don't attempt to remove it from the queue.  We can try to
         * stop it by other means however.
         */
        if (!(c->c_flags & CALLOUT_PENDING)) {
                c->c_flags &= ~CALLOUT_ACTIVE;

                /*
                 * If it wasn't on the queue and it isn't the current
                 * callout, then we can't stop it, so just bail.
                 */
                if (cc->cc_curr != c) {
                        CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        CC_UNLOCK(cc);
                        if (sq_locked)
                                sleepq_release(&cc->cc_waiting);
                        return (0);
                }

                if (safe) {
                        /*
                         * The current callout is running (or just
                         * about to run) and blocking is allowed, so
                         * just wait for the current invocation to
                         * finish.
                         */
                        while (cc->cc_curr == c) {
                                /*
                                 * Use direct calls to sleepqueue interface
                                 * instead of cv/msleep in order to avoid
                                 * a LOR between cc_lock and sleepqueue
                                 * chain spinlocks.  This piece of code
                                 * emulates a msleep_spin() call actually.
                                 *
                                 * If we already have the sleepqueue chain
                                 * locked, then we can safely block.  If we
                                 * don't already have it locked, however,
                                 * we have to drop the cc_lock to lock
                                 * it.  This opens several races, so we
                                 * restart at the beginning once we have
                                 * both locks.  If nothing has changed, then
                                 * we will end up back here with sq_locked
                                 * set.
                                 */
                                if (!sq_locked) {
                                        CC_UNLOCK(cc);
                                        sleepq_lock(&cc->cc_waiting);
                                        sq_locked = 1;
                                        goto again;
                                }
                                cc->cc_waiting = 1;
                                DROP_GIANT();
                                CC_UNLOCK(cc);
                                sleepq_add(&cc->cc_waiting,
                                    &cc->cc_lock.lock_object, "codrain",
                                    SLEEPQ_SLEEP, 0);
                                sleepq_wait(&cc->cc_waiting, 0);
                                sq_locked = 0;

                                /* Reacquire locks previously released. */
                                PICKUP_GIANT();
                                CC_LOCK(cc);
                        }
                } else if (use_lock && !cc->cc_cancel) {
                        /*
                         * The current callout is waiting for its
                         * lock which we hold.  Cancel the callout
                         * and return.  After our caller drops the
                         * lock, the callout will be skipped in
                         * softclock().
                         */
                        cc->cc_cancel = 1;
                        CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        KASSERT(!sq_locked, ("sleepqueue chain locked"));
                        CC_UNLOCK(cc);
                        return (1);
                } else {
                        CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        CC_UNLOCK(cc);
                        KASSERT(!sq_locked, ("sleepqueue chain still locked"));
                        return (0);
                }
        }
        if (sq_locked)
                sleepq_release(&cc->cc_waiting);

        c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

        if (cc->cc_next == c) {
                cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
        }
        TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
            c_links.tqe);

        CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
            c, c->c_func, c->c_arg);

        if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
                c->c_flags = CALLOUT_LOCAL_ALLOC;
                SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
        }
        CC_UNLOCK(cc);
        return (1);
}

void
callout_init(struct callout *c, int mpsafe)
{
        bzero(c, sizeof *c);
        if (mpsafe) {
                c->c_lock = NULL;
                c->c_flags = CALLOUT_RETURNUNLOCKED;
        } else {
                c->c_lock = &Giant.lock_object;
                c->c_flags = 0;
        }
        c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
{
        bzero(c, sizeof *c);
        c->c_lock = lock;
        KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
            ("callout_init_lock: bad flags %d", flags));
        KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
            ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
        KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
            (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
            __func__));
        c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
        c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(struct timeval *time_change)
{
        struct callout *p;
        struct callout_cpu *cc;
        unsigned long delta_ticks;

        /*
         * How many ticks were we asleep?
         * (stolen from tvtohz()).
         */

        /* Don't do anything */
        if (time_change->tv_sec < 0)
                return;
        else if (time_change->tv_sec <= LONG_MAX / 1000000)
                delta_ticks = (time_change->tv_sec * 1000000 +
                               time_change->tv_usec + (tick - 1)) / tick + 1;
        else if (time_change->tv_sec <= LONG_MAX / hz)
                delta_ticks = time_change->tv_sec * hz +
                              (time_change->tv_usec + (tick - 1)) / tick + 1;
        else
                delta_ticks = LONG_MAX;

        if (delta_ticks > INT_MAX)
                delta_ticks = INT_MAX;
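
        /*
         * Worked example with hypothetical values: hz = 1000 (so
         * tick = 1000 usec) and a 2 second suspend give
         * delta_ticks = (2000000 + 0 + 999) / 1000 + 1 = 2001.
         */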

        /*
         * Now rip through the timer calltodo list looking for timers
         * to expire.
         */

        /* don't collide with softclock() */
        cc = CC_CPU(timeout_cpu);
        CC_LOCK(cc);
        for (p = calltodo.c_next; p != NULL; p = p->c_next) {
                p->c_time -= delta_ticks;

                /* Break if the timer had more time on it than delta_ticks */
                if (p->c_time > 0)
                        break;

                /* take back the ticks the timer didn't use (p->c_time <= 0) */
                delta_ticks = -p->c_time;
        }
        CC_UNLOCK(cc);

        return;
}
#endif /* APM_FIXUP_CALLTODO */