/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <errno.h>
#include <poll.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <setjmp.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <pthread.h>
#include "pthread_private.h"
/* #define DEBUG_THREAD_KERN */
#ifdef DEBUG_THREAD_KERN
#define DBG_MSG		stdout_debug
#else
#define DBG_MSG(x...)
#endif
/* Static function prototype definitions: */
static void
thread_kern_poll(int wait_reqd);

static void
dequeue_signals(void);

static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
/* Static variables: */
static int	last_tick = 0;
static int	called_from_handler = 0;
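/*
 * last_tick caches the value of _sched_ticks from the previous pass
 * through the scheduler, letting it detect whether a scheduling-clock
 * tick has occurred since then.  called_from_handler records that
 * _thread_kern_sched() was entered from the signal handler, so the
 * scheduler knows to restore the process signal mask.
 */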
/*
 * This is called when a signal handler finishes and wants to
 * return to a previous frame.
 */
void
_thread_kern_sched_frame(struct pthread_signal_frame *psf)
{
	struct pthread	*curthread = _get_curthread();

	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a signal from interrupting this execution and
	 * corrupting the (soon-to-be) current frame.
	 */
	_thread_kern_in_sched = 1;

	/* Restore the signal frame: */
	_thread_sigframe_restore(curthread, psf);

	/* The signal mask was restored; check for any pending signals: */
	curthread->check_pending = 1;

	/* Switch to the thread scheduler: */
	___longjmp(_thread_kern_sched_jb, 1);
}
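
/*
 * Enter the scheduler from a running thread.  When invoked by the
 * signal handler, ucp points at the interrupted context and the thread
 * is later resumed through sigreturn; on a voluntary yield, ucp is
 * NULL and resumption returns through the _setjmp() below.
 */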
void
_thread_kern_sched(ucontext_t *ucp)
{
	struct pthread	*curthread = _get_curthread();

	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;

	/* Check if this function was called from the signal handler: */
	if (ucp != NULL) {
		called_from_handler = 1;
		DBG_MSG("Entering scheduler due to signal\n");
	}

	/* Save the state of the current thread: */
	if (_setjmp(curthread->ctx.jb) != 0) {
		DBG_MSG("Returned from ___longjmp, thread %p\n",
		    curthread);
		/*
		 * This point is reached when a longjmp() is called
		 * to restore the state of a thread.
		 *
		 * This is the normal way out of the scheduler.
		 */
		_thread_kern_in_sched = 0;

		if (curthread->sig_defer_count == 0) {
			if (((curthread->cancelflags &
			    PTHREAD_AT_CANCEL_POINT) == 0) &&
			    ((curthread->cancelflags &
			    PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
				/*
				 * Cancellations override signals.
				 *
				 * Stick a cancellation point at the
				 * start of each async-cancellable
				 * thread's resumption.
				 *
				 * We allow threads woken at cancel
				 * points to do their own checks.
				 */
				pthread_testcancel();
		}

		if (_sched_switch_hook != NULL) {
			/* Run the installed switch hook: */
			thread_run_switch_hook(_last_user_thread, curthread);
		}
		if (ucp == NULL)
			return;
		else {
			/*
			 * Set the process signal mask in the context; it
			 * could have been changed by the handler.
			 */
			ucp->uc_sigmask = _process_sigmask;

			/* Resume the interrupted thread: */
			__sys_sigreturn(ucp);
		}
	}
	/* Switch to the thread scheduler: */
	___longjmp(_thread_kern_sched_jb, 1);
}
void
_thread_kern_sched_sig(void)
{
	struct pthread	*curthread = _get_curthread();

	curthread->check_pending = 1;
	_thread_kern_sched(NULL);
}
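
/*
 * The scheduler proper.  Each pass files the outgoing thread on the
 * queue(s) its state calls for, polls file descriptors, wakes threads
 * whose timeouts have expired, and then longjmps into the highest
 * priority runnable thread.  The loop ends only when the thread list
 * itself is empty.
 */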
void
_thread_kern_scheduler(void)
{
	struct timespec	ts;
	struct timeval	tv;
	struct pthread	*curthread = _get_curthread();
	pthread_t	pthread, pthread_h;
	unsigned int	current_tick;
	int		add_to_prioq;

	/* If the currently running thread is a user thread, save it: */
	if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0)
		_last_user_thread = curthread;
	if (called_from_handler != 0) {
		called_from_handler = 0;

		/*
		 * We were called from a signal handler; restore the process
		 * signal mask.
		 */
		if (__sys_sigprocmask(SIG_SETMASK,
		    &_process_sigmask, NULL) != 0)
			PANIC("Unable to restore process mask after signal");
	}
	/*
	 * Enter a scheduling loop that finds the next thread that is
	 * ready to run.  This loop completes when there are no more threads
	 * in the global list or when a thread has its state restored by
	 * either a sigreturn (if the state was saved as a sigcontext) or a
	 * longjmp (if the state was saved by a setjmp).
	 */
	while (!(TAILQ_EMPTY(&_thread_list))) {
		/* Get the current time of day: */
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);
		current_tick = _sched_ticks;
		/*
		 * Protect the scheduling queues from access by the signal
		 * handler.
		 */
		_queue_signals = 1;

		add_to_prioq = 0;
		if (curthread != &_thread_kern_thread) {
			/*
			 * This thread no longer needs to yield the CPU.
			 */
			curthread->yield_on_sig_undefer = 0;

			if (curthread->state != PS_RUNNING) {
				/*
				 * Save the current time as the time that the
				 * thread became inactive:
				 */
				curthread->last_inactive = (long)current_tick;
				if (curthread->last_inactive <
				    curthread->last_active) {
					/* Account for a rollover: */
					curthread->last_inactive +=
					    UINT_MAX + 1;
				}
			}
			/*
			 * Place the currently running thread into the
			 * appropriate queue(s).
			 */
			switch (curthread->state) {
			case PS_DEAD:
			case PS_STATE_MAX: /* to silence -Wall */
			case PS_SUSPENDED:
				/*
				 * Dead and suspended threads are not placed
				 * in any queue:
				 */
				break;

			case PS_RUNNING:
				/*
				 * Runnable threads can't be placed in the
				 * priority queue until after waiting threads
				 * are polled (to preserve round-robin
				 * scheduling).
				 */
				add_to_prioq = 1;
				break;

			/*
			 * States which do not depend on file descriptor I/O
			 * operations or timeouts:
			 */
			case PS_DEADLOCK:
			case PS_FDLR_WAIT:
			case PS_FDLW_WAIT:
			case PS_FILE_WAIT:
			case PS_JOIN:
			case PS_MUTEX_WAIT:
			case PS_SIGSUSPEND:
			case PS_SIGTHREAD:
			case PS_SIGWAIT:
			case PS_WAIT_WAIT:
				/* No timeouts for these states: */
				curthread->wakeup_time.tv_sec = -1;
				curthread->wakeup_time.tv_nsec = -1;

				/* Restart the time slice: */
				curthread->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(curthread);
				break;

			/* States which can timeout: */
			case PS_COND_WAIT:
			case PS_SLEEP_WAIT:
				/* Restart the time slice: */
				curthread->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(curthread);
				break;

			/* States that require periodic work: */
			case PS_SPINBLOCK:
				/* No timeouts for this state: */
				curthread->wakeup_time.tv_sec = -1;
				curthread->wakeup_time.tv_nsec = -1;

				/* Increment spinblock count: */
				_spinblock_count++;

				/* FALLTHROUGH */
			case PS_FDR_WAIT:
			case PS_FDW_WAIT:
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				/* Restart the time slice: */
				curthread->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(curthread);

				/* Insert into the work queue: */
				PTHREAD_WORKQ_INSERT(curthread);
				break;
			}
			/*
			 * Are there pending signals for this thread?
			 *
			 * This check has to be performed after the thread
			 * has been placed in the queue(s) appropriate for
			 * its state.  The process of adding pending signals
			 * can change a thread's state, which in turn will
			 * attempt to add or remove the thread from any
			 * scheduling queue to which it belongs.
			 */
			if (curthread->check_pending != 0) {
				curthread->check_pending = 0;
				_thread_sig_check_pending(curthread);
			}
		}
		/*
		 * Avoid polling file descriptors if there are none
		 * waiting:
		 */
		if (TAILQ_EMPTY(&_workq) != 0) {
		}
		/*
		 * Poll file descriptors only if a new scheduling signal
		 * has occurred or if we have no more runnable threads.
		 */
		else if (((current_tick = _sched_ticks) != last_tick) ||
		    ((curthread->state != PS_RUNNING) &&
		    (PTHREAD_PRIOQ_FIRST() == NULL))) {
			/* Unprotect the scheduling queues: */
			_queue_signals = 0;

			/*
			 * Poll file descriptors to update the state of threads
			 * waiting on file I/O where data may be available:
			 */
			thread_kern_poll(0);

			/* Protect the scheduling queues: */
			_queue_signals = 1;
		}
		last_tick = current_tick;
		/*
		 * Wake up threads that have timed out.  This has to be
		 * done after polling in case a thread does a poll or
		 * select with zero time.
		 */
		PTHREAD_WAITQ_SETACTIVE();
		while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
		    (pthread->wakeup_time.tv_sec != -1) &&
		    (((pthread->wakeup_time.tv_sec == 0) &&
		    (pthread->wakeup_time.tv_nsec == 0)) ||
		    (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
		    ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
		    (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
			switch (pthread->state) {
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				/* Return zero file descriptors ready: */
				pthread->data.poll_data->nfds = 0;
				/* FALLTHROUGH */
			default:
				/*
				 * Remove this thread from the waiting queue
				 * (and work queue if necessary) and place it
				 * in the ready queue.
				 */
				PTHREAD_WAITQ_CLEARACTIVE();
				if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
					PTHREAD_WORKQ_REMOVE(pthread);
				PTHREAD_NEW_STATE(pthread, PS_RUNNING);
				PTHREAD_WAITQ_SETACTIVE();
				break;
			}
			/*
			 * Flag the timeout in the thread structure:
			 */
			pthread->timeout = 1;
		}
		PTHREAD_WAITQ_CLEARACTIVE();
		/*
		 * Check to see if the current thread needs to be added
		 * to the priority queue:
		 */
		if (add_to_prioq != 0) {
			/*
			 * Save the current time as the time that the
			 * thread became inactive:
			 */
			current_tick = _sched_ticks;
			curthread->last_inactive = (long)current_tick;
			if (curthread->last_inactive <
			    curthread->last_active) {
				/* Account for a rollover: */
				curthread->last_inactive += UINT_MAX + 1;
			}

			if ((curthread->slice_usec != -1) &&
			    (curthread->attr.sched_policy != SCHED_FIFO)) {
				/*
				 * Accumulate the number of microseconds for
				 * which the current thread has run:
				 */
				curthread->slice_usec +=
				    (curthread->last_inactive -
				    curthread->last_active) *
				    (long)_clock_res_usec;
				/* Check for time quantum exceeded: */
				if (curthread->slice_usec > TIMESLICE_USEC)
					curthread->slice_usec = -1;
			}

			if (curthread->slice_usec == -1) {
				/*
				 * The thread exceeded its time
				 * quantum or it yielded the CPU;
				 * place it at the tail of the
				 * queue for its priority.
				 */
				PTHREAD_PRIOQ_INSERT_TAIL(curthread);
			} else {
				/*
				 * The thread hasn't exceeded its
				 * interval.  Place it at the head
				 * of the queue for its priority.
				 */
				PTHREAD_PRIOQ_INSERT_HEAD(curthread);
			}
		}
		/*
		 * Get the highest priority thread in the ready queue.
		 */
		pthread_h = PTHREAD_PRIOQ_FIRST();

		/* Check if there are no threads ready to run: */
		if (pthread_h == NULL) {
			/*
			 * Lock the pthread kernel by changing the pointer to
			 * the running thread to point to the global kernel
			 * thread structure:
			 */
			_set_curthread(&_thread_kern_thread);
			curthread = &_thread_kern_thread;

			DBG_MSG("No runnable threads, using kernel thread %p\n",
			    curthread);

			/* Unprotect the scheduling queues: */
			_queue_signals = 0;

			/*
			 * There are no threads ready to run, so wait until
			 * something happens that changes this condition:
			 */
			thread_kern_poll(1);

			/*
			 * This process' usage will likely be very small
			 * while waiting in a poll.  Since the scheduling
			 * clock is based on the profiling timer, it is
			 * unlikely that the profiling timer will fire
			 * and update the time of day.  To account for this,
			 * get the time of day after polling with a timeout.
			 */
			gettimeofday((struct timeval *) &_sched_tod, NULL);

			/* Check once more for a runnable thread: */
			_queue_signals = 1;
			pthread_h = PTHREAD_PRIOQ_FIRST();
			_queue_signals = 0;
		}
		if (pthread_h != NULL) {
			/* Remove the thread from the ready queue: */
			PTHREAD_PRIOQ_REMOVE(pthread_h);

			/* Unprotect the scheduling queues: */
			_queue_signals = 0;

			/*
			 * Check for signals queued while the scheduling
			 * queues were protected:
			 */
			while (_sigq_check_reqd != 0) {
				/* Clear before handling queued signals: */
				_sigq_check_reqd = 0;

				/* Protect the scheduling queues again: */
				_queue_signals = 1;

				dequeue_signals();

				/*
				 * Check for a higher priority thread that
				 * became runnable due to signal handling.
				 */
				if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
				    (pthread->active_priority > pthread_h->active_priority)) {
					/* Remove the thread from the ready queue: */
					PTHREAD_PRIOQ_REMOVE(pthread);

					/*
					 * Insert the lower priority thread
					 * at the head of its priority list:
					 */
					PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);

					/* There's a new thread in town: */
					pthread_h = pthread;
				}

				/* Unprotect the scheduling queues: */
				_queue_signals = 0;
			}

			/* Make the selected thread the current thread: */
			_set_curthread(pthread_h);
			curthread = pthread_h;
			/*
			 * Save the current time as the time that the thread
			 * became active:
			 */
			current_tick = _sched_ticks;
			curthread->last_active = (long) current_tick;

			/*
			 * Check if this thread is running for the first time
			 * or running again after using its full time slice
			 * allocation:
			 */
			if (curthread->slice_usec == -1) {
				/* Reset the accumulated time slice period: */
				curthread->slice_usec = 0;
			}

			/*
			 * If we had a context switch, run any
			 * installed switch hooks.
			 */
			if ((_sched_switch_hook != NULL) &&
			    (_last_user_thread != curthread)) {
				thread_run_switch_hook(_last_user_thread,
				    curthread);
			}
			/*
			 * Continue the thread at its current frame:
			 */
#if NOT_YET
			_setcontext(&curthread->ctx.uc);
#else
			___longjmp(curthread->ctx.jb, 1);
#endif
			/* This point should not be reached. */
			PANIC("Thread has returned from sigreturn or longjmp");
		}
	}
	/* There are no more threads, so exit this process: */
	exit(0);
}
void
_thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
{
	struct pthread	*curthread = _get_curthread();

	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;

	/*
	 * Prevent the signal handler from fiddling with this thread
	 * before its state is set and is placed into the proper queue.
	 */
	_queue_signals = 1;

	/* Change the state of the current thread: */
	curthread->state = state;
	curthread->fname = fname;
	curthread->lineno = lineno;

	/* Schedule the next thread that is ready: */
	_thread_kern_sched(NULL);
}
void
_thread_kern_sched_state_unlock(enum pthread_state state,
    spinlock_t *lock, char *fname, int lineno)
{
	struct pthread	*curthread = _get_curthread();

	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;

	/*
	 * Prevent the signal handler from fiddling with this thread
	 * before its state is set and it is placed into the proper
	 * queue(s).
	 */
	_queue_signals = 1;

	/* Change the state of the current thread: */
	curthread->state = state;
	curthread->fname = fname;
	curthread->lineno = lineno;

	_SPINUNLOCK(lock);

	/* Schedule the next thread that is ready: */
	_thread_kern_sched(NULL);
}
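
/*
 * Build a pollfd table covering every thread blocked on file I/O,
 * issue a single poll(2) on behalf of the whole process, and mark
 * threads whose descriptors (or spinlocks) became ready as runnable.
 * wait_reqd selects between a non-blocking scan and sleeping until
 * the nearest thread timeout.
 */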
static void
thread_kern_poll(int wait_reqd)
{
	int		count = 0;
	int		i, found;
	int		kern_pipe_added = 0;
	int		nfds = 0;
	int		timeout_ms = 0;
	struct pthread	*pthread;
	struct timespec	ts;
	struct timeval	tv;

	/* Check if the caller wants to wait: */
	if (wait_reqd == 0) {
		timeout_ms = 0;
	}
	else {
		/* Get the current time of day: */
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		_queue_signals = 1;
		pthread = TAILQ_FIRST(&_waitingq);
		_queue_signals = 0;

		if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
			/*
			 * Either there are no threads in the waiting queue,
			 * or there are no threads that can timeout.
			 */
			timeout_ms = INFTIM;
		}
		else if (pthread->wakeup_time.tv_sec - ts.tv_sec > 60000)
			/* Limit maximum timeout to prevent rollover. */
			timeout_ms = 60000000;
		else {
			/*
			 * Calculate the time left for the next thread to
			 * timeout:
			 */
			timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
			    1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
			    1000000);
			/*
			 * Don't allow negative timeouts:
			 */
			if (timeout_ms < 0)
				timeout_ms = 0;
		}
	}
	/* Protect the scheduling queues: */
	_queue_signals = 1;

	/*
	 * Check to see if the signal queue needs to be walked to look
	 * for threads awoken by a signal while in the scheduler.
	 */
	if (_sigq_check_reqd != 0) {
		/* Reset flag before handling queued signals: */
		_sigq_check_reqd = 0;

		dequeue_signals();
	}

	/*
	 * Check for a thread that became runnable due to a signal:
	 */
	if (PTHREAD_PRIOQ_FIRST() != NULL) {
		/*
		 * Since there is at least one runnable thread,
		 * disable the wait.
		 */
		timeout_ms = 0;
	}
	/*
	 * Form the poll table:
	 */
	nfds = 0;
	if (timeout_ms != 0) {
		/* Add the kernel pipe to the poll table: */
		_thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
		_thread_pfd_table[nfds].events = POLLRDNORM;
		_thread_pfd_table[nfds].revents = 0;
		nfds++;
		kern_pipe_added = 1;
	}
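
	/*
	 * Slot 0 of the table is the scheduler's kernel pipe whenever the
	 * poll may block, so that a signal arriving in the meantime can
	 * wake it.  The slots that follow are filled in work-queue order;
	 * the readiness scan after the poll walks the work queue in the
	 * same order, keeping its running nfds index in step with the
	 * table built here.
	 */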
	PTHREAD_WAITQ_SETACTIVE();
	TAILQ_FOREACH(pthread, &_workq, qe) {
		switch (pthread->state) {
		case PS_SPINBLOCK:
			/*
			 * If the lock is available, let the thread run.
			 */
			if (pthread->data.spinlock->access_lock == 0) {
				PTHREAD_WAITQ_CLEARACTIVE();
				PTHREAD_WORKQ_REMOVE(pthread);
				PTHREAD_NEW_STATE(pthread, PS_RUNNING);
				PTHREAD_WAITQ_SETACTIVE();
				/* One less thread in a spinblock state: */
				_spinblock_count--;
				/*
				 * Since there is at least one runnable
				 * thread, disable the wait.
				 */
				timeout_ms = 0;
			}
			break;

		/* File descriptor read wait: */
		case PS_FDR_WAIT:
			/* Limit number of polled files to table size: */
			if (nfds < _thread_dtablesize) {
				_thread_pfd_table[nfds].events = POLLRDNORM;
				_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
				nfds++;
			}
			break;

		/* File descriptor write wait: */
		case PS_FDW_WAIT:
			/* Limit number of polled files to table size: */
			if (nfds < _thread_dtablesize) {
				_thread_pfd_table[nfds].events = POLLWRNORM;
				_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
				nfds++;
			}
			break;

		/* File descriptor poll or select wait: */
		case PS_POLL_WAIT:
		case PS_SELECT_WAIT:
			/* Limit number of polled files to table size: */
			if (pthread->data.poll_data->nfds + nfds <
			    _thread_dtablesize) {
				for (i = 0; i < pthread->data.poll_data->nfds; i++) {
					_thread_pfd_table[nfds + i].fd =
					    pthread->data.poll_data->fds[i].fd;
					_thread_pfd_table[nfds + i].events =
					    pthread->data.poll_data->fds[i].events;
				}
				nfds += pthread->data.poll_data->nfds;
			}
			break;

		/* Other states do not depend on file I/O. */
		default:
			break;
		}
	}
	PTHREAD_WAITQ_CLEARACTIVE();
	/*
	 * Wait for a file descriptor to be ready for read, write, or
	 * an exception, or a timeout to occur:
	 */
	count = __sys_poll(_thread_pfd_table, nfds, timeout_ms);

	if (kern_pipe_added != 0)
		/*
		 * Remove the pthread kernel pipe file descriptor
		 * from the pollfd table:
		 */
		nfds = 1;
	else
		nfds = 0;

	/*
	 * Check if it is possible that there are bytes in the kernel
	 * read pipe waiting to be read:
	 */
	if (count < 0 || ((kern_pipe_added != 0) &&
	    (_thread_pfd_table[0].revents & POLLRDNORM))) {
		/*
		 * If the kernel read pipe was included in the
		 * count:
		 */
		if (count > 0) {
			/* Decrement the count of file descriptors: */
			count--;
		}

		if (_sigq_check_reqd != 0) {
			/* Reset flag before handling signals: */
			_sigq_check_reqd = 0;

			dequeue_signals();
		}
	}
	/*
	 * Check if any file descriptors are ready:
	 */
	if (count > 0) {
		/*
		 * Enter a loop to look for threads waiting on file
		 * descriptors that are flagged as available by the
		 * _poll syscall:
		 */
		PTHREAD_WAITQ_SETACTIVE();
		TAILQ_FOREACH(pthread, &_workq, qe) {
			switch (pthread->state) {
			case PS_SPINBLOCK:
				/*
				 * If the lock is available, let the thread run.
				 */
				if (pthread->data.spinlock->access_lock == 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread, PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();

					/*
					 * One less thread in a spinblock state:
					 */
					_spinblock_count--;
				}
				break;

			/* File descriptor read wait: */
			case PS_FDR_WAIT:
				if ((nfds < _thread_dtablesize) &&
				    (_thread_pfd_table[nfds].revents
				    & (POLLRDNORM|POLLERR|POLLHUP|POLLNVAL))
				    != 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread, PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();
				}
				nfds++;
				break;

			/* File descriptor write wait: */
			case PS_FDW_WAIT:
				if ((nfds < _thread_dtablesize) &&
				    (_thread_pfd_table[nfds].revents
				    & (POLLWRNORM|POLLERR|POLLHUP|POLLNVAL))
				    != 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread, PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();
				}
				nfds++;
				break;

			/* File descriptor poll or select wait: */
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				if (pthread->data.poll_data->nfds + nfds <
				    _thread_dtablesize) {
					/*
					 * Enter a loop looking for I/O
					 * readiness:
					 */
					found = 0;
					for (i = 0; i < pthread->data.poll_data->nfds; i++) {
						if (_thread_pfd_table[nfds + i].revents != 0) {
							pthread->data.poll_data->fds[i].revents =
							    _thread_pfd_table[nfds + i].revents;
							found++;
						}
					}

					/* Increment before destroying: */
					nfds += pthread->data.poll_data->nfds;

					if (found != 0) {
						pthread->data.poll_data->nfds = found;
						PTHREAD_WAITQ_CLEARACTIVE();
						PTHREAD_WORKQ_REMOVE(pthread);
						PTHREAD_NEW_STATE(pthread, PS_RUNNING);
						PTHREAD_WAITQ_SETACTIVE();
					}
				}
				else
					nfds += pthread->data.poll_data->nfds;
				break;

			/* Other states do not depend on file I/O. */
			default:
				break;
			}
		}
		PTHREAD_WAITQ_CLEARACTIVE();
	}
	else if (_spinblock_count != 0) {
		/*
		 * Enter a loop to look for threads waiting on a spinlock
		 * that is now available.
		 */
		PTHREAD_WAITQ_SETACTIVE();
		TAILQ_FOREACH(pthread, &_workq, qe) {
			if (pthread->state == PS_SPINBLOCK) {
				/*
				 * If the lock is available, let the thread run.
				 */
				if (pthread->data.spinlock->access_lock == 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread, PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();

					/*
					 * One less thread in a spinblock state:
					 */
					_spinblock_count--;
				}
			}
		}
		PTHREAD_WAITQ_CLEARACTIVE();
	}
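
	/*
	 * Spin-blocked threads must be rechecked here because releasing a
	 * user-level spinlock is just a store; nothing notifies the
	 * scheduler that the lock became free.  _spinblock_count allows
	 * this scan to be skipped entirely when no thread is in
	 * PS_SPINBLOCK.
	 */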
	/* Unprotect the scheduling queues: */
	_queue_signals = 0;

	while (_sigq_check_reqd != 0) {
		/* Handle queued signals: */
		_sigq_check_reqd = 0;

		/* Protect the scheduling queues: */
		_queue_signals = 1;

		dequeue_signals();

		/* Unprotect the scheduling queues: */
		_queue_signals = 0;
	}
}
void
_thread_kern_set_timeout(const struct timespec * timeout)
{
	struct pthread	*curthread = _get_curthread();
	struct timespec current_time;
	struct timeval  tv;

	/* Reset the timeout flag for the running thread: */
	curthread->timeout = 0;

	/* Check if the thread is to wait forever: */
	if (timeout == NULL) {
		/*
		 * Set the wakeup time to something that can be recognised as
		 * different to an actual time of day:
		 */
		curthread->wakeup_time.tv_sec = -1;
		curthread->wakeup_time.tv_nsec = -1;
	}
	/* Check if no waiting is required: */
	else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
		/* Set the wake up time to 'immediately': */
		curthread->wakeup_time.tv_sec = 0;
		curthread->wakeup_time.tv_nsec = 0;
	} else {
		/* Get the current time: */
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time);

		/* Calculate the time for the current thread to wake up: */
		curthread->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
		curthread->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec;

		/* Check if the nanosecond field needs to wrap: */
		if (curthread->wakeup_time.tv_nsec >= 1000000000) {
			/* Wrap the nanosecond field: */
			curthread->wakeup_time.tv_sec += 1;
			curthread->wakeup_time.tv_nsec -= 1000000000;
		}
	}
}
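
/*
 * The encodings chosen above are exactly what the scheduler's wakeup
 * scan tests for: tv_sec == -1 means "no timeout", a zero time of day
 * means "wake immediately", and anything else is an absolute wakeup
 * time compared against the current time of day.
 */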
void
_thread_kern_sig_defer(void)
{
	struct pthread	*curthread = _get_curthread();

	/* Allow signal deferral to be recursive. */
	curthread->sig_defer_count++;
}
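
/*
 * Deferral gives a cheap critical section: bumping a per-thread count
 * is far less expensive than masking signals with sigprocmask(2)
 * around every queue operation.  While the count is nonzero, the
 * signal-handling code is expected to postpone preemption and flag
 * yield_on_sig_undefer instead; the undefer below then pays that debt
 * by calling the scheduler.
 */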
void
_thread_kern_sig_undefer(void)
{
	struct pthread	*curthread = _get_curthread();

	/*
	 * Perform checks to yield only if we are about to undefer
	 * signals.
	 */
	if (curthread->sig_defer_count > 1) {
		/* Decrement the signal deferral count. */
		curthread->sig_defer_count--;
	}
	else if (curthread->sig_defer_count == 1) {
		/* Reenable signals: */
		curthread->sig_defer_count = 0;

		/*
		 * Check if there are queued signals:
		 */
		if (_sigq_check_reqd != 0)
			_thread_kern_sched(NULL);

		/*
		 * Check for asynchronous cancellation before delivering any
		 * pending signals:
		 */
		if (((curthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
		    ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
			pthread_testcancel();

		/*
		 * If there are pending signals or this thread has
		 * to yield the CPU, call the kernel scheduler:
		 *
		 * XXX - Come back and revisit the pending signal problem.
		 */
		if ((curthread->yield_on_sig_undefer != 0) ||
		    SIGNOTEMPTY(curthread->sigpend)) {
			curthread->yield_on_sig_undefer = 0;
			_thread_kern_sched(NULL);
		}
	}
}
static void
dequeue_signals(void)
{
	char	bufr[128];
	int	num;

	/*
	 * Enter a loop to clear the pthread kernel pipe:
	 */
	while (((num = __sys_read(_thread_kern_pipe[0], bufr,
	    sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
	}
	if ((num < 0) && (errno != EAGAIN)) {
		/*
		 * The only error we should expect is if there is
		 * no data to read (EAGAIN):
		 */
		PANIC("Unable to read from thread kernel pipe");
	}
	/* Handle any pending signals: */
	_thread_sig_handle_pending();
}
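
/*
 * The kernel pipe is the rendezvous between asynchronous signal
 * delivery and the scheduler: the signal-handling side (outside this
 * file) writes a byte to _thread_kern_pipe[1] so that a scheduler
 * blocked in poll(2) wakes up; the drain above empties the pipe
 * before the pending signals are processed.
 */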
static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
{
	pthread_t tid_out = thread_out;
	pthread_t tid_in = thread_in;

	if ((tid_out != NULL) &&
	    (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0)
		tid_out = NULL;
	if ((tid_in != NULL) &&
	    (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0)
		tid_in = NULL;

	if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
		/* Run the scheduler switch hook: */
		_sched_switch_hook(tid_out, tid_in);
	}
}
struct pthread *
_get_curthread(void)
{
	if (_thread_initial == NULL)
		_thread_init();

	return (_thread_run);
}

void
_set_curthread(struct pthread *newthread)
{
	_thread_run = newthread;
}