/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <errno.h>
#include <poll.h>
#include <setjmp.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <pthread.h>
#include "pthread_private.h"

/* Static function prototype definitions: */
static void
_thread_kern_poll(int wait_reqd);

static void
dequeue_signals(void);

static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);

void
_thread_kern_sched(ucontext_t * scp)
{
	char	*fdata;
	pthread_t pthread, pthread_h = NULL;
	pthread_t last_thread = NULL;
	struct itimerval itimer;
	struct timespec ts, ts1;
	struct timeval tv, tv1;
	int	set_timer = 0;
	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;
	/* Check if this function was called from the signal handler: */
	if (scp != NULL) {
		/*
		 * Copy the signal context to the current thread's saved
		 * signal context:
		 */
		memcpy(&_thread_run->saved_sigcontext, scp, sizeof(_thread_run->saved_sigcontext));
		/* Point to the floating point data in the running thread: */
		fdata = _thread_run->saved_fp;

		/* Save the floating point data: */
		__asm__("fnsave %0": :"m"(*fdata));
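		/*
		 * Note: fnsave stores the FPU state and also reinitializes
		 * the FPU, so the state must be reloaded with frstor before
		 * the thread resumes (done below). Both are i386-specific
		 * instructions.
		 */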
		/* Flag the signal context as the last state saved: */
		_thread_run->sig_saved = 1;
	}
	/* Save the state of the current thread: */
	else if (setjmp(_thread_run->saved_jmp_buf) != 0) {
		/*
		 * This point is reached when a longjmp() is called to
		 * restore the state of a thread.
		 *
		 * This is the normal way out of the scheduler.
		 */
		_thread_kern_in_sched = 0;
		if (_sched_switch_hook != NULL) {
			/* Run the installed switch hook: */
			thread_run_switch_hook(_last_user_thread, _thread_run);
		}
		return;
	} else
		/* Flag the jump buffer was the last state saved: */
		_thread_run->sig_saved = 0;
	/* If the currently running thread is a user thread, save it: */
	if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
		_last_user_thread = _thread_run;
	/*
	 * Enter a scheduling loop that finds the next thread that is
	 * ready to run. This loop completes when there are no more threads
	 * in the global list or when a thread has its state restored by
	 * either a sigreturn (if the state was saved as a sigcontext) or a
	 * longjmp (if the state was saved by a setjmp).
	 */
	while (!(TAILQ_EMPTY(&_thread_list))) {
		/* Get the current time of day: */
		gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);
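		/*
		 * (TIMEVAL_TO_TIMESPEC simply converts the microsecond
		 * field to nanoseconds, i.e. tv_nsec = tv_usec * 1000.)
		 */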
		/*
		 * Protect the scheduling queues from access by the signal
		 * handler:
		 */
		_queue_signals = 1;

		if (_thread_run != &_thread_kern_thread) {
			/*
			 * This thread no longer needs to yield the CPU.
			 */
			_thread_run->yield_on_sig_undefer = 0;
			/*
			 * Save the current time as the time that the thread
			 * became inactive:
			 */
			_thread_run->last_inactive.tv_sec = tv.tv_sec;
			_thread_run->last_inactive.tv_usec = tv.tv_usec;
			/*
			 * Place the currently running thread into the
			 * appropriate queue(s).
			 */
			switch (_thread_run->state) {
			case PS_DEAD:
				/*
				 * Dead threads are not placed in any queue:
				 */
				break;
			case PS_RUNNING:
				/*
				 * Runnable threads can't be placed in the
				 * priority queue until after waiting threads
				 * are polled (to preserve round-robin
				 * scheduling).
				 */
				if ((_thread_run->slice_usec != -1) &&
				    (_thread_run->attr.sched_policy != SCHED_FIFO)) {
					/*
					 * Accumulate the number of microseconds that
					 * this thread has run for:
					 */
					_thread_run->slice_usec +=
					    (_thread_run->last_inactive.tv_sec -
					    _thread_run->last_active.tv_sec) * 1000000 +
					    _thread_run->last_inactive.tv_usec -
					    _thread_run->last_active.tv_usec;
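
					/*
					 * For example, a thread active from
					 * t = 10.999900s to t = 11.000400s
					 * accumulates (11 - 10) * 1000000 +
					 * (400 - 999900) = 500 usec; the
					 * signed arithmetic handles the
					 * second boundary correctly.
					 */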
					/* Check for time quantum exceeded: */
					if (_thread_run->slice_usec > TIMESLICE_USEC)
						_thread_run->slice_usec = -1;
				}
				break;
			/*
			 * States which do not depend on file descriptor I/O
			 * operations or timeouts:
			 */
			case PS_DEADLOCK:
			case PS_FDLR_WAIT:
			case PS_FDLW_WAIT:
			case PS_FILE_WAIT:
			case PS_JOIN:
			case PS_MUTEX_WAIT:
			case PS_SIGSUSPEND:
			case PS_SIGTHREAD:
			case PS_SIGWAIT:
			case PS_SUSPENDED:
			case PS_WAIT_WAIT:
				/* No timeouts for these states: */
				_thread_run->wakeup_time.tv_sec = -1;
				_thread_run->wakeup_time.tv_nsec = -1;

				/* Restart the time slice: */
				_thread_run->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(_thread_run);
				break;
			/* States which can timeout: */
			case PS_COND_WAIT:
			case PS_SLEEP_WAIT:
				/* Restart the time slice: */
				_thread_run->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(_thread_run);
				break;
			/* States that require periodic work: */
			case PS_SPINBLOCK:
				/* No timeouts for this state: */
				_thread_run->wakeup_time.tv_sec = -1;
				_thread_run->wakeup_time.tv_nsec = -1;

				/* Increment spinblock count: */
				_spinblock_count++;

				/* FALLTHROUGH */
			case PS_FDR_WAIT:
			case PS_FDW_WAIT:
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				/* Restart the time slice: */
				_thread_run->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(_thread_run);

				/* Insert into the work queue: */
				PTHREAD_WORKQ_INSERT(_thread_run);
				break;
			}
		}
		/* Unprotect the scheduling queues: */
		_queue_signals = 0;
		/*
		 * Poll file descriptors to update the state of threads
		 * waiting on file I/O where data may be available:
		 */
		_thread_kern_poll(0);
		/* Protect the scheduling queues: */
		_queue_signals = 1;
		/*
		 * Wake up threads that have timed out.  This has to be
		 * done after polling in case a thread does a poll or
		 * select with zero time.
		 */
		PTHREAD_WAITQ_SETACTIVE();
		while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
		    (pthread->wakeup_time.tv_sec != -1) &&
		    (((pthread->wakeup_time.tv_sec == 0) &&
		    (pthread->wakeup_time.tv_nsec == 0)) ||
		    (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
		    ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
		    (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
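			/*
			 * Note: a wakeup_time of {-1, -1} means "no timeout"
			 * and never qualifies here; {0, 0} means "wake
			 * immediately" and always qualifies; any other value
			 * is an absolute time compared against the current
			 * time in ts.
			 */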
			switch (pthread->state) {
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				/* Return zero file descriptors ready: */
				pthread->data.poll_data->nfds = 0;
				/* FALLTHROUGH */
			default:
				/*
				 * Remove this thread from the waiting queue
				 * (and work queue if necessary) and place it
				 * in the ready queue.
				 */
				PTHREAD_WAITQ_CLEARACTIVE();
				if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
					PTHREAD_WORKQ_REMOVE(pthread);
				PTHREAD_NEW_STATE(pthread, PS_RUNNING);
				PTHREAD_WAITQ_SETACTIVE();
				break;
			}
			/*
			 * Flag the timeout in the thread structure:
			 */
			pthread->timeout = 1;
		}
		PTHREAD_WAITQ_CLEARACTIVE();
		/*
		 * Check if there is a current runnable thread that isn't
		 * already in the ready queue:
		 */
		if ((_thread_run != &_thread_kern_thread) &&
		    (_thread_run->state == PS_RUNNING) &&
		    ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
			if (_thread_run->slice_usec == -1) {
				/*
				 * The thread exceeded its time
				 * quantum or it yielded the CPU;
				 * place it at the tail of the
				 * queue for its priority.
				 */
				PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
			} else {
				/*
				 * The thread hasn't exceeded its
				 * interval.  Place it at the head
				 * of the queue for its priority.
				 */
				PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
			}
		}
		/*
		 * Get the highest priority thread in the ready queue.
		 */
		pthread_h = PTHREAD_PRIOQ_FIRST();
		/* Check if there are no threads ready to run: */
		if (pthread_h == NULL) {
			/*
			 * Lock the pthread kernel by changing the pointer to
			 * the running thread to point to the global kernel
			 * thread structure:
			 */
			_thread_run = &_thread_kern_thread;
			/* Unprotect the scheduling queues: */
			_queue_signals = 0;
			/*
			 * There are no threads ready to run, so wait until
			 * something happens that changes this condition:
			 */
			_thread_kern_poll(1);
		} else {
			/* Remove the thread from the ready queue: */
			PTHREAD_PRIOQ_REMOVE(pthread_h);
			/* Get first thread on the waiting list: */
			pthread = TAILQ_FIRST(&_waitingq);

			/* Check to see if there is more than one thread: */
			if (pthread_h != TAILQ_FIRST(&_thread_list) ||
			    TAILQ_NEXT(pthread_h, tle) != NULL)
				set_timer = 1;
			/* Unprotect the scheduling queues: */
			_queue_signals = 0;
			/*
			 * Check for signals queued while the scheduling
			 * queues were protected:
			 */
			while (_sigq_check_reqd != 0) {
				/* Clear before handling queued signals: */
				_sigq_check_reqd = 0;

				/* Protect the scheduling queues again: */
				_queue_signals = 1;

				/* Dequeue and handle the queued signals: */
				dequeue_signals();
				/*
				 * Check for a higher priority thread that
				 * became runnable due to signal handling.
				 */
				if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
				    (pthread->active_priority > pthread_h->active_priority)) {
					/*
					 * Insert the lower priority thread
					 * at the head of its priority list:
					 */
					PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);

					/* Remove the thread from the ready queue: */
					PTHREAD_PRIOQ_REMOVE(pthread);

					/* There's a new thread in town: */
					pthread_h = pthread;
					/* Get first thread on the waiting list: */
					pthread = TAILQ_FIRST(&_waitingq);

					/*
					 * Check to see if there is more than one
					 * thread:
					 */
					if (pthread_h != TAILQ_FIRST(&_thread_list) ||
					    TAILQ_NEXT(pthread_h, tle) != NULL)
						set_timer = 1;
				}

				/* Unprotect the scheduling queues: */
				_queue_signals = 0;
			}
			/* Make the selected thread the current thread: */
			_thread_run = pthread_h;

			/*
			 * Save the current time as the time that the thread
			 * became active:
			 */
			_thread_run->last_active.tv_sec = tv.tv_sec;
			_thread_run->last_active.tv_usec = tv.tv_usec;
			/*
			 * Define the maximum time before a scheduling signal
			 * is required:
			 */
			itimer.it_value.tv_sec = 0;
			itimer.it_value.tv_usec = TIMESLICE_USEC;

			/*
			 * The interval timer is not reloaded when it
			 * times out. The interval time needs to be
			 * calculated every time.
			 */
			itimer.it_interval.tv_sec = 0;
			itimer.it_interval.tv_usec = 0;
			/* Check if the first waiting thread has a wakeup time: */
			if ((pthread != NULL) &&
			    (pthread->wakeup_time.tv_sec != -1)) {
				/*
				 * Calculate the time until this thread
				 * is ready, allowing for the clock
				 * resolution:
				 */
				ts1.tv_sec = pthread->wakeup_time.tv_sec
				    - ts.tv_sec;
				ts1.tv_nsec = pthread->wakeup_time.tv_nsec
				    - ts.tv_nsec + _clock_res_nsec;
				/*
				 * Check for underflow of the nanosecond field:
				 */
				while (ts1.tv_nsec < 0) {
					/*
					 * Allow for the underflow of the
					 * nanosecond field:
					 */
					ts1.tv_sec--;
					ts1.tv_nsec += 1000000000;
				}
				/*
				 * Check for overflow of the nanosecond field:
				 */
				while (ts1.tv_nsec >= 1000000000) {
					/*
					 * Allow for the overflow of the
					 * nanosecond field:
					 */
					ts1.tv_sec++;
					ts1.tv_nsec -= 1000000000;
				}
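				/*
				 * For example, a wakeup time 0.2s away
				 * computed across a second boundary could
				 * yield ts1 = {1, -800000000}, which the
				 * loops above normalize to {0, 200000000}.
				 */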
				/*
				 * Convert the timespec structure to a
				 * timeval structure:
				 */
				TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
				/*
				 * Check if the thread will be ready
				 * sooner than the earliest ones found
				 * so far:
				 */
				if (timercmp(&tv1, &itimer.it_value, <)) {
					/*
					 * Update the time value:
					 */
					itimer.it_value.tv_sec = tv1.tv_sec;
					itimer.it_value.tv_usec = tv1.tv_usec;
				}
			}
			/*
			 * Check if this thread is running for the first time
			 * or running again after using its full time slice
			 * allocation:
			 */
			if (_thread_run->slice_usec == -1) {
				/* Reset the accumulated time slice period: */
				_thread_run->slice_usec = 0;
			}
			/* Check if there is more than one thread: */
			if (set_timer != 0) {
				/*
				 * Start the interval timer for the
				 * calculated time interval:
				 */
				if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
					/*
					 * Cannot initialise the timer, so
					 * abort this process:
					 */
					PANIC("Cannot set scheduling timer");
				}
			}
			/* Check if a signal context was saved: */
			if (_thread_run->sig_saved == 1) {
				/*
				 * Point to the floating point data in the
				 * running thread:
				 */
				fdata = _thread_run->saved_fp;
				/* Restore the floating point state: */
				__asm__("frstor %0": :"m"(*fdata));
				/*
				 * Do a sigreturn to restart the thread that
				 * was interrupted by a signal:
				 */
				_thread_kern_in_sched = 0;
				/*
				 * If we had a context switch, run any
				 * installed switch hooks.
				 */
				if ((_sched_switch_hook != NULL) &&
				    (_last_user_thread != _thread_run)) {
					thread_run_switch_hook(_last_user_thread,
					    _thread_run);
				}
				_thread_sys_sigreturn(&_thread_run->saved_sigcontext);
			} else
				/*
				 * Do a longjmp to restart the thread that
				 * was context switched out (by a longjmp to
				 * a different thread):
				 */
				longjmp(_thread_run->saved_jmp_buf, 1);
			/* This point should not be reached. */
			PANIC("Thread has returned from sigreturn or longjmp");
		}
	}

	/* There are no more threads, so exit this process: */
	exit(0);
}

void
_thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
{
	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;
	/*
	 * Prevent the signal handler from fiddling with this thread
	 * before its state is set and it is placed into the proper queue.
	 */
	_queue_signals = 1;
	/* Change the state of the current thread: */
	_thread_run->state = state;
	_thread_run->fname = fname;
	_thread_run->lineno = lineno;

	/* Schedule the next thread that is ready: */
	_thread_kern_sched(NULL);
}
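
/*
 * A typical (hypothetical) call site passes the new state along with the
 * standard file/line macros so a debugger can see where a thread blocked:
 *
 *	_thread_kern_sched_state(PS_MUTEX_WAIT, __FILE__, __LINE__);
 */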

void
_thread_kern_sched_state_unlock(enum pthread_state state,
    spinlock_t *lock, char *fname, int lineno)
{
	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;
	/*
	 * Prevent the signal handler from fiddling with this thread
	 * before its state is set and it is placed into the proper
	 * queue(s).
	 */
	_queue_signals = 1;
	/* Change the state of the current thread: */
	_thread_run->state = state;
	_thread_run->fname = fname;
	_thread_run->lineno = lineno;

	/* Release the lock before scheduling: */
	_SPINUNLOCK(lock);

	/* Schedule the next thread that is ready: */
	_thread_kern_sched(NULL);
}

static void
_thread_kern_poll(int wait_reqd)
{
	int		count = 0;
	int		i, found;
	int		kern_pipe_added = 0;
	int		nfds = 0;
	int		timeout_ms = 0;
	struct pthread	*pthread, *pthread_next;
	struct timespec	ts;
	struct timeval	tv;
	/* Check if the caller wants to wait: */
	if (wait_reqd == 0) {
		timeout_ms = 0;
	}
	else {
		/* Get the current time of day: */
		gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		/* Get the first thread on the waiting list: */
		pthread = TAILQ_FIRST(&_waitingq);
		if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
			/*
			 * Either there are no threads in the waiting queue,
			 * or there are no threads that can timeout.
			 */
			timeout_ms = INFTIM;
		}
		else {
			/*
			 * Calculate the time left for the next thread to
			 * timeout allowing for the clock resolution:
			 */
			timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
			    1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
			    _clock_res_nsec) / 1000000);
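			/*
			 * For example, with a wakeup 500 usec away and a
			 * clock resolution of 10 msec, this computes
			 * (0 * 1000) + (500000 + 10000000) / 1000000 =
			 * 10 msec (integer division truncates).
			 */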
			/*
			 * Don't allow negative timeouts:
			 */
			if (timeout_ms < 0)
				timeout_ms = 0;
		}
	}
	/* Protect the scheduling queues: */
	_queue_signals = 1;
	/*
	 * Check to see if the signal queue needs to be walked to look
	 * for threads awoken by a signal while in the scheduler.
	 */
	if (_sigq_check_reqd != 0) {
		/* Reset flag before handling queued signals: */
		_sigq_check_reqd = 0;

		dequeue_signals();
	}
	/*
	 * Check for a thread that became runnable due to a signal:
	 */
	if (PTHREAD_PRIOQ_FIRST() != NULL) {
		/*
		 * Since there is at least one runnable thread,
		 * disable the wait.
		 */
		timeout_ms = 0;
	}
	/*
	 * Form the poll table:
	 */
	nfds = 0;
	if (timeout_ms != 0) {
		/* Add the kernel pipe to the poll table: */
		_thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
		_thread_pfd_table[nfds].events = POLLRDNORM;
		_thread_pfd_table[nfds].revents = 0;
		nfds++;
		kern_pipe_added = 1;
	}
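	/*
	 * When added, the kernel pipe always occupies slot 0 of the poll
	 * table, which is why its readiness is later checked through
	 * _thread_pfd_table[0].revents.
	 */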
	PTHREAD_WAITQ_SETACTIVE();
	TAILQ_FOREACH(pthread, &_workq, qe) {
		switch (pthread->state) {
		case PS_SPINBLOCK:
			/*
			 * If the lock is available, let the thread run.
			 */
			if (pthread->data.spinlock->access_lock == 0) {
				PTHREAD_WAITQ_CLEARACTIVE();
				PTHREAD_WORKQ_REMOVE(pthread);
				PTHREAD_NEW_STATE(pthread,PS_RUNNING);
				PTHREAD_WAITQ_SETACTIVE();

				/* One less thread in a spinblock state: */
				_spinblock_count--;

				/*
				 * Since there is at least one runnable
				 * thread, disable the wait.
				 */
				timeout_ms = 0;
			}
			break;
		/* File descriptor read wait: */
		case PS_FDR_WAIT:
			/* Limit number of polled files to table size: */
			if (nfds < _thread_dtablesize) {
				_thread_pfd_table[nfds].events = POLLRDNORM;
				_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
				nfds++;
			}
			break;
		/* File descriptor write wait: */
		case PS_FDW_WAIT:
			/* Limit number of polled files to table size: */
			if (nfds < _thread_dtablesize) {
				_thread_pfd_table[nfds].events = POLLWRNORM;
				_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
				nfds++;
			}
			break;
		/* File descriptor poll or select wait: */
		case PS_POLL_WAIT:
		case PS_SELECT_WAIT:
			/* Limit number of polled files to table size: */
			if (pthread->data.poll_data->nfds + nfds <
			    _thread_dtablesize) {
				for (i = 0; i < pthread->data.poll_data->nfds; i++) {
					_thread_pfd_table[nfds + i].fd =
					    pthread->data.poll_data->fds[i].fd;
					_thread_pfd_table[nfds + i].events =
					    pthread->data.poll_data->fds[i].events;
				}
				nfds += pthread->data.poll_data->nfds;
			}
			break;
		/* Other states do not depend on file I/O. */
		default:
			break;
		}
	}
	PTHREAD_WAITQ_CLEARACTIVE();
	/*
	 * Wait for a file descriptor to be ready for read, write, or
	 * an exception, or a timeout to occur:
	 */
	count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms);
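	/*
	 * (poll() semantics: a timeout of 0 returns immediately, INFTIM
	 * blocks until a descriptor is ready, and the return value is the
	 * number of descriptors with events, 0 on timeout, or -1 on error.)
	 */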
	if (kern_pipe_added != 0)
		/*
		 * Remove the pthread kernel pipe file descriptor
		 * from the pollfd table:
		 */
		nfds = 1;
	else
		nfds = 0;

	/*
	 * Check if it is possible that there are bytes in the kernel
	 * read pipe waiting to be read:
	 */
	if (count < 0 || ((kern_pipe_added != 0) &&
	    (_thread_pfd_table[0].revents & POLLRDNORM))) {
		/*
		 * If the kernel read pipe was included in the
		 * count:
		 */
		if (count > 0) {
			/* Decrement the count of file descriptors: */
			count--;
		}
		if (_sigq_check_reqd != 0) {
			/* Reset flag before handling signals: */
			_sigq_check_reqd = 0;

			dequeue_signals();
		}
	}
	/*
	 * Check if any file descriptors are ready:
	 */
	if (count > 0) {
		/*
		 * Enter a loop to look for threads waiting on file
		 * descriptors that are flagged as available by the
		 * poll syscall:
		 */
		PTHREAD_WAITQ_SETACTIVE();
		TAILQ_FOREACH(pthread, &_workq, qe) {
			switch (pthread->state) {
			case PS_SPINBLOCK:
				/*
				 * If the lock is available, let the thread run.
				 */
				if (pthread->data.spinlock->access_lock == 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread,PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();

					/*
					 * One less thread in a spinblock state:
					 */
					_spinblock_count--;
				}
				break;
			/* File descriptor read wait: */
			case PS_FDR_WAIT:
				if ((nfds < _thread_dtablesize) &&
				    (_thread_pfd_table[nfds].revents & POLLRDNORM)) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread,PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();
				}
				nfds++;
				break;
			/* File descriptor write wait: */
			case PS_FDW_WAIT:
				if ((nfds < _thread_dtablesize) &&
				    (_thread_pfd_table[nfds].revents & POLLWRNORM)) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread,PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();
				}
				nfds++;
				break;
			/* File descriptor poll or select wait: */
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				if (pthread->data.poll_data->nfds + nfds <
				    _thread_dtablesize) {
					/*
					 * Enter a loop looking for I/O
					 * readiness:
					 */
					found = 0;
					for (i = 0; i < pthread->data.poll_data->nfds; i++) {
						if (_thread_pfd_table[nfds + i].revents != 0) {
							pthread->data.poll_data->fds[i].revents =
							    _thread_pfd_table[nfds + i].revents;
							found++;
						}
					}
					/* Increment before destroying: */
					nfds += pthread->data.poll_data->nfds;

					/* Wake the thread if I/O was found: */
					if (found != 0) {
						pthread->data.poll_data->nfds = found;
						PTHREAD_WAITQ_CLEARACTIVE();
						PTHREAD_WORKQ_REMOVE(pthread);
						PTHREAD_NEW_STATE(pthread,PS_RUNNING);
						PTHREAD_WAITQ_SETACTIVE();
					}
				}
				else
					/* Increment before destroying: */
					nfds += pthread->data.poll_data->nfds;
				break;
			/* Other states do not depend on file I/O. */
			default:
				break;
			}
		}
		PTHREAD_WAITQ_CLEARACTIVE();
	}
	else if (_spinblock_count != 0) {
		/*
		 * Enter a loop to look for threads waiting on a spinlock
		 * that is now available.
		 */
		PTHREAD_WAITQ_SETACTIVE();
		TAILQ_FOREACH(pthread, &_workq, qe) {
			if (pthread->state == PS_SPINBLOCK) {
				/*
				 * If the lock is available, let the thread run.
				 */
				if (pthread->data.spinlock->access_lock == 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread,PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();

					/*
					 * One less thread in a spinblock state:
					 */
					_spinblock_count--;
				}
			}
		}
		PTHREAD_WAITQ_CLEARACTIVE();
	}
	/* Unprotect the scheduling queues: */
	_queue_signals = 0;
	while (_sigq_check_reqd != 0) {
		/* Handle queued signals: */
		_sigq_check_reqd = 0;

		/* Protect the scheduling queues: */
		_queue_signals = 1;

		dequeue_signals();

		/* Unprotect the scheduling queues: */
		_queue_signals = 0;
	}

	/* Nothing to return. */
	return;
}

void
_thread_kern_set_timeout(struct timespec * timeout)
{
	struct timespec current_time;
	struct timeval  tv;
	/* Reset the timeout flag for the running thread: */
	_thread_run->timeout = 0;
	/* Check if the thread is to wait forever: */
	if (timeout == NULL) {
		/*
		 * Set the wakeup time to something that can be recognised as
		 * different to an actual time of day:
		 */
		_thread_run->wakeup_time.tv_sec = -1;
		_thread_run->wakeup_time.tv_nsec = -1;
	}
	/* Check if no waiting is required: */
	else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
		/* Set the wake up time to 'immediately': */
		_thread_run->wakeup_time.tv_sec = 0;
		_thread_run->wakeup_time.tv_nsec = 0;
	}
	else {
		/* Get the current time: */
		gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time);

		/* Calculate the time for the current thread to wake up: */
		_thread_run->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
		_thread_run->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec;
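		/*
		 * For example, current_time = 5.9s plus a timeout of 0.2s
		 * gives tv_nsec = 1100000000, which must wrap below to
		 * wakeup_time = {6, 100000000}.
		 */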
		/* Check if the nanosecond field needs to wrap: */
		if (_thread_run->wakeup_time.tv_nsec >= 1000000000) {
			/* Wrap the nanosecond field: */
			_thread_run->wakeup_time.tv_sec += 1;
			_thread_run->wakeup_time.tv_nsec -= 1000000000;
		}
	}
}

void
_thread_kern_sig_defer(void)
{
	/* Allow signal deferral to be recursive. */
	_thread_run->sig_defer_count++;
}

void
_thread_kern_sig_undefer(void)
{
	pthread_t pthread;
	int need_resched = 0;

	/*
	 * Perform checks to yield only if we are about to undefer
	 * signals.
	 */
	if (_thread_run->sig_defer_count > 1) {
		/* Decrement the signal deferral count. */
		_thread_run->sig_defer_count--;
	}
	else if (_thread_run->sig_defer_count == 1) {
		/* Reenable signals: */
		_thread_run->sig_defer_count = 0;

		/*
		 * Check if there are queued signals:
		 */
		while (_sigq_check_reqd != 0) {
			/* Defer scheduling while we process queued signals: */
			_thread_run->sig_defer_count = 1;

			/* Clear the flag before checking the signal queue: */
			_sigq_check_reqd = 0;

			/* Dequeue and handle signals: */
			dequeue_signals();
			/*
			 * To avoid an unnecessary reschedule check later,
			 * see whether signal handling caused a higher
			 * priority thread to become ready.
			 */
			if ((need_resched == 0) &&
			    (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
			    (pthread->active_priority > _thread_run->active_priority))) {
				need_resched = 1;
			}
		}
		/* Reenable signals: */
		_thread_run->sig_defer_count = 0;
	}
	/* Yield the CPU if necessary: */
	if (need_resched || _thread_run->yield_on_sig_undefer != 0) {
		_thread_run->yield_on_sig_undefer = 0;
		_thread_kern_sched(NULL);
	}
}

static void
dequeue_signals(void)
{
	char	bufr[128];
	int	i;
	ssize_t	num;
	/*
	 * Enter a loop to read and handle queued signals from the
	 * pthread kernel pipe:
	 */
	while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
	    sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
		/*
		 * The buffer read contains one byte per signal and
		 * each byte is the signal number.
		 */
		for (i = 0; i < num; i++) {
			if ((int) bufr[i] == _SCHED_SIGNAL) {
				/*
				 * Scheduling signals shouldn't ever be
				 * queued; just ignore it for now.
				 */
			}
			else {
				/* Handle this signal: */
				_thread_sig_handle((int) bufr[i], NULL);
			}
		}
	}
	if ((num < 0) && (errno != EAGAIN)) {
		/*
		 * The only expected error is EAGAIN (no data waiting),
		 * so anything else is fatal:
		 */
		PANIC("Unable to read from thread kernel pipe");
	}
}

static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
{
	pthread_t tid_out = thread_out;
	pthread_t tid_in = thread_in;
	/* Hide private (library) threads from the switch hook: */
	if ((tid_out != NULL) &&
	    ((tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0))
		tid_out = NULL;
	if ((tid_in != NULL) &&
	    ((tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0))
		tid_in = NULL;
	if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
		/* Run the scheduler switch hook: */
		_sched_switch_hook(tid_out, tid_in);
	}
}