/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <errno.h>
#include <poll.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include "pthread_private.h"

/* Static function prototype definitions: */
static void
_thread_kern_poll(int wait_reqd);

static void
dequeue_signals(void);

static void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);

void
_thread_kern_sched(ucontext_t * scp)
{
	char		*fdata;
	pthread_t	pthread, pthread_h = NULL;
	struct itimerval itimer;
	struct timespec	ts, ts1;
	struct timeval	tv, tv1;
	int		set_timer = 0;

	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;

	/* Check if this function was called from the signal handler: */
	if (scp != NULL) {
		/*
		 * Copy the signal context to the current thread's jump
		 * buffer:
		 */
		memcpy(&_thread_run->saved_sigcontext, scp, sizeof(_thread_run->saved_sigcontext));

		/* Point to the floating point data in the running thread: */
		fdata = _thread_run->saved_fp;

		/* Save the floating point data: */
		__asm__("fnsave %0": :"m"(*fdata));

		/* Flag the signal context as the last state saved: */
		_thread_run->sig_saved = 1;
	}
	/* Save the state of the current thread: */
	else if (setjmp(_thread_run->saved_jmp_buf) != 0) {
		/*
		 * This point is reached when a longjmp() is called to
		 * restore the state of a thread.
		 *
		 * This is the normal way out of the scheduler.
		 */
		_thread_kern_in_sched = 0;

		if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
		    ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) {
			/*
			 * Cancellations override signals.
			 *
			 * Stick a cancellation point at the start of
			 * each async-cancellable thread's resumption.
			 *
			 * We allow threads woken at cancel points to do their
			 * own checks.
			 */
			pthread_testcancel();
		}

		/*
		 * Check for undispatched signals due to calls to
		 * pthread_kill():
		 */
		if (SIGNOTEMPTY(_thread_run->sigpend))
			/* ... */;

		if (_sched_switch_hook != NULL) {
			/* Run the installed switch hook: */
			thread_run_switch_hook(_last_user_thread, _thread_run);
		}
		return;
	} else
		/* Flag the jump buffer was the last state saved: */
		_thread_run->sig_saved = 0;

	/* If the currently running thread is a user thread, save it: */
	if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
		_last_user_thread = _thread_run;

	/*
	 * Enter a scheduling loop that finds the next thread that is
	 * ready to run. This loop completes when there are no more threads
	 * in the global list or when a thread has its state restored by
	 * either a sigreturn (if the state was saved as a sigcontext) or a
	 * longjmp (if the state was saved by a setjmp).
	 */
	while (!(TAILQ_EMPTY(&_thread_list))) {
		/* Get the current time of day: */
		gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		/*
		 * Protect the scheduling queues from access by the signal
		 * handler:
		 */
		_queue_signals = 1;

		if (_thread_run != &_thread_kern_thread) {
			/*
			 * This thread no longer needs to yield the CPU.
			 */
			_thread_run->yield_on_sig_undefer = 0;

			/*
			 * Save the current time as the time that the thread
			 * became inactive:
			 */
			_thread_run->last_inactive.tv_sec = tv.tv_sec;
			_thread_run->last_inactive.tv_usec = tv.tv_usec;

			/*
			 * Place the currently running thread into the
			 * appropriate queue(s).
			 */
			switch (_thread_run->state) {
			case PS_DEAD:
			case PS_STATE_MAX: /* to silence -Wall */
				/*
				 * Dead threads are not placed in any queue:
				 */
				break;

			case PS_RUNNING:
				/*
				 * Runnable threads can't be placed in the
				 * priority queue until after waiting threads
				 * are polled (to preserve round-robin
				 * scheduling).
				 */
				if ((_thread_run->slice_usec != -1) &&
				    (_thread_run->attr.sched_policy != SCHED_FIFO)) {
					/*
					 * Accumulate the number of microseconds that
					 * this thread has run for:
					 */
					_thread_run->slice_usec +=
					    (_thread_run->last_inactive.tv_sec -
					    _thread_run->last_active.tv_sec) * 1000000 +
					    _thread_run->last_inactive.tv_usec -
					    _thread_run->last_active.tv_usec;

					/* Check for time quantum exceeded: */
					if (_thread_run->slice_usec > TIMESLICE_USEC)
						_thread_run->slice_usec = -1;
				}
				break;
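
				/*
				 * Example of the quantum arithmetic above: a
				 * thread dispatched at tv = 0.950000 and
				 * switched out at tv = 1.000500 accumulates
				 * (1 - 0) * 1000000 + 500 - 950000 = 50500
				 * microseconds; once the running total passes
				 * TIMESLICE_USEC, slice_usec becomes -1 and
				 * the thread is requeued at the tail of its
				 * priority queue further below.
				 */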

			/*
			 * States which do not depend on file descriptor I/O
			 * operations or timeouts:
			 */
			case PS_DEADLOCK:
			case PS_FDLR_WAIT:
			case PS_FDLW_WAIT:
			case PS_FILE_WAIT:
			case PS_JOIN:
			case PS_MUTEX_WAIT:
			case PS_SIGSUSPEND:
			case PS_SIGTHREAD:
			case PS_SIGWAIT:
			case PS_SUSPENDED:
			case PS_WAIT_WAIT:
				/* No timeouts for these states: */
				_thread_run->wakeup_time.tv_sec = -1;
				_thread_run->wakeup_time.tv_nsec = -1;

				/* Restart the time slice: */
				_thread_run->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(_thread_run);
				break;

			/* States which can timeout: */
			case PS_COND_WAIT:
			case PS_SLEEP_WAIT:
				/* Restart the time slice: */
				_thread_run->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(_thread_run);
				break;

			/* States that require periodic work: */
			case PS_SPINBLOCK:
				/* No timeouts for this state: */
				_thread_run->wakeup_time.tv_sec = -1;
				_thread_run->wakeup_time.tv_nsec = -1;

				/* Increment spinblock count: */
				_spinblock_count++;

				/* FALLTHROUGH */
			case PS_FDR_WAIT:
			case PS_FDW_WAIT:
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				/* Restart the time slice: */
				_thread_run->slice_usec = -1;

				/* Insert into the waiting queue: */
				PTHREAD_WAITQ_INSERT(_thread_run);

				/* Insert into the work queue: */
				PTHREAD_WORKQ_INSERT(_thread_run);
				break;
			}
		}

		/* Unprotect the scheduling queues: */
		_queue_signals = 0;

		/*
		 * Poll file descriptors to update the state of threads
		 * waiting on file I/O where data may be available:
		 */
		_thread_kern_poll(0);

		/* Protect the scheduling queues: */
		_queue_signals = 1;

		/*
		 * Wake up threads that have timed out. This has to be
		 * done after polling in case a thread does a poll or
		 * select with zero time.
		 */
		PTHREAD_WAITQ_SETACTIVE();
		while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
		    (pthread->wakeup_time.tv_sec != -1) &&
		    (((pthread->wakeup_time.tv_sec == 0) &&
		    (pthread->wakeup_time.tv_nsec == 0)) ||
		    (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
		    ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
		    (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
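			/*
			 * A thread is woken here when its wakeup_time is the
			 * special "immediate" value (0, 0) or is at or before
			 * the current time ts; the sentinel (-1, -1) means
			 * wait forever and stops the scan.
			 */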
			switch (pthread->state) {
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				/* Return zero file descriptors ready: */
				pthread->data.poll_data->nfds = 0;
				/* FALLTHROUGH */
			default:
				/*
				 * Remove this thread from the waiting queue
				 * (and work queue if necessary) and place it
				 * in the ready queue.
				 */
				PTHREAD_WAITQ_CLEARACTIVE();
				if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
					PTHREAD_WORKQ_REMOVE(pthread);
				PTHREAD_NEW_STATE(pthread, PS_RUNNING);
				PTHREAD_WAITQ_SETACTIVE();
				break;
			}

			/*
			 * Flag the timeout in the thread structure:
			 */
			pthread->timeout = 1;
		}
		PTHREAD_WAITQ_CLEARACTIVE();

		/*
		 * Check if there is a current runnable thread that isn't
		 * already in the ready queue:
		 */
		if ((_thread_run != &_thread_kern_thread) &&
		    (_thread_run->state == PS_RUNNING) &&
		    ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
			if (_thread_run->slice_usec == -1) {
				/*
				 * The thread exceeded its time
				 * quantum or it yielded the CPU;
				 * place it at the tail of the
				 * queue for its priority.
				 */
				PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
			} else {
				/*
				 * The thread hasn't exceeded its
				 * interval. Place it at the head
				 * of the queue for its priority.
				 */
				PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
			}
		}

		/*
		 * Get the highest priority thread in the ready queue.
		 */
		pthread_h = PTHREAD_PRIOQ_FIRST();

		/* Check if there are no threads ready to run: */
		if (pthread_h == NULL) {
			/*
			 * Lock the pthread kernel by changing the pointer to
			 * the running thread to point to the global kernel
			 * thread structure:
			 */
			_thread_run = &_thread_kern_thread;

			/* Unprotect the scheduling queues: */
			_queue_signals = 0;

			/*
			 * There are no threads ready to run, so wait until
			 * something happens that changes this condition:
			 */
			_thread_kern_poll(1);
		} else {
			/* Remove the thread from the ready queue: */
			PTHREAD_PRIOQ_REMOVE(pthread_h);

			/* Get first thread on the waiting list: */
			pthread = TAILQ_FIRST(&_waitingq);

			/* Check to see if there is more than one thread: */
			if (pthread_h != TAILQ_FIRST(&_thread_list) ||
			    TAILQ_NEXT(pthread_h, tle) != NULL)
				set_timer = 1;

			/* Unprotect the scheduling queues: */
			_queue_signals = 0;

			/*
			 * Check for signals queued while the scheduling
			 * queues were protected:
			 */
			while (_sigq_check_reqd != 0) {
				/* Clear before handling queued signals: */
				_sigq_check_reqd = 0;

				/* Protect the scheduling queues again: */
				_queue_signals = 1;

				dequeue_signals();

				/*
				 * Check for a higher priority thread that
				 * became runnable due to signal handling.
				 */
				if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
				    (pthread->active_priority > pthread_h->active_priority)) {
					/*
					 * Insert the lower priority thread
					 * at the head of its priority list:
					 */
					PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);

					/* Remove the thread from the ready queue: */
					PTHREAD_PRIOQ_REMOVE(pthread);

					/* There's a new thread in town: */
					pthread_h = pthread;
				}

				/* Get first thread on the waiting list: */
				pthread = TAILQ_FIRST(&_waitingq);

				/*
				 * Check to see if there is more than one
				 * thread:
				 */
				if (pthread_h != TAILQ_FIRST(&_thread_list) ||
				    TAILQ_NEXT(pthread_h, tle) != NULL)
					set_timer = 1;
			}

			/* Unprotect the scheduling queues: */
			_queue_signals = 0;

			/* Make the selected thread the current thread: */
			_thread_run = pthread_h;

			/*
			 * Save the current time as the time that the thread
			 * became active:
			 */
			_thread_run->last_active.tv_sec = tv.tv_sec;
			_thread_run->last_active.tv_usec = tv.tv_usec;

			/*
			 * Define the maximum time before a scheduling signal
			 * is required:
			 */
			itimer.it_value.tv_sec = 0;
			itimer.it_value.tv_usec = TIMESLICE_USEC;

			/*
			 * The interval timer is not reloaded when it
			 * times out. The interval time needs to be
			 * calculated every time.
			 */
			itimer.it_interval.tv_sec = 0;
			itimer.it_interval.tv_usec = 0;

			/* Get first thread on the waiting list: */
			if ((pthread != NULL) &&
			    (pthread->wakeup_time.tv_sec != -1)) {
				/*
				 * Calculate the time until this thread
				 * is ready, allowing for the clock
				 * resolution:
				 */
				ts1.tv_sec = pthread->wakeup_time.tv_sec
				    - ts.tv_sec;
				ts1.tv_nsec = pthread->wakeup_time.tv_nsec
				    - ts.tv_nsec + _clock_res_nsec;

				/*
				 * Check for underflow of the nanosecond field:
				 */
				while (ts1.tv_nsec < 0) {
					/*
					 * Allow for the underflow of the
					 * nanosecond field:
					 */
					ts1.tv_sec--;
					ts1.tv_nsec += 1000000000;
				}
				/*
				 * Check for overflow of the nanosecond field:
				 */
				while (ts1.tv_nsec >= 1000000000) {
					/*
					 * Allow for the overflow of the
					 * nanosecond field:
					 */
					ts1.tv_sec++;
					ts1.tv_nsec -= 1000000000;
				}
				/*
				 * Convert the timespec structure to a
				 * timeval structure:
				 */
				TIMESPEC_TO_TIMEVAL(&tv1, &ts1);

				/*
				 * Check if the thread will be ready
				 * sooner than the earliest ones found
				 * so far:
				 */
				if (timercmp(&tv1, &itimer.it_value, <)) {
					/*
					 * Update the time value:
					 */
					itimer.it_value.tv_sec = tv1.tv_sec;
					itimer.it_value.tv_usec = tv1.tv_usec;
				}
			}
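
			/*
			 * Net effect of the block above: it_value holds the
			 * smaller of TIMESLICE_USEC and the resolution-padded
			 * time until the earliest wakeup, so a single
			 * one-shot timer drives both preemption and timeout
			 * delivery.
			 */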

			/*
			 * Check if this thread is running for the first time
			 * or running again after using its full time slice
			 * allocation:
			 */
			if (_thread_run->slice_usec == -1) {
				/* Reset the accumulated time slice period: */
				_thread_run->slice_usec = 0;
			}

			/* Check if there is more than one thread: */
			if (set_timer != 0) {
				/*
				 * Start the interval timer for the
				 * calculated time interval:
				 */
				if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
					/*
					 * Cannot initialise the timer, so
					 * abort this process:
					 */
					PANIC("Cannot set scheduling timer");
				}
			}

			/*
			 * Check if this thread is being continued from a
			 * longjmp() out of a signal handler:
			 */
			if ((_thread_run->jmpflags & JMPFLAGS_LONGJMP) != 0) {
				_thread_run->jmpflags = 0;
				__longjmp(_thread_run->nested_jmp.jmp,
				    _thread_run->longjmp_val);
			}
			/*
			 * Check if this thread is being continued from a
			 * _longjmp() out of a signal handler:
			 */
			else if ((_thread_run->jmpflags & JMPFLAGS__LONGJMP) !=
			    0) {
				_thread_run->jmpflags = 0;
				___longjmp(_thread_run->nested_jmp.jmp,
				    _thread_run->longjmp_val);
			}
			/*
			 * Check if this thread is being continued from a
			 * siglongjmp() out of a signal handler:
			 */
			else if ((_thread_run->jmpflags & JMPFLAGS_SIGLONGJMP)
			    != 0) {
				_thread_run->jmpflags = 0;
				__siglongjmp(
				    _thread_run->nested_jmp.sigjmp,
				    _thread_run->longjmp_val);
			}
			/* Check if a signal context was saved: */
			else if (_thread_run->sig_saved == 1) {
				/*
				 * Point to the floating point data in the
				 * running thread:
				 */
				fdata = _thread_run->saved_fp;

				/* Restore the floating point state: */
				__asm__("frstor %0": :"m"(*fdata));

				/*
				 * Do a sigreturn to restart the thread that
				 * was interrupted by a signal:
				 */
				_thread_kern_in_sched = 0;

				/*
				 * If we had a context switch, run any
				 * installed switch hooks.
				 */
				if ((_sched_switch_hook != NULL) &&
				    (_last_user_thread != _thread_run)) {
					thread_run_switch_hook(_last_user_thread,
					    _thread_run);
				}
				_thread_sys_sigreturn(&_thread_run->saved_sigcontext);
			} else {
				/*
				 * Do a longjmp to restart the thread that
				 * was context switched out (by a longjmp to
				 * a different thread):
				 */
				__longjmp(_thread_run->saved_jmp_buf, 1);
			}

			/* This point should not be reached. */
			PANIC("Thread has returned from sigreturn or longjmp");
		}
	}

	/* There are no more threads, so exit this process: */
	exit(0);
}

void
_thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
{
	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;

	/*
	 * Prevent the signal handler from fiddling with this thread
	 * before its state is set and is placed into the proper queue.
	 */
	_queue_signals = 1;

	/* Change the state of the current thread: */
	_thread_run->state = state;
	_thread_run->fname = fname;
	_thread_run->lineno = lineno;

	/* Schedule the next thread that is ready: */
	_thread_kern_sched(NULL);
}
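
/*
 * Illustrative use: blocking primitives park the current thread with a
 * call such as
 *
 *	_thread_kern_sched_state(PS_MUTEX_WAIT, __FILE__, __LINE__);
 *
 * where fname/lineno are recorded purely to aid debugging of blocked
 * threads.
 */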

void
_thread_kern_sched_state_unlock(enum pthread_state state,
    spinlock_t *lock, char *fname, int lineno)
{
	/*
	 * Flag the pthread kernel as executing scheduler code
	 * to avoid a scheduler signal from interrupting this
	 * execution and calling the scheduler again.
	 */
	_thread_kern_in_sched = 1;

	/*
	 * Prevent the signal handler from fiddling with this thread
	 * before its state is set and it is placed into the proper
	 * queue(s).
	 */
	_queue_signals = 1;

	/* Change the state of the current thread: */
	_thread_run->state = state;
	_thread_run->fname = fname;
	_thread_run->lineno = lineno;

	_SPINUNLOCK(lock);

	/* Schedule the next thread that is ready: */
	_thread_kern_sched(NULL);
}

static void
_thread_kern_poll(int wait_reqd)
{
	int	count = 0;
	int	i, found;
	int	kern_pipe_added = 0;
	int	nfds = 0;
	int	timeout_ms = 0;
	struct pthread	*pthread;
	struct timespec	ts;
	struct timeval	tv;

	/* Check if the caller wants to wait: */
	if (wait_reqd == 0) {
		timeout_ms = 0;
	}
	else {
		/* Get the current time of day: */
		gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		pthread = TAILQ_FIRST(&_waitingq);

		if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
			/*
			 * Either there are no threads in the waiting queue,
			 * or there are no threads that can timeout.
			 */
			timeout_ms = INFTIM;
		}
		else {
			/*
			 * Calculate the time left for the next thread to
			 * timeout allowing for the clock resolution:
			 */
			timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
			    1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
			    _clock_res_nsec) / 1000000);

			/*
			 * Don't allow negative timeouts:
			 */
			if (timeout_ms < 0)
				timeout_ms = 0;
		}
	}
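
	/*
	 * Example: a wakeup_time of 1.000000000 against ts = 0.990000000
	 * with _clock_res_nsec = 10000000 gives timeout_ms =
	 * (1 - 0) * 1000 + (0 - 990000000 + 10000000) / 1000000 = 20,
	 * i.e. the 10ms wait padded by one clock tick so the thread is
	 * not woken early.
	 */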

	/* Protect the scheduling queues: */
	_queue_signals = 1;

	/*
	 * Check to see if the signal queue needs to be walked to look
	 * for threads awoken by a signal while in the scheduler.
	 */
	if (_sigq_check_reqd != 0) {
		/* Reset flag before handling queued signals: */
		_sigq_check_reqd = 0;

		dequeue_signals();
	}

	/*
	 * Check for a thread that became runnable due to a signal:
	 */
	if (PTHREAD_PRIOQ_FIRST() != NULL) {
		/*
		 * Since there is at least one runnable thread,
		 * disable the wait.
		 */
		timeout_ms = 0;
	}

	/*
	 * Form the poll table:
	 */
	nfds = 0;
	if (timeout_ms != 0) {
		/* Add the kernel pipe to the poll table: */
		_thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
		_thread_pfd_table[nfds].events = POLLRDNORM;
		_thread_pfd_table[nfds].revents = 0;
		nfds++;
		kern_pipe_added = 1;
	}

	PTHREAD_WAITQ_SETACTIVE();
	TAILQ_FOREACH(pthread, &_workq, qe) {
		switch (pthread->state) {
		case PS_SPINBLOCK:
			/*
			 * If the lock is available, let the thread run.
			 */
			if (pthread->data.spinlock->access_lock == 0) {
				PTHREAD_WAITQ_CLEARACTIVE();
				PTHREAD_WORKQ_REMOVE(pthread);
				PTHREAD_NEW_STATE(pthread, PS_RUNNING);
				PTHREAD_WAITQ_SETACTIVE();

				/* One less thread in a spinblock state: */
				_spinblock_count--;

				/*
				 * Since there is at least one runnable
				 * thread, disable the wait.
				 */
				timeout_ms = 0;
			}
			break;

		/* File descriptor read wait: */
		case PS_FDR_WAIT:
			/* Limit number of polled files to table size: */
			if (nfds < _thread_dtablesize) {
				_thread_pfd_table[nfds].events = POLLRDNORM;
				_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
				nfds++;
			}
			break;

		/* File descriptor write wait: */
		case PS_FDW_WAIT:
			/* Limit number of polled files to table size: */
			if (nfds < _thread_dtablesize) {
				_thread_pfd_table[nfds].events = POLLWRNORM;
				_thread_pfd_table[nfds].fd = pthread->data.fd.fd;
				nfds++;
			}
			break;

		/* File descriptor poll or select wait: */
		case PS_POLL_WAIT:
		case PS_SELECT_WAIT:
			/* Limit number of polled files to table size: */
			if (pthread->data.poll_data->nfds + nfds <
			    _thread_dtablesize) {
				for (i = 0; i < pthread->data.poll_data->nfds; i++) {
					_thread_pfd_table[nfds + i].fd =
					    pthread->data.poll_data->fds[i].fd;
					_thread_pfd_table[nfds + i].events =
					    pthread->data.poll_data->fds[i].events;
				}
				nfds += pthread->data.poll_data->nfds;
			}
			break;

		/* Other states do not depend on file I/O. */
		default:
			break;
		}
	}
	PTHREAD_WAITQ_CLEARACTIVE();
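
	/*
	 * Note: the scan after poll() below walks _thread_pfd_table in the
	 * same order this loop built it (the kernel pipe entry first, when
	 * added, then one or more slots per waiting thread), re-advancing
	 * nfds in lockstep so each thread is matched with its own revents
	 * entries.
	 */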

	/*
	 * Wait for a file descriptor to be ready for read, write, or
	 * an exception, or a timeout to occur:
	 */
	count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms);

	if (kern_pipe_added != 0)
		/*
		 * Remove the pthread kernel pipe file descriptor
		 * from the pollfd table:
		 */
		nfds = 1;
	else
		nfds = 0;

	/*
	 * Check if it is possible that there are bytes in the kernel
	 * read pipe waiting to be read:
	 */
	if (count < 0 || ((kern_pipe_added != 0) &&
	    (_thread_pfd_table[0].revents & POLLRDNORM))) {
		/*
		 * If the kernel read pipe was included in the
		 * count:
		 */
		if (count > 0) {
			/* Decrement the count of file descriptors: */
			count--;
		}

		if (_sigq_check_reqd != 0) {
			/* Reset flag before handling signals: */
			_sigq_check_reqd = 0;

			dequeue_signals();
		}
	}

	/*
	 * Check if any file descriptors are ready:
	 */
	if (count > 0) {
		/*
		 * Enter a loop to look for threads waiting on file
		 * descriptors that are flagged as available by the
		 * poll syscall:
		 */
		PTHREAD_WAITQ_SETACTIVE();
		TAILQ_FOREACH(pthread, &_workq, qe) {
			switch (pthread->state) {
			case PS_SPINBLOCK:
				/*
				 * If the lock is available, let the thread run.
				 */
				if (pthread->data.spinlock->access_lock == 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread, PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();

					/*
					 * One less thread in a spinblock state:
					 */
					_spinblock_count--;
				}
				break;

			/* File descriptor read wait: */
			case PS_FDR_WAIT:
				if ((nfds < _thread_dtablesize) &&
				    (_thread_pfd_table[nfds].revents & POLLRDNORM)) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread, PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();
				}
				nfds++;
				break;

			/* File descriptor write wait: */
			case PS_FDW_WAIT:
				if ((nfds < _thread_dtablesize) &&
				    (_thread_pfd_table[nfds].revents & POLLWRNORM)) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread, PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();
				}
				nfds++;
				break;

			/* File descriptor poll or select wait: */
			case PS_POLL_WAIT:
			case PS_SELECT_WAIT:
				if (pthread->data.poll_data->nfds + nfds <
				    _thread_dtablesize) {
					/*
					 * Enter a loop looking for I/O
					 * readiness:
					 */
					found = 0;
					for (i = 0; i < pthread->data.poll_data->nfds; i++) {
						if (_thread_pfd_table[nfds + i].revents != 0) {
							pthread->data.poll_data->fds[i].revents =
							    _thread_pfd_table[nfds + i].revents;
							found++;
						}
					}

					/* Increment before destroying: */
					nfds += pthread->data.poll_data->nfds;

					if (found != 0) {
						pthread->data.poll_data->nfds = found;
						PTHREAD_WAITQ_CLEARACTIVE();
						PTHREAD_WORKQ_REMOVE(pthread);
						PTHREAD_NEW_STATE(pthread, PS_RUNNING);
						PTHREAD_WAITQ_SETACTIVE();
					}
				}
				else
					nfds += pthread->data.poll_data->nfds;
				break;

			/* Other states do not depend on file I/O. */
			default:
				break;
			}
		}
		PTHREAD_WAITQ_CLEARACTIVE();
	}
	else if (_spinblock_count != 0) {
		/*
		 * Enter a loop to look for threads waiting on a spinlock
		 * that is now available.
		 */
		PTHREAD_WAITQ_SETACTIVE();
		TAILQ_FOREACH(pthread, &_workq, qe) {
			if (pthread->state == PS_SPINBLOCK) {
				/*
				 * If the lock is available, let the thread run.
				 */
				if (pthread->data.spinlock->access_lock == 0) {
					PTHREAD_WAITQ_CLEARACTIVE();
					PTHREAD_WORKQ_REMOVE(pthread);
					PTHREAD_NEW_STATE(pthread, PS_RUNNING);
					PTHREAD_WAITQ_SETACTIVE();

					/*
					 * One less thread in a spinblock state:
					 */
					_spinblock_count--;
				}
			}
		}
		PTHREAD_WAITQ_CLEARACTIVE();
	}

	/* Unprotect the scheduling queues: */
	_queue_signals = 0;

	while (_sigq_check_reqd != 0) {
		/* Handle queued signals: */
		_sigq_check_reqd = 0;

		/* Protect the scheduling queues: */
		_queue_signals = 1;

		dequeue_signals();

		/* Unprotect the scheduling queues: */
		_queue_signals = 0;
	}

	/* Nothing to return. */
	return;
}

void
_thread_kern_set_timeout(struct timespec * timeout)
{
	struct timespec current_time;
	struct timeval	tv;

	/* Reset the timeout flag for the running thread: */
	_thread_run->timeout = 0;

	/* Check if the thread is to wait forever: */
	if (timeout == NULL) {
		/*
		 * Set the wakeup time to something that can be recognised as
		 * different to an actual time of day:
		 */
		_thread_run->wakeup_time.tv_sec = -1;
		_thread_run->wakeup_time.tv_nsec = -1;
	}
	/* Check if no waiting is required: */
	else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
		/* Set the wake up time to 'immediately': */
		_thread_run->wakeup_time.tv_sec = 0;
		_thread_run->wakeup_time.tv_nsec = 0;
	} else {
		/* Get the current time: */
		gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time);

		/* Calculate the time for the current thread to wake up: */
		_thread_run->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
		_thread_run->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec;

		/* Check if the nanosecond field needs to wrap: */
		if (_thread_run->wakeup_time.tv_nsec >= 1000000000) {
			/* Wrap the nanosecond field: */
			_thread_run->wakeup_time.tv_sec += 1;
			_thread_run->wakeup_time.tv_nsec -= 1000000000;
		}
	}
}
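
/*
 * Example of the wrap above: current_time = 5.900000000s plus a timeout
 * of 0.200000000s yields tv_nsec = 1100000000, which normalises to a
 * wakeup_time of 6.100000000s.
 */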

void
_thread_kern_sig_defer(void)
{
	/* Allow signal deferral to be recursive. */
	_thread_run->sig_defer_count++;
}

void
_thread_kern_sig_undefer(void)
{
	pthread_t pthread;
	int need_resched = 0;

	/*
	 * Perform checks to yield only if we are about to undefer
	 * signals.
	 */
	if (_thread_run->sig_defer_count > 1) {
		/* Decrement the signal deferral count. */
		_thread_run->sig_defer_count--;
	}
	else if (_thread_run->sig_defer_count == 1) {
		/* Reenable signals: */
		_thread_run->sig_defer_count = 0;

		/*
		 * Check if there are queued signals:
		 */
		while (_sigq_check_reqd != 0) {
			/* Defer scheduling while we process queued signals: */
			_thread_run->sig_defer_count = 1;

			/* Clear the flag before checking the signal queue: */
			_sigq_check_reqd = 0;

			/* Dequeue and handle signals: */
			dequeue_signals();

			/*
			 * To avoid an unnecessary reschedule, check whether
			 * signal handling caused a higher priority thread to
			 * become ready.
			 */
			if ((need_resched == 0) &&
			    (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
			    (pthread->active_priority > _thread_run->active_priority))) {
				need_resched = 1;
			}

			/* Reenable signals: */
			_thread_run->sig_defer_count = 0;
		}
	}

	/* Yield the CPU if necessary: */
	if (need_resched || _thread_run->yield_on_sig_undefer != 0) {
		_thread_run->yield_on_sig_undefer = 0;
		_thread_kern_sched(NULL);
	}
}
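
/*
 * Illustrative usage: queue manipulations elsewhere in the library are
 * bracketed as
 *
 *	_thread_kern_sig_defer();
 *	... modify the scheduling queues ...
 *	_thread_kern_sig_undefer();
 *
 * so a scheduling signal arriving mid-update is queued and replayed here
 * instead of running the scheduler over an inconsistent queue.
 */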

static void
dequeue_signals(void)
{
	char	bufr[128];	/* one byte per signal; size is arbitrary */
	int	i, num;
	pthread_t pthread;

	/*
	 * Enter a loop to read and handle queued signals from the
	 * pthread kernel pipe:
	 */
	while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
	    sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
		/*
		 * The buffer read contains one byte per signal and
		 * each byte is the signal number.
		 */
		for (i = 0; i < num; i++) {
			if ((int) bufr[i] == _SCHED_SIGNAL) {
				/*
				 * Scheduling signals shouldn't ever be
				 * queued; just ignore it for now.
				 */
			}
			else {
				/* Handle this signal: */
				pthread = _thread_sig_handle((int) bufr[i],
				    NULL);
				if (pthread != NULL)
					_thread_sig_deliver(pthread,
					    (int) bufr[i]);
			}
		}
	}
	if ((num < 0) && (errno != EAGAIN)) {
		/*
		 * The only error we should expect is if there is
		 * no data to read:
		 */
		PANIC("Unable to read from thread kernel pipe");
	}
}
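
/*
 * Note: the signal handler writes one byte per pending signal to this
 * pipe, so its read end doubles as the wakeup mechanism for
 * _thread_kern_poll(), where it occupies the first pollfd slot.
 */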

static void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
{
	pthread_t tid_out = thread_out;
	pthread_t tid_in = thread_in;

	if ((tid_out != NULL) &&
	    (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0)
		tid_out = NULL;
	if ((tid_in != NULL) &&
	    (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0)
		tid_in = NULL;

	if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
		/* Run the scheduler switch hook: */
		_sched_switch_hook(tid_out, tid_in);
	}
}
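
/*
 * Library-private threads are hidden from the hook by passing NULL in
 * their place; a switch involving only private threads therefore
 * collapses to tid_out == tid_in == NULL and no hook call at all.
 */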