2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include "pthread_private.h"
/*
 * FDQ_INSERT(q, p): append thread p to the fd wait queue q (a TAILQ)
 * and set PTHREAD_FLAGS_IN_FDQ so FDQ_REMOVE can later tell whether the
 * thread is actually on a queue before removing it.
 * NOTE(review): the original do { ... } while (0) wrapper lines are not
 * visible in this view of the file — confirm against the full source.
 */
39 #define FDQ_INSERT(q,p) \
41 TAILQ_INSERT_TAIL(q,p,qe); \
42 p->flags |= PTHREAD_FLAGS_IN_FDQ; \
/*
 * FDQ_REMOVE(q, p): remove thread p from fd wait queue q, but only if
 * the PTHREAD_FLAGS_IN_FDQ flag says it is queued (makes removal safe to
 * call unconditionally); clears the flag afterwards.
 * NOTE(review): enclosing do { ... } while (0) lines are missing from
 * this view — confirm against the full source.
 */
45 #define FDQ_REMOVE(q,p) \
47 if ((p->flags & PTHREAD_FLAGS_IN_FDQ) != 0) { \
48 TAILQ_REMOVE(q,p,qe); \
49 p->flags &= ~PTHREAD_FLAGS_IN_FDQ; \
54 /* Static variables: */
/* Serialises installation of new entries into _thread_fd_table[];
 * per-entry state is protected by each entry's own lock, not this one. */
55 static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER;
58 #ifdef _FDLOCKS_ENABLED
/* Forward declarations: select the next non-interrupted waiter from an
 * fd's read or write wait queue (defined at the bottom of this file). */
59 static inline pthread_t fd_next_reader(int fd);
60 static inline pthread_t fd_next_writer(int fd);
/*
 * _thread_fd_table_init(fd) — lazily create the fd_table_entry for fd.
 *
 * Validates fd against _thread_dtablesize, allocates and initialises an
 * entry (owners, queues, lock counts, file flags), forces the descriptor
 * to O_NONBLOCK, and installs the entry into _thread_fd_table[] under
 * fd_table_lock, resolving the race where another thread initialised the
 * same slot concurrently (the loser's entry is discarded).
 * NOTE(review): several lines (return type, error assignments, braces,
 * returns) are missing from this view; tokens below are unchanged.
 */
65 * This function *must* return -1 and set the thread specific errno
66 * as a system call. This is because the error return from this
67 * function is propagated directly back from thread-wrapped system
72 _thread_fd_table_init(int fd)
75 struct fd_table_entry *entry;
/* Library not yet initialised — presumably nothing to do; confirm the
 * (missing) statement guarded by this test in the full source. */
78 if (_thread_initial == NULL)
81 /* Check if the file descriptor is out of range: */
82 if (fd < 0 || fd >= _thread_dtablesize) {
83 /* Return a bad file descriptor error: */
89 * Check if memory has already been allocated for this file
92 else if (_thread_fd_table[fd] != NULL) {
93 /* Memory has already been allocated. */
95 /* Allocate memory for the file descriptor table entry: */
96 } else if ((entry = (struct fd_table_entry *)
97 malloc(sizeof(struct fd_table_entry))) == NULL) {
98 /* Return an insufficient memory error: */
102 /* Initialise the file locks: */
103 memset(&entry->lock, 0, sizeof(entry->lock));
104 entry->r_owner = NULL;
105 entry->w_owner = NULL;
106 entry->r_fname = NULL;
107 entry->w_fname = NULL;
110 entry->r_lockcount = 0;
111 entry->w_lockcount = 0;
113 /* Initialise the read/write queues: */
114 TAILQ_INIT(&entry->r_queue);
115 TAILQ_INIT(&entry->w_queue);
117 /* Get the flags for the file: */
/* stdio fds (0..2) with cached flags skip the fcntl — see below. */
118 if (((fd >= 3) || (_pthread_stdio_flags[fd] == -1)) &&
119 (entry->flags = __sys_fcntl(fd, F_GETFL, 0)) == -1) {
123 /* Check if a stdio descriptor: */
124 if ((fd < 3) && (_pthread_stdio_flags[fd] != -1))
126 * Use the stdio flags read by
127 * _pthread_init() to avoid
128 * mistaking the non-blocking
129 * flag that, when set on one
130 * stdio fd, is set on all stdio
133 entry->flags = _pthread_stdio_flags[fd];
136 * Make the file descriptor non-blocking.
137 * This might fail if the device driver does
138 * not support non-blocking calls, or if the
139 * driver is naturally non-blocking.
/* Return value deliberately ignored per the comment above. */
142 __sys_fcntl(fd, F_SETFL,
143 entry->flags | O_NONBLOCK);
146 /* Lock the file descriptor table: */
147 _SPINLOCK(&fd_table_lock);
150 * Check if another thread allocated the
151 * file descriptor entry while this thread
152 * was doing the same thing. The table wasn't
153 * kept locked during this operation because
154 * it has the potential to recurse.
156 if (_thread_fd_table[fd] == NULL) {
157 /* This thread wins: */
158 _thread_fd_table[fd] = entry;
162 /* Unlock the file descriptor table: */
163 _SPINUNLOCK(&fd_table_lock);
167 * Check if another thread initialised the table entry
168 * before this one could:
172 * Throw away the table entry that this thread
173 * prepared. The other thread wins.
178 /* Return the completion status: */
/*
 * _thread_fd_getflags(fd) — return the cached file status flags for fd
 * from its table entry, if one exists. The fallback return for an
 * uninitialised entry is on a line not visible in this view.
 */
183 _thread_fd_getflags(int fd)
185 if (_thread_fd_table[fd] != NULL)
186 return (_thread_fd_table[fd]->flags);
/*
 * _thread_fd_setflags(fd, flags) — update the cached file status flags
 * for fd's table entry; silently does nothing if the entry is absent.
 */
192 _thread_fd_setflags(int fd, int flags)
194 if (_thread_fd_table[fd] != NULL)
195 _thread_fd_table[fd]->flags = flags;
198 #ifdef _FDLOCKS_ENABLED
/*
 * _thread_fd_unlock(fd, lock_type) — release the calling thread's
 * read and/or write lock on fd (lock_type: FD_READ, FD_WRITE, FD_RDWR).
 *
 * Decrements the matching lock count; when it reaches zero, hands the
 * lock to the next eligible waiter (fd_next_reader/fd_next_writer),
 * dequeues that thread and wakes it with PTHREAD_NEW_STATE(...,
 * PS_RUNNING). Signals are deferred and the entry's spinlock held for
 * the whole hand-off.
 * NOTE(review): some lines (returns, braces, `int ret;`) are missing
 * from this view; tokens below are unchanged.
 */
200 _thread_fd_unlock(int fd, int lock_type)
202 struct pthread *curthread = _get_curthread();
206 * Check that the file descriptor table is initialised for this
209 if ((ret = _thread_fd_table_init(fd)) == 0) {
211 * Defer signals to protect the scheduling queues from
212 * access by the signal handler:
214 _thread_kern_sig_defer();
217 * Lock the file descriptor table entry to prevent
218 * other threads for clashing with the current
221 _SPINLOCK(&_thread_fd_table[fd]->lock);
223 /* Check if the running thread owns the read lock: */
224 if (_thread_fd_table[fd]->r_owner == curthread) {
225 /* Check the file descriptor and lock types: */
226 if (lock_type == FD_READ || lock_type == FD_RDWR) {
228 * Decrement the read lock count for the
231 _thread_fd_table[fd]->r_lockcount--;
234 * Check if the running thread still has read
235 * locks on this file descriptor:
237 if (_thread_fd_table[fd]->r_lockcount != 0) {
240 * Get the next thread in the queue for a
241 * read lock on this file descriptor:
/* NOTE(review): the `== NULL` here reads inverted for a hand-off; the
 * branch body dequeues and wakes r_owner, which implies non-NULL.
 * Lines are missing around this spot — verify against full source. */
243 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
245 /* Remove this thread from the queue: */
246 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
247 _thread_fd_table[fd]->r_owner);
250 * Set the state of the new owner of
251 * the thread to running:
253 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
256 * Reset the number of read locks.
257 * This will be incremented by the
258 * new owner of the lock when it sees
259 * that it has the lock.
261 _thread_fd_table[fd]->r_lockcount = 0;
265 /* Check if the running thread owns the write lock: */
266 if (_thread_fd_table[fd]->w_owner == curthread) {
267 /* Check the file descriptor and lock types: */
268 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
270 * Decrement the write lock count for the
273 _thread_fd_table[fd]->w_lockcount--;
276 * Check if the running thread still has
277 * write locks on this file descriptor:
279 if (_thread_fd_table[fd]->w_lockcount != 0) {
282 * Get the next thread in the queue for a
283 * write lock on this file descriptor:
/* NOTE(review): same apparent inversion as the read-lock case above. */
285 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
287 /* Remove this thread from the queue: */
288 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
289 _thread_fd_table[fd]->w_owner);
292 * Set the state of the new owner of
293 * the thread to running:
295 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
298 * Reset the number of write locks.
299 * This will be incremented by the
300 * new owner of the lock when it
301 * sees that it has the lock.
303 _thread_fd_table[fd]->w_lockcount = 0;
308 /* Unlock the file descriptor table entry: */
309 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
312 * Undefer and handle pending signals, yielding if
315 _thread_kern_sig_undefer();
/*
 * _thread_fd_lock(fd, lock_type, timeout) — acquire a read and/or write
 * lock on fd for the calling thread, optionally bounded by timeout
 * (NULL presumably means wait forever — confirm in pthread_private.h).
 *
 * If the lock is held by another thread, the caller joins the fd's wait
 * queue and sleeps in PS_FDLR_WAIT / PS_FDLW_WAIT until the unlocking
 * thread hands it ownership, the timeout fires, or it is interrupted
 * (curthread->interrupted). Lock counts support recursive acquisition
 * by the same thread. On interruption, any registered continuation is
 * invoked.
 * NOTE(review): some lines (braces, returns, `int ret;`) are missing
 * from this view; tokens below are unchanged.
 */
320 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
322 struct pthread *curthread = _get_curthread();
326 * Check that the file descriptor table is initialised for this
329 if ((ret = _thread_fd_table_init(fd)) == 0) {
330 /* Clear the interrupted flag: */
331 curthread->interrupted = 0;
334 * Lock the file descriptor table entry to prevent
335 * other threads for clashing with the current
338 _SPINLOCK(&_thread_fd_table[fd]->lock);
340 /* Check the file descriptor and lock types: */
341 if (lock_type == FD_READ || lock_type == FD_RDWR) {
343 * Wait for the file descriptor to be locked
344 * for read for the current thread:
346 while ((_thread_fd_table[fd]->r_owner != curthread) &&
347 (curthread->interrupted == 0)) {
349 * Check if the file descriptor is locked by
352 if (_thread_fd_table[fd]->r_owner != NULL) {
354 * Another thread has locked the file
355 * descriptor for read, so join the
356 * queue of threads waiting for a
357 * read lock on this file descriptor:
359 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
362 * Save the file descriptor details
363 * in the thread structure for the
366 curthread->data.fd.fd = fd;
368 /* Set the timeout: */
369 _thread_kern_set_timeout(timeout);
372 * Unlock the file descriptor
/* Entry lock must be dropped before sleeping in the scheduler. */
375 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
378 * Schedule this thread to wait on
379 * the read lock. It will only be
380 * woken when it becomes the next in
381 * the queue and is granted access
382 * to the lock by the thread
383 * that is unlocking the file
386 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
389 * Lock the file descriptor
392 _SPINLOCK(&_thread_fd_table[fd]->lock);
/* Interrupted/timed out while queued: take ourselves back off. */
394 if (curthread->interrupted != 0) {
395 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
400 * The running thread now owns the
401 * read lock on this file descriptor:
403 _thread_fd_table[fd]->r_owner = curthread;
406 * Reset the number of read locks for
407 * this file descriptor:
409 _thread_fd_table[fd]->r_lockcount = 0;
413 if (_thread_fd_table[fd]->r_owner == curthread)
414 /* Increment the read lock count: */
415 _thread_fd_table[fd]->r_lockcount++;
418 /* Check the file descriptor and lock types: */
419 if (curthread->interrupted == 0 &&
420 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
422 * Wait for the file descriptor to be locked
423 * for write for the current thread:
425 while ((_thread_fd_table[fd]->w_owner != curthread) &&
426 (curthread->interrupted == 0)) {
428 * Check if the file descriptor is locked by
431 if (_thread_fd_table[fd]->w_owner != NULL) {
433 * Another thread has locked the file
434 * descriptor for write, so join the
435 * queue of threads waiting for a
436 * write lock on this file
439 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
442 * Save the file descriptor details
443 * in the thread structure for the
446 curthread->data.fd.fd = fd;
448 /* Set the timeout: */
449 _thread_kern_set_timeout(timeout);
452 * Unlock the file descriptor
455 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
458 * Schedule this thread to wait on
459 * the write lock. It will only be
460 * woken when it becomes the next in
461 * the queue and is granted access to
462 * the lock by the thread that is
463 * unlocking the file descriptor.
465 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
468 * Lock the file descriptor
471 _SPINLOCK(&_thread_fd_table[fd]->lock);
473 if (curthread->interrupted != 0) {
474 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
479 * The running thread now owns the
480 * write lock on this file
483 _thread_fd_table[fd]->w_owner = curthread;
486 * Reset the number of write locks
487 * for this file descriptor:
489 _thread_fd_table[fd]->w_lockcount = 0;
493 if (_thread_fd_table[fd]->w_owner == curthread)
494 /* Increment the write lock count: */
495 _thread_fd_table[fd]->w_lockcount++;
498 /* Unlock the file descriptor table entry: */
499 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
501 if (curthread->interrupted != 0) {
504 if (curthread->continuation != NULL)
505 curthread->continuation((void *)curthread);
509 /* Return the completion status: */
/*
 * _thread_fd_unlock_debug(fd, lock_type, fname, lineno) — same logic as
 * _thread_fd_unlock, but takes the caller's source file/line and uses
 * _spinlock_debug for the entry lock so lock problems can be traced.
 * Keep its body in lockstep with _thread_fd_unlock when changing either.
 * NOTE(review): some lines (returns, braces, `int ret;`) are missing
 * from this view; tokens below are unchanged.
 */
514 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
516 struct pthread *curthread = _get_curthread();
520 * Check that the file descriptor table is initialised for this
523 if ((ret = _thread_fd_table_init(fd)) == 0) {
525 * Defer signals to protect the scheduling queues from
526 * access by the signal handler:
528 _thread_kern_sig_defer();
531 * Lock the file descriptor table entry to prevent
532 * other threads for clashing with the current
535 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
537 /* Check if the running thread owns the read lock: */
538 if (_thread_fd_table[fd]->r_owner == curthread) {
539 /* Check the file descriptor and lock types: */
540 if (lock_type == FD_READ || lock_type == FD_RDWR) {
542 * Decrement the read lock count for the
545 _thread_fd_table[fd]->r_lockcount--;
548 * Check if the running thread still has read
549 * locks on this file descriptor:
551 if (_thread_fd_table[fd]->r_lockcount != 0) {
554 * Get the next thread in the queue for a
555 * read lock on this file descriptor:
/* NOTE(review): `== NULL` reads inverted for a hand-off (the body
 * wakes r_owner); lines are missing here — verify against full source. */
557 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
559 /* Remove this thread from the queue: */
560 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
561 _thread_fd_table[fd]->r_owner);
564 * Set the state of the new owner of
565 * the thread to running:
567 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
570 * Reset the number of read locks.
571 * This will be incremented by the
572 * new owner of the lock when it sees
573 * that it has the lock.
575 _thread_fd_table[fd]->r_lockcount = 0;
579 /* Check if the running thread owns the write lock: */
580 if (_thread_fd_table[fd]->w_owner == curthread) {
581 /* Check the file descriptor and lock types: */
582 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
584 * Decrement the write lock count for the
587 _thread_fd_table[fd]->w_lockcount--;
590 * Check if the running thread still has
591 * write locks on this file descriptor:
593 if (_thread_fd_table[fd]->w_lockcount != 0) {
596 * Get the next thread in the queue for a
597 * write lock on this file descriptor:
599 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
601 /* Remove this thread from the queue: */
602 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
603 _thread_fd_table[fd]->w_owner);
606 * Set the state of the new owner of
607 * the thread to running:
609 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
612 * Reset the number of write locks.
613 * This will be incremented by the
614 * new owner of the lock when it
615 * sees that it has the lock.
617 _thread_fd_table[fd]->w_lockcount = 0;
622 /* Unlock the file descriptor table entry: */
623 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
626 * Undefer and handle pending signals, yielding if
629 _thread_kern_sig_undefer();
/*
 * _thread_fd_lock_debug(fd, lock_type, timeout, fname, lineno) — same
 * logic as _thread_fd_lock, plus debugging bookkeeping: records the
 * caller's file/line in curthread->data.fd and in the entry's
 * r_fname/r_lineno (w_fname/w_lineno), and uses _spinlock_debug.
 * Keep its body in lockstep with _thread_fd_lock when changing either.
 * NOTE(review): some lines (braces, returns, `int ret;`) are missing
 * from this view; tokens below are unchanged.
 */
634 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
635 char *fname, int lineno)
637 struct pthread *curthread = _get_curthread();
641 * Check that the file descriptor table is initialised for this
644 if ((ret = _thread_fd_table_init(fd)) == 0) {
645 /* Clear the interrupted flag: */
646 curthread->interrupted = 0;
649 * Lock the file descriptor table entry to prevent
650 * other threads for clashing with the current
653 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
655 /* Check the file descriptor and lock types: */
656 if (lock_type == FD_READ || lock_type == FD_RDWR) {
658 * Wait for the file descriptor to be locked
659 * for read for the current thread:
661 while ((_thread_fd_table[fd]->r_owner != curthread) &&
662 (curthread->interrupted == 0)) {
664 * Check if the file descriptor is locked by
667 if (_thread_fd_table[fd]->r_owner != NULL) {
669 * Another thread has locked the file
670 * descriptor for read, so join the
671 * queue of threads waiting for a
672 * read lock on this file descriptor:
674 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
677 * Save the file descriptor details
678 * in the thread structure for the
681 curthread->data.fd.fd = fd;
682 curthread->data.fd.branch = lineno;
683 curthread->data.fd.fname = fname;
685 /* Set the timeout: */
686 _thread_kern_set_timeout(timeout);
689 * Unlock the file descriptor
/* Entry lock must be dropped before sleeping in the scheduler. */
692 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
695 * Schedule this thread to wait on
696 * the read lock. It will only be
697 * woken when it becomes the next in
698 * the queue and is granted access
699 * to the lock by the thread
700 * that is unlocking the file
703 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
706 * Lock the file descriptor
709 _SPINLOCK(&_thread_fd_table[fd]->lock);
/* Interrupted/timed out while queued: take ourselves back off. */
711 if (curthread->interrupted != 0) {
712 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
717 * The running thread now owns the
718 * read lock on this file descriptor:
720 _thread_fd_table[fd]->r_owner = curthread;
723 * Reset the number of read locks for
724 * this file descriptor:
726 _thread_fd_table[fd]->r_lockcount = 0;
729 * Save the source file details for
732 _thread_fd_table[fd]->r_fname = fname;
733 _thread_fd_table[fd]->r_lineno = lineno;
737 if (_thread_fd_table[fd]->r_owner == curthread)
738 /* Increment the read lock count: */
739 _thread_fd_table[fd]->r_lockcount++;
742 /* Check the file descriptor and lock types: */
743 if (curthread->interrupted == 0 &&
744 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
746 * Wait for the file descriptor to be locked
747 * for write for the current thread:
749 while ((_thread_fd_table[fd]->w_owner != curthread) &&
750 (curthread->interrupted == 0)) {
752 * Check if the file descriptor is locked by
755 if (_thread_fd_table[fd]->w_owner != NULL) {
757 * Another thread has locked the file
758 * descriptor for write, so join the
759 * queue of threads waiting for a
760 * write lock on this file
763 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
766 * Save the file descriptor details
767 * in the thread structure for the
770 curthread->data.fd.fd = fd;
771 curthread->data.fd.branch = lineno;
772 curthread->data.fd.fname = fname;
774 /* Set the timeout: */
775 _thread_kern_set_timeout(timeout);
778 * Unlock the file descriptor
781 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
784 * Schedule this thread to wait on
785 * the write lock. It will only be
786 * woken when it becomes the next in
787 * the queue and is granted access to
788 * the lock by the thread that is
789 * unlocking the file descriptor.
791 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
794 * Lock the file descriptor
797 _SPINLOCK(&_thread_fd_table[fd]->lock);
799 if (curthread->interrupted != 0) {
800 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
805 * The running thread now owns the
806 * write lock on this file
809 _thread_fd_table[fd]->w_owner = curthread;
812 * Reset the number of write locks
813 * for this file descriptor:
815 _thread_fd_table[fd]->w_lockcount = 0;
818 * Save the source file details for
821 _thread_fd_table[fd]->w_fname = fname;
822 _thread_fd_table[fd]->w_lineno = lineno;
826 if (_thread_fd_table[fd]->w_owner == curthread)
827 /* Increment the write lock count: */
828 _thread_fd_table[fd]->w_lockcount++;
831 /* Unlock the file descriptor table entry: */
832 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
834 if (curthread->interrupted != 0) {
837 if (curthread->continuation != NULL)
838 curthread->continuation((void *)curthread);
842 /* Return the completion status: */
/*
 * _thread_fd_unlock_owned(pthread) — release every fd read/write lock
 * owned by the given (presumably exiting) thread. Scans the whole fd
 * table; for each entry owned by pthread it zeroes the lock count and
 * hands ownership to the next eligible waiter, waking it. Here the
 * hand-off test is `!= NULL`, the expected sense for a wake-up.
 * NOTE(review): closing braces/returns are missing from this view;
 * tokens below are unchanged.
 */
847 _thread_fd_unlock_owned(pthread_t pthread)
851 for (fd = 0; fd < _thread_dtablesize; fd++) {
852 if ((_thread_fd_table[fd] != NULL) &&
853 ((_thread_fd_table[fd]->r_owner == pthread) ||
854 (_thread_fd_table[fd]->w_owner == pthread))) {
856 * Defer signals to protect the scheduling queues
857 * from access by the signal handler:
859 _thread_kern_sig_defer();
862 * Lock the file descriptor table entry to prevent
863 * other threads for clashing with the current
866 _SPINLOCK(&_thread_fd_table[fd]->lock);
868 /* Check if the thread owns the read lock: */
869 if (_thread_fd_table[fd]->r_owner == pthread) {
870 /* Clear the read lock count: */
871 _thread_fd_table[fd]->r_lockcount = 0;
874 * Get the next thread in the queue for a
875 * read lock on this file descriptor:
877 if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) {
878 /* Remove this thread from the queue: */
879 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
880 _thread_fd_table[fd]->r_owner);
883 * Set the state of the new owner of
884 * the thread to running:
886 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
890 /* Check if the thread owns the write lock: */
891 if (_thread_fd_table[fd]->w_owner == pthread) {
892 /* Clear the write lock count: */
893 _thread_fd_table[fd]->w_lockcount = 0;
896 * Get the next thread in the queue for a
897 * write lock on this file descriptor:
899 if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) {
900 /* Remove this thread from the queue: */
901 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
902 _thread_fd_table[fd]->w_owner);
905 * Set the state of the new owner of
906 * the thread to running:
908 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
913 /* Unlock the file descriptor table entry: */
914 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
917 * Undefer and handle pending signals, yielding if
920 _thread_kern_sig_undefer();
/*
 * _fd_lock_backout(pthread) — back a thread out of an fd-lock wait.
 * If the thread is blocked in PS_FDLR_WAIT / PS_FDLW_WAIT (the case
 * labels are on lines not visible here), remove it from the matching
 * read or write wait queue of the fd recorded in pthread->data.fd.fd.
 * Signals are deferred around the queue manipulation.
 * NOTE(review): case labels, unlock calls and braces are missing from
 * this view; tokens below are unchanged.
 */
926 _fd_lock_backout(pthread_t pthread)
931 * Defer signals to protect the scheduling queues
932 * from access by the signal handler:
934 _thread_kern_sig_defer();
936 switch (pthread->state) {
939 fd = pthread->data.fd.fd;
942 * Lock the file descriptor table entry to prevent
943 * other threads for clashing with the current
946 _SPINLOCK(&_thread_fd_table[fd]->lock);
948 /* Remove the thread from the waiting queue: */
949 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
953 fd = pthread->data.fd.fd;
956 * Lock the file descriptor table entry to prevent
957 * other threads from clashing with the current
960 _SPINLOCK(&_thread_fd_table[fd]->lock);
962 /* Remove the thread from the waiting queue: */
963 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
971 * Undefer and handle pending signals, yielding if
974 _thread_kern_sig_undefer();
977 static inline pthread_t
/*
 * fd_next_reader(fd) — return the first thread on fd's read wait queue
 * that has not been interrupted/canceled, discarding interrupted
 * entries from the queue as it goes. The `return (pthread);` (NULL if
 * the queue drained) is on a line not visible in this view.
 */
978 fd_next_reader(int fd)
982 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) &&
983 (pthread->interrupted != 0)) {
985 * This thread has either been interrupted by a signal or
986 * it has been canceled. Remove it from the queue.
988 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
994 static inline pthread_t
/*
 * fd_next_writer(fd) — write-queue counterpart of fd_next_reader():
 * returns the first non-interrupted waiter on fd's write queue,
 * discarding interrupted entries. The trailing return is on a line not
 * visible in this view.
 */
995 fd_next_writer(int fd)
999 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) &&
1000 (pthread->interrupted != 0)) {
1002 * This thread has either been interrupted by a signal or
1003 * it has been canceled. Remove it from the queue.
1005 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
/*
 * Stub variants compiled when _FDLOCKS_ENABLED is not defined (the
 * `#else` is on a line not visible in this view): the unlock and
 * backout entry points become no-ops, and the lock entry points merely
 * ensure the fd table entry exists via _thread_fd_table_init(), whose
 * status they return. Interfaces match the full versions above.
 */
1014 _thread_fd_unlock(int fd, int lock_type)
1019 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
1022 * Insure that the file descriptor table is initialized for this
1025 return (_thread_fd_table_init(fd));
1029 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
1034 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
1035 char *fname, int lineno)
1038 * Insure that the file descriptor table is initialized for this
1041 return (_thread_fd_table_init(fd));
1045 _thread_fd_unlock_owned(pthread_t pthread)
1050 _fd_lock_backout(pthread_t pthread)