sys/kern/sys_pipe.c
1 /*-
2  * Copyright (c) 1996 John S. Dyson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice immediately at the beginning of the file, without modification,
10  *    this list of conditions, and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Absolutely no warranty of function or purpose is made by the author
15  *    John S. Dyson.
16  * 4. Modifications may be freely made to this file if the above conditions
17  *    are met.
18  */
19
20 /*
21  * This file contains a high-performance replacement for the socket-based
22  * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
23  * all features of sockets, but does do everything that pipes normally
24  * do.
25  */
26
27 /*
28  * This code has two modes of operation, a small write mode and a large
29  * write mode.  The small write mode acts like conventional pipes with
30  * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
31  * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
32  * and PIPE_SIZE in size, the sending process pins the underlying pages in
33  * memory, and the receiving process copies directly from these pinned pages
34  * in the sending process.
35  *
36  * If the sending process receives a signal, it is possible that it will
37  * go away, and certainly its address space can change, because control
38  * is returned back to the user-mode side.  In that case, the pipe code
39  * arranges to copy the buffer supplied by the user process, to a pageable
40  * kernel buffer, and the receiving process will grab the data from the
41  * pageable kernel buffer.  Since signals don't happen all that often,
42  * the copy operation is normally eliminated.
43  *
44  * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
45  * happen for small transfers so that the system will not spend all of
46  * its time context switching.
47  *
48  * In order to limit the resource use of pipes, two sysctls exist:
49  *
50  * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
51  * address space available to us in pipe_map. This value is normally
52  * autotuned, but may also be loader tuned.
53  *
54  * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
55  * memory in use by pipes.
56  *
57  * Based on how large pipekva is relative to maxpipekva, the following
58  * will happen:
59  *
60  * 0% - 50%:
61  *     New pipes are given 16K of memory backing, pipes may dynamically
62  *     grow to as large as 64K where needed.
63  * 50% - 75%:
64  *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
65  *     existing pipes may NOT grow.
66  * 75% - 100%:
67  *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
68  *     existing pipes will be shrunk down to 4K whenever possible.
69  *
70  * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0.  If
71  * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
72  * resize which MUST occur for reverse-direction pipes when they are
73  * first used.
74  *
75  * Additional information about the current state of pipes may be obtained
76  * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
77  * and kern.ipc.piperesizefail.
78  *
79  * Locking rules:  There are two locks present here:  A mutex, used via
80  * PIPE_LOCK, and a flag, used via pipelock().  All locking is done via
81  * the flag, as mutexes cannot persist over uiomove.  The mutex
82  * exists only to guard access to the flag, and is not in itself a
83  * locking mechanism.  Also note that there is only a single mutex for
84  * both directions of a pipe.
85  *
86  * As pipelock() may have to sleep before it can acquire the flag, it
87  * is important to reread all data after a call to pipelock(); everything
88  * in the structure may have changed.
89  */
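
The two resource sysctls described above are ordinary kernel variables exported further down in this file (kern.ipc.maxpipekva as a read-only tunable, kern.ipc.pipekva as read-only state).  As an illustration only, not part of this file, they can be read from userland with sysctlbyname(3); a minimal sketch:

/*
 * Userland sketch (illustration, not kernel code): report pipe KVA usage
 * against the limit using the sysctls exported by this file.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	long maxkva, kva;
	size_t len;

	len = sizeof(maxkva);
	if (sysctlbyname("kern.ipc.maxpipekva", &maxkva, &len, NULL, 0) == -1)
		err(1, "kern.ipc.maxpipekva");
	len = sizeof(kva);
	if (sysctlbyname("kern.ipc.pipekva", &kva, &len, NULL, 0) == -1)
		err(1, "kern.ipc.pipekva");
	printf("pipe KVA: %ld of %ld bytes in use (%.1f%%)\n",
	    kva, maxkva, 100.0 * kva / (double)maxkva);
	return (0);
}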
90
91 #include <sys/cdefs.h>
92 __FBSDID("$FreeBSD$");
93
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/conf.h>
97 #include <sys/fcntl.h>
98 #include <sys/file.h>
99 #include <sys/filedesc.h>
100 #include <sys/filio.h>
101 #include <sys/kernel.h>
102 #include <sys/lock.h>
103 #include <sys/mutex.h>
104 #include <sys/ttycom.h>
105 #include <sys/stat.h>
106 #include <sys/malloc.h>
107 #include <sys/poll.h>
108 #include <sys/selinfo.h>
109 #include <sys/signalvar.h>
110 #include <sys/syscallsubr.h>
111 #include <sys/sysctl.h>
112 #include <sys/sysproto.h>
113 #include <sys/pipe.h>
114 #include <sys/proc.h>
115 #include <sys/vnode.h>
116 #include <sys/uio.h>
117 #include <sys/event.h>
118
119 #include <security/mac/mac_framework.h>
120
121 #include <vm/vm.h>
122 #include <vm/vm_param.h>
123 #include <vm/vm_object.h>
124 #include <vm/vm_kern.h>
125 #include <vm/vm_extern.h>
126 #include <vm/pmap.h>
127 #include <vm/vm_map.h>
128 #include <vm/vm_page.h>
129 #include <vm/uma.h>
130
131 /*
132  * Use this define if you want to disable *fancy* VM things.  Expect an
133  * approx 30% decrease in transfer rate.  This could be useful for
134  * NetBSD or OpenBSD.
135  */
136 /* #define PIPE_NODIRECT */
137
138 /*
139  * interfaces to the outside world
140  */
141 static fo_rdwr_t        pipe_read;
142 static fo_rdwr_t        pipe_write;
143 static fo_truncate_t    pipe_truncate;
144 static fo_ioctl_t       pipe_ioctl;
145 static fo_poll_t        pipe_poll;
146 static fo_kqfilter_t    pipe_kqfilter;
147 static fo_stat_t        pipe_stat;
148 static fo_close_t       pipe_close;
149
150 static struct fileops pipeops = {
151         .fo_read = pipe_read,
152         .fo_write = pipe_write,
153         .fo_truncate = pipe_truncate,
154         .fo_ioctl = pipe_ioctl,
155         .fo_poll = pipe_poll,
156         .fo_kqfilter = pipe_kqfilter,
157         .fo_stat = pipe_stat,
158         .fo_close = pipe_close,
159         .fo_chmod = invfo_chmod,
160         .fo_chown = invfo_chown,
161         .fo_flags = DFLAG_PASSABLE
162 };
163
164 static void     filt_pipedetach(struct knote *kn);
165 static int      filt_piperead(struct knote *kn, long hint);
166 static int      filt_pipewrite(struct knote *kn, long hint);
167
168 static struct filterops pipe_rfiltops = {
169         .f_isfd = 1,
170         .f_detach = filt_pipedetach,
171         .f_event = filt_piperead
172 };
173 static struct filterops pipe_wfiltops = {
174         .f_isfd = 1,
175         .f_detach = filt_pipedetach,
176         .f_event = filt_pipewrite
177 };
178
179 /*
180  * Default pipe buffer size(s); this can be kind-of large now because pipe
181  * space is pageable.  The pipe code will try to maintain locality of
182  * reference for performance reasons, so small amounts of outstanding I/O
183  * will not wipe the cache.
184  */
185 #define MINPIPESIZE (PIPE_SIZE/3)
186 #define MAXPIPESIZE (2*PIPE_SIZE/3)
187
188 static long amountpipekva;
189 static int pipefragretry;
190 static int pipeallocfail;
191 static int piperesizefail;
192 static int piperesizeallowed = 1;
193
194 SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
195            &maxpipekva, 0, "Pipe KVA limit");
196 SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
197            &amountpipekva, 0, "Pipe KVA usage");
198 SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
199           &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
200 SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
201           &pipeallocfail, 0, "Pipe allocation failures");
202 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
203           &piperesizefail, 0, "Pipe resize failures");
204 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
205           &piperesizeallowed, 0, "Pipe resizing allowed");
206
207 static void pipeinit(void *dummy __unused);
208 static void pipeclose(struct pipe *cpipe);
209 static void pipe_free_kmem(struct pipe *cpipe);
210 static int pipe_create(struct pipe *pipe, int backing);
211 static __inline int pipelock(struct pipe *cpipe, int catch);
212 static __inline void pipeunlock(struct pipe *cpipe);
213 static __inline void pipeselwakeup(struct pipe *cpipe);
214 #ifndef PIPE_NODIRECT
215 static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
216 static void pipe_destroy_write_buffer(struct pipe *wpipe);
217 static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
218 static void pipe_clone_write_buffer(struct pipe *wpipe);
219 #endif
220 static int pipespace(struct pipe *cpipe, int size);
221 static int pipespace_new(struct pipe *cpipe, int size);
222
223 static int      pipe_zone_ctor(void *mem, int size, void *arg, int flags);
224 static int      pipe_zone_init(void *mem, int size, int flags);
225 static void     pipe_zone_fini(void *mem, int size);
226
227 static uma_zone_t pipe_zone;
228 static struct unrhdr *pipeino_unr;
229 static dev_t pipedev_ino;
230
231 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
232
233 static void
234 pipeinit(void *dummy __unused)
235 {
236
237         pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
238             pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
239             UMA_ALIGN_PTR, 0);
240         KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
241         pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
242         KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
243         pipedev_ino = devfs_alloc_cdp_inode();
244         KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
245 }
246
247 static int
248 pipe_zone_ctor(void *mem, int size, void *arg, int flags)
249 {
250         struct pipepair *pp;
251         struct pipe *rpipe, *wpipe;
252
253         KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));
254
255         pp = (struct pipepair *)mem;
256
257         /*
258          * We zero both pipe endpoints to make sure all the kmem pointers
259          * are NULL, flag fields are zero'd, etc.  We timestamp both
260          * endpoints with the same time.
261          */
262         rpipe = &pp->pp_rpipe;
263         bzero(rpipe, sizeof(*rpipe));
264         vfs_timestamp(&rpipe->pipe_ctime);
265         rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;
266
267         wpipe = &pp->pp_wpipe;
268         bzero(wpipe, sizeof(*wpipe));
269         wpipe->pipe_ctime = rpipe->pipe_ctime;
270         wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;
271
272         rpipe->pipe_peer = wpipe;
273         rpipe->pipe_pair = pp;
274         wpipe->pipe_peer = rpipe;
275         wpipe->pipe_pair = pp;
276
277         /*
278          * Mark both endpoints as present; they will later get free'd
279          * one at a time.  When both are free'd, then the whole pair
280          * is released.
281          */
282         rpipe->pipe_present = PIPE_ACTIVE;
283         wpipe->pipe_present = PIPE_ACTIVE;
284
285         /*
286          * Eventually, the MAC Framework may initialize the label
287  * in ctor or init, but for now we do it elsewhere to avoid
288          * blocking in ctor or init.
289          */
290         pp->pp_label = NULL;
291
292         return (0);
293 }
294
295 static int
296 pipe_zone_init(void *mem, int size, int flags)
297 {
298         struct pipepair *pp;
299
300         KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));
301
302         pp = (struct pipepair *)mem;
303
304         mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
305         return (0);
306 }
307
308 static void
309 pipe_zone_fini(void *mem, int size)
310 {
311         struct pipepair *pp;
312
313         KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));
314
315         pp = (struct pipepair *)mem;
316
317         mtx_destroy(&pp->pp_mtx);
318 }
319
320 /*
321  * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
322  * the zone pick up the pieces via pipeclose().
323  */
324 int
325 kern_pipe(struct thread *td, int fildes[2])
326 {
327         struct filedesc *fdp = td->td_proc->p_fd;
328         struct file *rf, *wf;
329         struct pipepair *pp;
330         struct pipe *rpipe, *wpipe;
331         int fd, error;
332
333         pp = uma_zalloc(pipe_zone, M_WAITOK);
334 #ifdef MAC
335         /*
336          * The MAC label is shared between the connected endpoints.  As a
337          * result mac_pipe_init() and mac_pipe_create() are called once
338          * for the pair, and not on the endpoints.
339          */
340         mac_pipe_init(pp);
341         mac_pipe_create(td->td_ucred, pp);
342 #endif
343         rpipe = &pp->pp_rpipe;
344         wpipe = &pp->pp_wpipe;
345
346         knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
347         knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));
348
349         /* Only the forward direction pipe is backed by default */
350         if ((error = pipe_create(rpipe, 1)) != 0 ||
351             (error = pipe_create(wpipe, 0)) != 0) {
352                 pipeclose(rpipe);
353                 pipeclose(wpipe);
354                 return (error);
355         }
356
357         rpipe->pipe_state |= PIPE_DIRECTOK;
358         wpipe->pipe_state |= PIPE_DIRECTOK;
359
360         error = falloc(td, &rf, &fd, 0);
361         if (error) {
362                 pipeclose(rpipe);
363                 pipeclose(wpipe);
364                 return (error);
365         }
366         /* An extra reference on `rf' has been held for us by falloc(). */
367         fildes[0] = fd;
368
369         /*
370          * Warning: once we've gotten past allocation of the fd for the
371          * read-side, we can only drop the read side via fdrop() in order
372          * to avoid races against processes which manage to dup() the read
373          * side while we are blocked trying to allocate the write side.
374          */
375         finit(rf, FREAD | FWRITE, DTYPE_PIPE, rpipe, &pipeops);
376         error = falloc(td, &wf, &fd, 0);
377         if (error) {
378                 fdclose(fdp, rf, fildes[0], td);
379                 fdrop(rf, td);
380                 /* rpipe has been closed by fdrop(). */
381                 pipeclose(wpipe);
382                 return (error);
383         }
384         /* An extra reference on `wf' has been held for us by falloc(). */
385         finit(wf, FREAD | FWRITE, DTYPE_PIPE, wpipe, &pipeops);
386         fdrop(wf, td);
387         fildes[1] = fd;
388         fdrop(rf, td);
389
390         return (0);
391 }
392
393 /* ARGSUSED */
394 int
395 sys_pipe(struct thread *td, struct pipe_args *uap)
396 {
397         int error;
398         int fildes[2];
399
400         error = kern_pipe(td, fildes);
401         if (error)
402                 return (error);
403         
404         td->td_retval[0] = fildes[0];
405         td->td_retval[1] = fildes[1];
406
407         return (0);
408 }
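
For reference, the userland contract that kern_pipe() and sys_pipe() provide is the familiar pipe(2) pattern.  A minimal sketch (ordinary user code, not part of this file):

/* Userland sketch (illustration, not kernel code): basic pipe(2) usage. */
#include <sys/types.h>
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char buf[32];
	ssize_t n;
	pid_t pid;

	if (pipe(fds) == -1)
		err(1, "pipe");
	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {			/* child: conventionally the read end */
		close(fds[1]);
		n = read(fds[0], buf, sizeof(buf) - 1);
		if (n == -1)
			err(1, "read");
		buf[n] = '\0';
		printf("child read: %s\n", buf);
		_exit(0);
	}
	close(fds[0]);			/* parent: conventionally the write end */
	if (write(fds[1], "hello", 5) != 5)
		err(1, "write");
	close(fds[1]);
	waitpid(pid, NULL, 0);
	return (0);
}

Note that, as the finit() calls above show, both descriptors are opened FREAD | FWRITE, so FreeBSD pipes are bidirectional; the read-end/write-end split in the sketch is only the portable convention.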
409
410 /*
411  * Allocate kva for the pipe circular buffer; the space is pageable.
412  * This routine will 'realloc' the size of a pipe safely: if it fails,
413  * it will retain the old buffer.
414  * On failure it returns ENOMEM.
415  */
416 static int
417 pipespace_new(cpipe, size)
418         struct pipe *cpipe;
419         int size;
420 {
421         caddr_t buffer;
422         int error, cnt, firstseg;
423         static int curfail = 0;
424         static struct timeval lastfail;
425
426         KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
427         KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
428                 ("pipespace: resize of direct writes not allowed"));
429 retry:
430         cnt = cpipe->pipe_buffer.cnt;
431         if (cnt > size)
432                 size = cnt;
433
434         size = round_page(size);
435         buffer = (caddr_t) vm_map_min(pipe_map);
436
437         error = vm_map_find(pipe_map, NULL, 0,
438                 (vm_offset_t *) &buffer, size, 1,
439                 VM_PROT_ALL, VM_PROT_ALL, 0);
440         if (error != KERN_SUCCESS) {
441                 if ((cpipe->pipe_buffer.buffer == NULL) &&
442                         (size > SMALL_PIPE_SIZE)) {
443                         size = SMALL_PIPE_SIZE;
444                         pipefragretry++;
445                         goto retry;
446                 }
447                 if (cpipe->pipe_buffer.buffer == NULL) {
448                         pipeallocfail++;
449                         if (ppsratecheck(&lastfail, &curfail, 1))
450                                 printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
451                 } else {
452                         piperesizefail++;
453                 }
454                 return (ENOMEM);
455         }
456
457         /* copy data, then free old resources if we're resizing */
458         if (cnt > 0) {
459                 if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
460                         firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
461                         bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
462                                 buffer, firstseg);
463                         if ((cnt - firstseg) > 0)
464                                 bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
465                                         cpipe->pipe_buffer.in);
466                 } else {
467                         bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
468                                 buffer, cnt);
469                 }
470         }
471         pipe_free_kmem(cpipe);
472         cpipe->pipe_buffer.buffer = buffer;
473         cpipe->pipe_buffer.size = size;
474         cpipe->pipe_buffer.in = cnt;
475         cpipe->pipe_buffer.out = 0;
476         cpipe->pipe_buffer.cnt = cnt;
477         atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
478         return (0);
479 }
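
The copy above linearizes the circular buffer: live data begins at 'out', spans 'cnt' bytes, and wraps from the end of the buffer back to offset 0.  A simplified userland model of that bookkeeping (illustration only, not the kernel's code):

/*
 * Simplified model (illustration, not kernel code) of the in/out/cnt
 * ring-buffer layout that pipespace_new() copies into a new linear buffer.
 */
#include <string.h>

struct ring {
	char	*buf;
	int	 size;		/* total capacity */
	int	 in;		/* offset of next byte to be written */
	int	 out;		/* offset of next byte to be read */
	int	 cnt;		/* bytes currently stored */
};

/* Copy the ring's contents, oldest byte first, into a fresh linear buffer. */
static void
ring_linearize(const struct ring *r, char *dst)
{
	int firstseg;

	if (r->cnt == 0)
		return;
	if (r->in <= r->out) {
		/* Data wraps: copy the tail segment, then the head. */
		firstseg = r->size - r->out;
		memcpy(dst, r->buf + r->out, firstseg);
		if (r->cnt - firstseg > 0)
			memcpy(dst + firstseg, r->buf, r->cnt - firstseg);
	} else {
		/* Data is contiguous. */
		memcpy(dst, r->buf + r->out, r->cnt);
	}
}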
480
481 /*
482  * Wrapper for pipespace_new() that performs locking assertions.
483  */
484 static int
485 pipespace(cpipe, size)
486         struct pipe *cpipe;
487         int size;
488 {
489
490         KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
491                 ("Unlocked pipe passed to pipespace"));
492         return (pipespace_new(cpipe, size));
493 }
494
495 /*
496  * lock a pipe for I/O, blocking other access
497  */
498 static __inline int
499 pipelock(cpipe, catch)
500         struct pipe *cpipe;
501         int catch;
502 {
503         int error;
504
505         PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
506         while (cpipe->pipe_state & PIPE_LOCKFL) {
507                 cpipe->pipe_state |= PIPE_LWANT;
508                 error = msleep(cpipe, PIPE_MTX(cpipe),
509                     catch ? (PRIBIO | PCATCH) : PRIBIO,
510                     "pipelk", 0);
511                 if (error != 0)
512                         return (error);
513         }
514         cpipe->pipe_state |= PIPE_LOCKFL;
515         return (0);
516 }
517
518 /*
519  * unlock a pipe I/O lock
520  */
521 static __inline void
522 pipeunlock(cpipe)
523         struct pipe *cpipe;
524 {
525
526         PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
527         KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
528                 ("Unlocked pipe passed to pipeunlock"));
529         cpipe->pipe_state &= ~PIPE_LOCKFL;
530         if (cpipe->pipe_state & PIPE_LWANT) {
531                 cpipe->pipe_state &= ~PIPE_LWANT;
532                 wakeup(cpipe);
533         }
534 }
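
pipelock() and pipeunlock() implement the long-term I/O lock described in the header comment: a flag protected by the pipe mutex, with msleep()/wakeup() providing the blocking.  A rough userland analogy using pthreads (illustration only; the kernel primitives differ, e.g. msleep() with PCATCH can be interrupted by signals):

/*
 * Userland analogy (illustration, not kernel code) of the pipelock()
 * pattern: the mutex only guards the flag, and the flag is what is held
 * across the long operation.
 */
#include <pthread.h>
#include <stdbool.h>

struct flaglock {
	pthread_mutex_t	mtx;
	pthread_cond_t	cv;
	bool		locked;
};

static void
flaglock_acquire(struct flaglock *fl)
{
	pthread_mutex_lock(&fl->mtx);
	while (fl->locked)
		pthread_cond_wait(&fl->cv, &fl->mtx);	/* the "pipelk" sleep */
	fl->locked = true;
	pthread_mutex_unlock(&fl->mtx);
}

static void
flaglock_release(struct flaglock *fl)
{
	pthread_mutex_lock(&fl->mtx);
	fl->locked = false;
	pthread_cond_broadcast(&fl->cv);		/* the wakeup() */
	pthread_mutex_unlock(&fl->mtx);
}

As the header comment warns, any state examined before a sleep in the acquire path must be re-read after the flag is finally obtained.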
535
536 static __inline void
537 pipeselwakeup(cpipe)
538         struct pipe *cpipe;
539 {
540
541         PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
542         if (cpipe->pipe_state & PIPE_SEL) {
543                 selwakeuppri(&cpipe->pipe_sel, PSOCK);
544                 if (!SEL_WAITING(&cpipe->pipe_sel))
545                         cpipe->pipe_state &= ~PIPE_SEL;
546         }
547         if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
548                 pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
549         KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
550 }
551
552 /*
553  * Initialize and allocate VM and memory for pipe.  The structure
554  * will start out zero'd from the ctor, so we just manage the kmem.
555  */
556 static int
557 pipe_create(pipe, backing)
558         struct pipe *pipe;
559         int backing;
560 {
561         int error;
562
563         if (backing) {
564                 if (amountpipekva > maxpipekva / 2)
565                         error = pipespace_new(pipe, SMALL_PIPE_SIZE);
566                 else
567                         error = pipespace_new(pipe, PIPE_SIZE);
568         } else {
569                 /* If we're not backing this pipe, no need to do anything. */
570                 error = 0;
571         }
572         pipe->pipe_ino = -1;
573         return (error);
574 }
575
576 /* ARGSUSED */
577 static int
578 pipe_read(fp, uio, active_cred, flags, td)
579         struct file *fp;
580         struct uio *uio;
581         struct ucred *active_cred;
582         struct thread *td;
583         int flags;
584 {
585         struct pipe *rpipe = fp->f_data;
586         int error;
587         int nread = 0;
588         u_int size;
589
590         PIPE_LOCK(rpipe);
591         ++rpipe->pipe_busy;
592         error = pipelock(rpipe, 1);
593         if (error)
594                 goto unlocked_error;
595
596 #ifdef MAC
597         error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
598         if (error)
599                 goto locked_error;
600 #endif
601         if (amountpipekva > (3 * maxpipekva) / 4) {
602                 if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
603                         (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
604                         (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
605                         (piperesizeallowed == 1)) {
606                         PIPE_UNLOCK(rpipe);
607                         pipespace(rpipe, SMALL_PIPE_SIZE);
608                         PIPE_LOCK(rpipe);
609                 }
610         }
611
612         while (uio->uio_resid) {
613                 /*
614                  * normal pipe buffer receive
615                  */
616                 if (rpipe->pipe_buffer.cnt > 0) {
617                         size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
618                         if (size > rpipe->pipe_buffer.cnt)
619                                 size = rpipe->pipe_buffer.cnt;
620                         if (size > (u_int) uio->uio_resid)
621                                 size = (u_int) uio->uio_resid;
622
623                         PIPE_UNLOCK(rpipe);
624                         error = uiomove(
625                             &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
626                             size, uio);
627                         PIPE_LOCK(rpipe);
628                         if (error)
629                                 break;
630
631                         rpipe->pipe_buffer.out += size;
632                         if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
633                                 rpipe->pipe_buffer.out = 0;
634
635                         rpipe->pipe_buffer.cnt -= size;
636
637                         /*
638                          * If there is no more to read in the pipe, reset
639                          * its pointers to the beginning.  This improves
640                          * cache hit stats.
641                          */
642                         if (rpipe->pipe_buffer.cnt == 0) {
643                                 rpipe->pipe_buffer.in = 0;
644                                 rpipe->pipe_buffer.out = 0;
645                         }
646                         nread += size;
647 #ifndef PIPE_NODIRECT
648                 /*
649                  * Direct copy, bypassing a kernel buffer.
650                  */
651                 } else if ((size = rpipe->pipe_map.cnt) &&
652                            (rpipe->pipe_state & PIPE_DIRECTW)) {
653                         if (size > (u_int) uio->uio_resid)
654                                 size = (u_int) uio->uio_resid;
655
656                         PIPE_UNLOCK(rpipe);
657                         error = uiomove_fromphys(rpipe->pipe_map.ms,
658                             rpipe->pipe_map.pos, size, uio);
659                         PIPE_LOCK(rpipe);
660                         if (error)
661                                 break;
662                         nread += size;
663                         rpipe->pipe_map.pos += size;
664                         rpipe->pipe_map.cnt -= size;
665                         if (rpipe->pipe_map.cnt == 0) {
666                                 rpipe->pipe_state &= ~PIPE_DIRECTW;
667                                 wakeup(rpipe);
668                         }
669 #endif
670                 } else {
671                         /*
672                          * detect EOF condition
673                          * read returns 0 on EOF, no need to set error
674                          */
675                         if (rpipe->pipe_state & PIPE_EOF)
676                                 break;
677
678                         /*
679                          * If the "write-side" has been blocked, wake it up now.
680                          */
681                         if (rpipe->pipe_state & PIPE_WANTW) {
682                                 rpipe->pipe_state &= ~PIPE_WANTW;
683                                 wakeup(rpipe);
684                         }
685
686                         /*
687                          * Break if some data was read.
688                          */
689                         if (nread > 0)
690                                 break;
691
692                         /*
693                          * Unlock the pipe buffer for our remaining processing.
694                          * We will either break out with an error or we will
695                          * sleep and relock to loop.
696                          */
697                         pipeunlock(rpipe);
698
699                         /*
700                          * Handle non-blocking mode operation or
701                          * wait for more data.
702                          */
703                         if (fp->f_flag & FNONBLOCK) {
704                                 error = EAGAIN;
705                         } else {
706                                 rpipe->pipe_state |= PIPE_WANTR;
707                                 if ((error = msleep(rpipe, PIPE_MTX(rpipe),
708                                     PRIBIO | PCATCH,
709                                     "piperd", 0)) == 0)
710                                         error = pipelock(rpipe, 1);
711                         }
712                         if (error)
713                                 goto unlocked_error;
714                 }
715         }
716 #ifdef MAC
717 locked_error:
718 #endif
719         pipeunlock(rpipe);
720
721         /* XXX: should probably do this before getting any locks. */
722         if (error == 0)
723                 vfs_timestamp(&rpipe->pipe_atime);
724 unlocked_error:
725         --rpipe->pipe_busy;
726
727         /*
728          * PIPE_WANT processing only makes sense if pipe_busy is 0.
729          */
730         if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
731                 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
732                 wakeup(rpipe);
733         } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
734                 /*
735                  * Handle write blocking hysteresis.
736                  */
737                 if (rpipe->pipe_state & PIPE_WANTW) {
738                         rpipe->pipe_state &= ~PIPE_WANTW;
739                         wakeup(rpipe);
740                 }
741         }
742
743         if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
744                 pipeselwakeup(rpipe);
745
746         PIPE_UNLOCK(rpipe);
747         return (error);
748 }
749
750 #ifndef PIPE_NODIRECT
751 /*
752  * Map the sending process's buffer into kernel space and wire it.
753  * This is similar to a physical write operation.
754  */
755 static int
756 pipe_build_write_buffer(wpipe, uio)
757         struct pipe *wpipe;
758         struct uio *uio;
759 {
760         u_int size;
761         int i;
762
763         PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
764         KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
765                 ("Clone attempt on non-direct write pipe!"));
766
767         size = (u_int) uio->uio_iov->iov_len;
768         if (size > wpipe->pipe_buffer.size)
769                 size = wpipe->pipe_buffer.size;
770
771         if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
772             (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
773             wpipe->pipe_map.ms, PIPENPAGES)) < 0)
774                 return (EFAULT);
775
776 /*
777  * set up the control block
778  */
779         wpipe->pipe_map.npages = i;
780         wpipe->pipe_map.pos =
781             ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
782         wpipe->pipe_map.cnt = size;
783
784 /*
785  * and update the uio data
786  */
787
788         uio->uio_iov->iov_len -= size;
789         uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
790         if (uio->uio_iov->iov_len == 0)
791                 uio->uio_iov++;
792         uio->uio_resid -= size;
793         uio->uio_offset += size;
794         return (0);
795 }
796
797 /*
798  * unmap and unwire the process buffer
799  */
800 static void
801 pipe_destroy_write_buffer(wpipe)
802         struct pipe *wpipe;
803 {
804
805         PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
806         vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
807         wpipe->pipe_map.npages = 0;
808 }
809
810 /*
811  * In the case of a signal, the writing process might go away.  This
812  * code copies the data into the circular buffer so that the source
813  * pages can be freed without loss of data.
814  */
815 static void
816 pipe_clone_write_buffer(wpipe)
817         struct pipe *wpipe;
818 {
819         struct uio uio;
820         struct iovec iov;
821         int size;
822         int pos;
823
824         PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
825         size = wpipe->pipe_map.cnt;
826         pos = wpipe->pipe_map.pos;
827
828         wpipe->pipe_buffer.in = size;
829         wpipe->pipe_buffer.out = 0;
830         wpipe->pipe_buffer.cnt = size;
831         wpipe->pipe_state &= ~PIPE_DIRECTW;
832
833         PIPE_UNLOCK(wpipe);
834         iov.iov_base = wpipe->pipe_buffer.buffer;
835         iov.iov_len = size;
836         uio.uio_iov = &iov;
837         uio.uio_iovcnt = 1;
838         uio.uio_offset = 0;
839         uio.uio_resid = size;
840         uio.uio_segflg = UIO_SYSSPACE;
841         uio.uio_rw = UIO_READ;
842         uio.uio_td = curthread;
843         uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
844         PIPE_LOCK(wpipe);
845         pipe_destroy_write_buffer(wpipe);
846 }
847
848 /*
849  * This implements the pipe buffer write mechanism.  Note that only
850  * a direct write OR a normal pipe write can be pending at any given time.
851  * If there are any characters in the pipe buffer, the direct write will
852  * be deferred until the receiving process grabs all of the bytes from
853  * the pipe buffer.  Then the direct mapping write is set-up.
854  */
855 static int
856 pipe_direct_write(wpipe, uio)
857         struct pipe *wpipe;
858         struct uio *uio;
859 {
860         int error;
861
862 retry:
863         PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
864         error = pipelock(wpipe, 1);
865         if (wpipe->pipe_state & PIPE_EOF)
866                 error = EPIPE;
867         if (error) {
868                 pipeunlock(wpipe);
869                 goto error1;
870         }
871         while (wpipe->pipe_state & PIPE_DIRECTW) {
872                 if (wpipe->pipe_state & PIPE_WANTR) {
873                         wpipe->pipe_state &= ~PIPE_WANTR;
874                         wakeup(wpipe);
875                 }
876                 pipeselwakeup(wpipe);
877                 wpipe->pipe_state |= PIPE_WANTW;
878                 pipeunlock(wpipe);
879                 error = msleep(wpipe, PIPE_MTX(wpipe),
880                     PRIBIO | PCATCH, "pipdww", 0);
881                 if (error)
882                         goto error1;
883                 else
884                         goto retry;
885         }
886         wpipe->pipe_map.cnt = 0;        /* transfer not ready yet */
887         if (wpipe->pipe_buffer.cnt > 0) {
888                 if (wpipe->pipe_state & PIPE_WANTR) {
889                         wpipe->pipe_state &= ~PIPE_WANTR;
890                         wakeup(wpipe);
891                 }
892                 pipeselwakeup(wpipe);
893                 wpipe->pipe_state |= PIPE_WANTW;
894                 pipeunlock(wpipe);
895                 error = msleep(wpipe, PIPE_MTX(wpipe),
896                     PRIBIO | PCATCH, "pipdwc", 0);
897                 if (error)
898                         goto error1;
899                 else
900                         goto retry;
901         }
902
903         wpipe->pipe_state |= PIPE_DIRECTW;
904
905         PIPE_UNLOCK(wpipe);
906         error = pipe_build_write_buffer(wpipe, uio);
907         PIPE_LOCK(wpipe);
908         if (error) {
909                 wpipe->pipe_state &= ~PIPE_DIRECTW;
910                 pipeunlock(wpipe);
911                 goto error1;
912         }
913
914         error = 0;
915         while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
916                 if (wpipe->pipe_state & PIPE_EOF) {
917                         pipe_destroy_write_buffer(wpipe);
918                         pipeselwakeup(wpipe);
919                         pipeunlock(wpipe);
920                         error = EPIPE;
921                         goto error1;
922                 }
923                 if (wpipe->pipe_state & PIPE_WANTR) {
924                         wpipe->pipe_state &= ~PIPE_WANTR;
925                         wakeup(wpipe);
926                 }
927                 pipeselwakeup(wpipe);
928                 pipeunlock(wpipe);
929                 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
930                     "pipdwt", 0);
931                 pipelock(wpipe, 0);
932         }
933
934         if (wpipe->pipe_state & PIPE_EOF)
935                 error = EPIPE;
936         if (wpipe->pipe_state & PIPE_DIRECTW) {
937                 /*
938                  * this bit of trickery substitutes a kernel buffer for
939                  * the process that might be going away.
940                  */
941                 pipe_clone_write_buffer(wpipe);
942         } else {
943                 pipe_destroy_write_buffer(wpipe);
944         }
945         pipeunlock(wpipe);
946         return (error);
947
948 error1:
949         wakeup(wpipe);
950         return (error);
951 }
952 #endif
953
954 static int
955 pipe_write(fp, uio, active_cred, flags, td)
956         struct file *fp;
957         struct uio *uio;
958         struct ucred *active_cred;
959         struct thread *td;
960         int flags;
961 {
962         int error = 0;
963         int desiredsize, orig_resid;
964         struct pipe *wpipe, *rpipe;
965
966         rpipe = fp->f_data;
967         wpipe = rpipe->pipe_peer;
968
969         PIPE_LOCK(rpipe);
970         error = pipelock(wpipe, 1);
971         if (error) {
972                 PIPE_UNLOCK(rpipe);
973                 return (error);
974         }
975         /*
976          * detect loss of pipe read side, issue SIGPIPE if lost.
977          */
978         if (wpipe->pipe_present != PIPE_ACTIVE ||
979             (wpipe->pipe_state & PIPE_EOF)) {
980                 pipeunlock(wpipe);
981                 PIPE_UNLOCK(rpipe);
982                 return (EPIPE);
983         }
984 #ifdef MAC
985         error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
986         if (error) {
987                 pipeunlock(wpipe);
988                 PIPE_UNLOCK(rpipe);
989                 return (error);
990         }
991 #endif
992         ++wpipe->pipe_busy;
993
994         /* Choose a larger size if it's advantageous */
995         desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
996         while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
997                 if (piperesizeallowed != 1)
998                         break;
999                 if (amountpipekva > maxpipekva / 2)
1000                         break;
1001                 if (desiredsize == BIG_PIPE_SIZE)
1002                         break;
1003                 desiredsize = desiredsize * 2;
1004         }
1005
1006         /* Choose a smaller size if we're in an OOM situation */
1007         if ((amountpipekva > (3 * maxpipekva) / 4) &&
1008                 (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
1009                 (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
1010                 (piperesizeallowed == 1))
1011                 desiredsize = SMALL_PIPE_SIZE;
1012
1013         /* Resize if the above determined that a new size was necessary */
1014         if ((desiredsize != wpipe->pipe_buffer.size) &&
1015                 ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
1016                 PIPE_UNLOCK(wpipe);
1017                 pipespace(wpipe, desiredsize);
1018                 PIPE_LOCK(wpipe);
1019         }
1020         if (wpipe->pipe_buffer.size == 0) {
1021                 /*
1022                  * This can only happen for reverse direction use of pipes
1023                  * in a complete OOM situation.
1024                  */
1025                 error = ENOMEM;
1026                 --wpipe->pipe_busy;
1027                 pipeunlock(wpipe);
1028                 PIPE_UNLOCK(wpipe);
1029                 return (error);
1030         }
1031
1032         pipeunlock(wpipe);
1033
1034         orig_resid = uio->uio_resid;
1035
1036         while (uio->uio_resid) {
1037                 int space;
1038
1039                 pipelock(wpipe, 0);
1040                 if (wpipe->pipe_state & PIPE_EOF) {
1041                         pipeunlock(wpipe);
1042                         error = EPIPE;
1043                         break;
1044                 }
1045 #ifndef PIPE_NODIRECT
1046                 /*
1047                  * If the transfer is large, we can gain performance if
1048                  * we do process-to-process copies directly.
1049                  * If the write is non-blocking, we don't use the
1050                  * direct write mechanism.
1051                  *
1052                  * The direct write mechanism will detect the reader going
1053                  * away on us.
1054                  */
1055                 if (uio->uio_segflg == UIO_USERSPACE &&
1056                     uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
1057                     wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
1058                     (fp->f_flag & FNONBLOCK) == 0) {
1059                         pipeunlock(wpipe);
1060                         error = pipe_direct_write(wpipe, uio);
1061                         if (error)
1062                                 break;
1063                         continue;
1064                 }
1065 #endif
1066
1067                 /*
1068                  * Pipe buffered writes cannot be concurrent with
1069                  * direct writes.  We wait until the currently executing
1070                  * direct write is completed before we start filling the
1071                  * pipe buffer.  We break out if a signal occurs or the
1072                  * reader goes away.
1073                  */
1074                 if (wpipe->pipe_state & PIPE_DIRECTW) {
1075                         if (wpipe->pipe_state & PIPE_WANTR) {
1076                                 wpipe->pipe_state &= ~PIPE_WANTR;
1077                                 wakeup(wpipe);
1078                         }
1079                         pipeselwakeup(wpipe);
1080                         wpipe->pipe_state |= PIPE_WANTW;
1081                         pipeunlock(wpipe);
1082                         error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1083                             "pipbww", 0);
1084                         if (error)
1085                                 break;
1086                         else
1087                                 continue;
1088                 }
1089
1090                 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1091
1092                 /* Writes of size <= PIPE_BUF must be atomic. */
1093                 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1094                         space = 0;
1095
1096                 if (space > 0) {
1097                         int size;       /* Transfer size */
1098                         int segsize;    /* first segment to transfer */
1099
1100                         /*
1101                          * Transfer size is minimum of uio transfer
1102                          * and free space in pipe buffer.
1103                          */
1104                         if (space > uio->uio_resid)
1105                                 size = uio->uio_resid;
1106                         else
1107                                 size = space;
1108                         /*
1109                          * First segment to transfer is minimum of
1110                          * transfer size and contiguous space in
1111                          * pipe buffer.  If first segment to transfer
1112                          * is less than the transfer size, we've got
1113                          * a wraparound in the buffer.
1114                          */
1115                         segsize = wpipe->pipe_buffer.size -
1116                                 wpipe->pipe_buffer.in;
1117                         if (segsize > size)
1118                                 segsize = size;
1119
1120                         /* Transfer first segment */
1121
1122                         PIPE_UNLOCK(rpipe);
1123                         error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1124                                         segsize, uio);
1125                         PIPE_LOCK(rpipe);
1126
1127                         if (error == 0 && segsize < size) {
1128                                 KASSERT(wpipe->pipe_buffer.in + segsize ==
1129                                         wpipe->pipe_buffer.size,
1130                                         ("Pipe buffer wraparound disappeared"));
1131                                 /*
1132                                  * Transfer remaining part now, to
1133                                  * support atomic writes.  Wraparound
1134                                  * happened.
1135                                  */
1136
1137                                 PIPE_UNLOCK(rpipe);
1138                                 error = uiomove(
1139                                     &wpipe->pipe_buffer.buffer[0],
1140                                     size - segsize, uio);
1141                                 PIPE_LOCK(rpipe);
1142                         }
1143                         if (error == 0) {
1144                                 wpipe->pipe_buffer.in += size;
1145                                 if (wpipe->pipe_buffer.in >=
1146                                     wpipe->pipe_buffer.size) {
1147                                         KASSERT(wpipe->pipe_buffer.in ==
1148                                                 size - segsize +
1149                                                 wpipe->pipe_buffer.size,
1150                                                 ("Expected wraparound bad"));
1151                                         wpipe->pipe_buffer.in = size - segsize;
1152                                 }
1153
1154                                 wpipe->pipe_buffer.cnt += size;
1155                                 KASSERT(wpipe->pipe_buffer.cnt <=
1156                                         wpipe->pipe_buffer.size,
1157                                         ("Pipe buffer overflow"));
1158                         }
1159                         pipeunlock(wpipe);
1160                         if (error != 0)
1161                                 break;
1162                 } else {
1163                         /*
1164                          * If the "read-side" has been blocked, wake it up now.
1165                          */
1166                         if (wpipe->pipe_state & PIPE_WANTR) {
1167                                 wpipe->pipe_state &= ~PIPE_WANTR;
1168                                 wakeup(wpipe);
1169                         }
1170
1171                         /*
1172                          * don't block on non-blocking I/O
1173                          */
1174                         if (fp->f_flag & FNONBLOCK) {
1175                                 error = EAGAIN;
1176                                 pipeunlock(wpipe);
1177                                 break;
1178                         }
1179
1180                         /*
1181                          * We have no more space and have something to offer,
1182                          * wake up select/poll.
1183                          */
1184                         pipeselwakeup(wpipe);
1185
1186                         wpipe->pipe_state |= PIPE_WANTW;
1187                         pipeunlock(wpipe);
1188                         error = msleep(wpipe, PIPE_MTX(rpipe),
1189                             PRIBIO | PCATCH, "pipewr", 0);
1190                         if (error != 0)
1191                                 break;
1192                 }
1193         }
1194
1195         pipelock(wpipe, 0);
1196         --wpipe->pipe_busy;
1197
1198         if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1199                 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1200                 wakeup(wpipe);
1201         } else if (wpipe->pipe_buffer.cnt > 0) {
1202                 /*
1203                  * If we have put any characters in the buffer, we wake up
1204                  * the reader.
1205                  */
1206                 if (wpipe->pipe_state & PIPE_WANTR) {
1207                         wpipe->pipe_state &= ~PIPE_WANTR;
1208                         wakeup(wpipe);
1209                 }
1210         }
1211
1212         /*
1213          * Don't return EPIPE if I/O was successful
1214          */
1215         if ((wpipe->pipe_buffer.cnt == 0) &&
1216             (uio->uio_resid == 0) &&
1217             (error == EPIPE)) {
1218                 error = 0;
1219         }
1220
1221         if (error == 0)
1222                 vfs_timestamp(&wpipe->pipe_mtime);
1223
1224         /*
1225          * We have something to offer,
1226          * wake up select/poll.
1227          */
1228         if (wpipe->pipe_buffer.cnt)
1229                 pipeselwakeup(wpipe);
1230
1231         pipeunlock(wpipe);
1232         PIPE_UNLOCK(rpipe);
1233         return (error);
1234 }
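
Two behaviours of pipe_write() are easy to see from userland: writes of at most PIPE_BUF bytes are atomic (the "space = 0" check above defers them until they fit), and with FNONBLOCK set a writer gets EAGAIN instead of sleeping in "pipewr".  A small sketch (user code, not part of this file) that fills a pipe until EAGAIN:

/* Userland sketch (illustration, not kernel code): fill a pipe until EAGAIN. */
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fds[2], flags;
	char chunk[PIPE_BUF];
	long total = 0;
	ssize_t n;

	memset(chunk, 'x', sizeof(chunk));
	if (pipe(fds) == -1)
		err(1, "pipe");
	flags = fcntl(fds[1], F_GETFL);
	if (flags == -1 || fcntl(fds[1], F_SETFL, flags | O_NONBLOCK) == -1)
		err(1, "fcntl");
	for (;;) {
		n = write(fds[1], chunk, sizeof(chunk));
		if (n == -1) {
			if (errno == EAGAIN)
				break;		/* buffer is full; would block */
			err(1, "write");
		}
		total += n;
	}
	printf("wrote %ld bytes before EAGAIN\n", total);
	return (0);
}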
1235
1236 /* ARGSUSED */
1237 static int
1238 pipe_truncate(fp, length, active_cred, td)
1239         struct file *fp;
1240         off_t length;
1241         struct ucred *active_cred;
1242         struct thread *td;
1243 {
1244
1245         return (EINVAL);
1246 }
1247
1248 /*
1249  * we implement a very minimal set of ioctls for compatibility with sockets.
1250  */
1251 static int
1252 pipe_ioctl(fp, cmd, data, active_cred, td)
1253         struct file *fp;
1254         u_long cmd;
1255         void *data;
1256         struct ucred *active_cred;
1257         struct thread *td;
1258 {
1259         struct pipe *mpipe = fp->f_data;
1260         int error;
1261
1262         PIPE_LOCK(mpipe);
1263
1264 #ifdef MAC
1265         error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
1266         if (error) {
1267                 PIPE_UNLOCK(mpipe);
1268                 return (error);
1269         }
1270 #endif
1271
1272         error = 0;
1273         switch (cmd) {
1274
1275         case FIONBIO:
1276                 break;
1277
1278         case FIOASYNC:
1279                 if (*(int *)data) {
1280                         mpipe->pipe_state |= PIPE_ASYNC;
1281                 } else {
1282                         mpipe->pipe_state &= ~PIPE_ASYNC;
1283                 }
1284                 break;
1285
1286         case FIONREAD:
1287                 if (mpipe->pipe_state & PIPE_DIRECTW)
1288                         *(int *)data = mpipe->pipe_map.cnt;
1289                 else
1290                         *(int *)data = mpipe->pipe_buffer.cnt;
1291                 break;
1292
1293         case FIOSETOWN:
1294                 PIPE_UNLOCK(mpipe);
1295                 error = fsetown(*(int *)data, &mpipe->pipe_sigio);
1296                 goto out_unlocked;
1297
1298         case FIOGETOWN:
1299                 *(int *)data = fgetown(&mpipe->pipe_sigio);
1300                 break;
1301
1302         /* This is deprecated, FIOSETOWN should be used instead. */
1303         case TIOCSPGRP:
1304                 PIPE_UNLOCK(mpipe);
1305                 error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
1306                 goto out_unlocked;
1307
1308         /* This is deprecated, FIOGETOWN should be used instead. */
1309         case TIOCGPGRP:
1310                 *(int *)data = -fgetown(&mpipe->pipe_sigio);
1311                 break;
1312
1313         default:
1314                 error = ENOTTY;
1315                 break;
1316         }
1317         PIPE_UNLOCK(mpipe);
1318 out_unlocked:
1319         return (error);
1320 }
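
The FIONREAD case above is what lets userland ask how many bytes are queued in a pipe; a minimal sketch (user code, not part of this file):

/* Userland sketch (illustration, not kernel code): FIONREAD on a pipe. */
#include <sys/ioctl.h>
#include <sys/filio.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2], pending;

	if (pipe(fds) == -1)
		err(1, "pipe");
	if (write(fds[1], "abc", 3) != 3)
		err(1, "write");
	if (ioctl(fds[0], FIONREAD, &pending) == -1)
		err(1, "ioctl");
	printf("FIONREAD reports %d bytes pending\n", pending);	/* 3 */
	return (0);
}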
1321
1322 static int
1323 pipe_poll(fp, events, active_cred, td)
1324         struct file *fp;
1325         int events;
1326         struct ucred *active_cred;
1327         struct thread *td;
1328 {
1329         struct pipe *rpipe = fp->f_data;
1330         struct pipe *wpipe;
1331         int revents = 0;
1332 #ifdef MAC
1333         int error;
1334 #endif
1335
1336         wpipe = rpipe->pipe_peer;
1337         PIPE_LOCK(rpipe);
1338 #ifdef MAC
1339         error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
1340         if (error)
1341                 goto locked_error;
1342 #endif
1343         if (events & (POLLIN | POLLRDNORM))
1344                 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1345                     (rpipe->pipe_buffer.cnt > 0))
1346                         revents |= events & (POLLIN | POLLRDNORM);
1347
1348         if (events & (POLLOUT | POLLWRNORM))
1349                 if (wpipe->pipe_present != PIPE_ACTIVE ||
1350                     (wpipe->pipe_state & PIPE_EOF) ||
1351                     (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1352                      (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1353                         revents |= events & (POLLOUT | POLLWRNORM);
1354
1355         if ((events & POLLINIGNEOF) == 0) {
1356                 if (rpipe->pipe_state & PIPE_EOF) {
1357                         revents |= (events & (POLLIN | POLLRDNORM));
1358                         if (wpipe->pipe_present != PIPE_ACTIVE ||
1359                             (wpipe->pipe_state & PIPE_EOF))
1360                                 revents |= POLLHUP;
1361                 }
1362         }
1363
1364         if (revents == 0) {
1365                 if (events & (POLLIN | POLLRDNORM)) {
1366                         selrecord(td, &rpipe->pipe_sel);
1367                         if (SEL_WAITING(&rpipe->pipe_sel))
1368                                 rpipe->pipe_state |= PIPE_SEL;
1369                 }
1370
1371                 if (events & (POLLOUT | POLLWRNORM)) {
1372                         selrecord(td, &wpipe->pipe_sel);
1373                         if (SEL_WAITING(&wpipe->pipe_sel))
1374                                 wpipe->pipe_state |= PIPE_SEL;
1375                 }
1376         }
1377 #ifdef MAC
1378 locked_error:
1379 #endif
1380         PIPE_UNLOCK(rpipe);
1381
1382         return (revents);
1383 }
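
From userland the code above is reached through poll(2) and select(2); a minimal sketch (user code, not part of this file) that waits for the read side to become readable:

/* Userland sketch (illustration, not kernel code): poll(2) on a pipe. */
#include <err.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2], nready;
	struct pollfd pfd;

	if (pipe(fds) == -1)
		err(1, "pipe");
	if (write(fds[1], "x", 1) != 1)
		err(1, "write");
	pfd.fd = fds[0];
	pfd.events = POLLIN | POLLRDNORM;
	nready = poll(&pfd, 1, 1000);		/* wait up to one second */
	if (nready == -1)
		err(1, "poll");
	printf("nready=%d revents=%#x\n", nready, (unsigned)pfd.revents);
	return (0);
}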
1384
1385 /*
1386  * We shouldn't need locks here as we're doing a read and this should
1387  * be a natural race.
1388  */
1389 static int
1390 pipe_stat(fp, ub, active_cred, td)
1391         struct file *fp;
1392         struct stat *ub;
1393         struct ucred *active_cred;
1394         struct thread *td;
1395 {
1396         struct pipe *pipe;
1397         int new_unr;
1398 #ifdef MAC
1399         int error;
1400 #endif
1401
1402         pipe = fp->f_data;
1403         PIPE_LOCK(pipe);
1404 #ifdef MAC
1405         error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
1406         if (error) {
1407                 PIPE_UNLOCK(pipe);
1408                 return (error);
1409         }
1410 #endif
1411         /*
1412          * Lazily allocate an inode number for the pipe.  Most pipe
1413          * users do not call fstat(2) on the pipe, which means that
1414          * postponing the inode allocation until it must be
1415          * returned to userland is useful.  If alloc_unr failed,
1416          * assign st_ino zero instead of returning an error.
1417          * Special pipe_ino values:
1418          *  -1 - not yet initialized;
1419          *  0  - alloc_unr failed, return 0 as st_ino forever.
1420          */
1421         if (pipe->pipe_ino == (ino_t)-1) {
1422                 new_unr = alloc_unr(pipeino_unr);
1423                 if (new_unr != -1)
1424                         pipe->pipe_ino = new_unr;
1425                 else
1426                         pipe->pipe_ino = 0;
1427         }
1428         PIPE_UNLOCK(pipe);
1429
1430         bzero(ub, sizeof(*ub));
1431         ub->st_mode = S_IFIFO;
1432         ub->st_blksize = PAGE_SIZE;
1433         if (pipe->pipe_state & PIPE_DIRECTW)
1434                 ub->st_size = pipe->pipe_map.cnt;
1435         else
1436                 ub->st_size = pipe->pipe_buffer.cnt;
1437         ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
1438         ub->st_atim = pipe->pipe_atime;
1439         ub->st_mtim = pipe->pipe_mtime;
1440         ub->st_ctim = pipe->pipe_ctime;
1441         ub->st_uid = fp->f_cred->cr_uid;
1442         ub->st_gid = fp->f_cred->cr_gid;
1443         ub->st_dev = pipedev_ino;
1444         ub->st_ino = pipe->pipe_ino;
1445         /*
1446          * Left as 0: st_nlink, st_rdev, st_flags, st_gen.
1447          */
1448         return (0);
1449 }
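
The net effect for userland: fstat(2) on a pipe reports S_IFIFO, an st_size equal to the number of bytes currently queued, and a lazily allocated st_ino.  A small sketch (user code, not part of this file):

/* Userland sketch (illustration, not kernel code): fstat(2) on a pipe. */
#include <sys/stat.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	struct stat sb;

	if (pipe(fds) == -1)
		err(1, "pipe");
	if (write(fds[1], "hello", 5) != 5)
		err(1, "write");
	if (fstat(fds[0], &sb) == -1)
		err(1, "fstat");
	printf("fifo=%d size=%jd ino=%ju\n", S_ISFIFO(sb.st_mode) ? 1 : 0,
	    (intmax_t)sb.st_size, (uintmax_t)sb.st_ino);	/* size is 5 */
	return (0);
}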
1450
1451 /* ARGSUSED */
1452 static int
1453 pipe_close(fp, td)
1454         struct file *fp;
1455         struct thread *td;
1456 {
1457         struct pipe *cpipe = fp->f_data;
1458
1459         fp->f_ops = &badfileops;
1460         fp->f_data = NULL;
1461         funsetown(&cpipe->pipe_sigio);
1462         pipeclose(cpipe);
1463         return (0);
1464 }
1465
1466 static void
1467 pipe_free_kmem(cpipe)
1468         struct pipe *cpipe;
1469 {
1470
1471         KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
1472             ("pipe_free_kmem: pipe mutex locked"));
1473
1474         if (cpipe->pipe_buffer.buffer != NULL) {
1475                 atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
1476                 vm_map_remove(pipe_map,
1477                     (vm_offset_t)cpipe->pipe_buffer.buffer,
1478                     (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
1479                 cpipe->pipe_buffer.buffer = NULL;
1480         }
1481 #ifndef PIPE_NODIRECT
1482         {
1483                 cpipe->pipe_map.cnt = 0;
1484                 cpipe->pipe_map.pos = 0;
1485                 cpipe->pipe_map.npages = 0;
1486         }
1487 #endif
1488 }
1489
1490 /*
1491  * shut down the pipe
1492  */
1493 static void
1494 pipeclose(cpipe)
1495         struct pipe *cpipe;
1496 {
1497         struct pipepair *pp;
1498         struct pipe *ppipe;
1499         ino_t ino;
1500
1501         KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
1502
1503         PIPE_LOCK(cpipe);
1504         pipelock(cpipe, 0);
1505         pp = cpipe->pipe_pair;
1506
1507         pipeselwakeup(cpipe);
1508
1509         /*
1510          * If the other side is blocked, wake it up saying that
1511          * we want to close it down.
1512          */
1513         cpipe->pipe_state |= PIPE_EOF;
1514         while (cpipe->pipe_busy) {
1515                 wakeup(cpipe);
1516                 cpipe->pipe_state |= PIPE_WANT;
1517                 pipeunlock(cpipe);
1518                 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1519                 pipelock(cpipe, 0);
1520         }
1521
1522
1523         /*
1524          * Disconnect from peer, if any.
1525          */
1526         ppipe = cpipe->pipe_peer;
1527         if (ppipe->pipe_present == PIPE_ACTIVE) {
1528                 pipeselwakeup(ppipe);
1529
1530                 ppipe->pipe_state |= PIPE_EOF;
1531                 wakeup(ppipe);
1532                 KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
1533         }
1534
1535         /*
1536          * Mark this endpoint as free.  Release kmem resources.  We
1537          * don't mark this endpoint as unused until we've finished
1538          * doing that, or the pipe might disappear out from under
1539          * us.
1540          */
1541         PIPE_UNLOCK(cpipe);
1542         pipe_free_kmem(cpipe);
1543         PIPE_LOCK(cpipe);
1544         cpipe->pipe_present = PIPE_CLOSING;
1545         pipeunlock(cpipe);
1546
1547         /*
1548          * knlist_clear() may sleep, dropping the PIPE_MTX.  Set
1549          * PIPE_FINALIZED, which allows the other end to free the
1550          * pipe_pair, only after the knotes are completely dismantled.
1551          */
1552         knlist_clear(&cpipe->pipe_sel.si_note, 1);
1553         cpipe->pipe_present = PIPE_FINALIZED;
1554         seldrain(&cpipe->pipe_sel);
1555         knlist_destroy(&cpipe->pipe_sel.si_note);
1556
1557         /*
1558          * Postpone the destroy of the fake inode number allocated for
1559          * our end, until pipe mtx is unlocked.
1560          */
1561         ino = cpipe->pipe_ino;
1562
1563         /*
1564          * If both endpoints are now closed, release the memory for the
1565          * pipe pair.  If not, unlock.
1566          */
1567         if (ppipe->pipe_present == PIPE_FINALIZED) {
1568                 PIPE_UNLOCK(cpipe);
1569 #ifdef MAC
1570                 mac_pipe_destroy(pp);
1571 #endif
1572                 uma_zfree(pipe_zone, cpipe->pipe_pair);
1573         } else
1574                 PIPE_UNLOCK(cpipe);
1575
1576         if (ino != 0 && ino != (ino_t)-1)
1577                 free_unr(pipeino_unr, ino);
1578 }
1579
1580 /*ARGSUSED*/
1581 static int
1582 pipe_kqfilter(struct file *fp, struct knote *kn)
1583 {
1584         struct pipe *cpipe;
1585
1586         cpipe = kn->kn_fp->f_data;
1587         PIPE_LOCK(cpipe);
1588         switch (kn->kn_filter) {
1589         case EVFILT_READ:
1590                 kn->kn_fop = &pipe_rfiltops;
1591                 break;
1592         case EVFILT_WRITE:
1593                 kn->kn_fop = &pipe_wfiltops;
1594                 if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
1595                         /* other end of pipe has been closed */
1596                         PIPE_UNLOCK(cpipe);
1597                         return (EPIPE);
1598                 }
1599                 cpipe = cpipe->pipe_peer;
1600                 break;
1601         default:
1602                 PIPE_UNLOCK(cpipe);
1603                 return (EINVAL);
1604         }
1605
1606         knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
1607         PIPE_UNLOCK(cpipe);
1608         return (0);
1609 }
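
The filters registered here back kqueue(2): an EVFILT_READ event on a pipe descriptor returns, in its data field, the byte count that filt_piperead() below stores in kn_data.  A minimal userland sketch (not part of this file):

/* Userland sketch (illustration, not kernel code): EVFILT_READ on a pipe. */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2], kq, n;
	struct kevent ev;

	if (pipe(fds) == -1)
		err(1, "pipe");
	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent register");
	if (write(fds[1], "x", 1) != 1)
		err(1, "write");
	n = kevent(kq, NULL, 0, &ev, 1, NULL);
	if (n == -1)
		err(1, "kevent wait");
	printf("events=%d readable bytes=%jd\n", n, (intmax_t)ev.data);
	return (0);
}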
1610
1611 static void
1612 filt_pipedetach(struct knote *kn)
1613 {
1614         struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1615
1616         PIPE_LOCK(cpipe);
1617         if (kn->kn_filter == EVFILT_WRITE)
1618                 cpipe = cpipe->pipe_peer;
1619         knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
1620         PIPE_UNLOCK(cpipe);
1621 }
1622
1623 /*ARGSUSED*/
1624 static int
1625 filt_piperead(struct knote *kn, long hint)
1626 {
1627         struct pipe *rpipe = kn->kn_fp->f_data;
1628         struct pipe *wpipe = rpipe->pipe_peer;
1629         int ret;
1630
1631         PIPE_LOCK(rpipe);
1632         kn->kn_data = rpipe->pipe_buffer.cnt;
1633         if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1634                 kn->kn_data = rpipe->pipe_map.cnt;
1635
1636         if ((rpipe->pipe_state & PIPE_EOF) ||
1637             wpipe->pipe_present != PIPE_ACTIVE ||
1638             (wpipe->pipe_state & PIPE_EOF)) {
1639                 kn->kn_flags |= EV_EOF;
1640                 PIPE_UNLOCK(rpipe);
1641                 return (1);
1642         }
1643         ret = kn->kn_data > 0;
1644         PIPE_UNLOCK(rpipe);
1645         return ret;
1646 }
1647
1648 /*ARGSUSED*/
1649 static int
1650 filt_pipewrite(struct knote *kn, long hint)
1651 {
1652         struct pipe *rpipe = kn->kn_fp->f_data;
1653         struct pipe *wpipe = rpipe->pipe_peer;
1654
1655         PIPE_LOCK(rpipe);
1656         if (wpipe->pipe_present != PIPE_ACTIVE ||
1657             (wpipe->pipe_state & PIPE_EOF)) {
1658                 kn->kn_data = 0;
1659                 kn->kn_flags |= EV_EOF;
1660                 PIPE_UNLOCK(rpipe);
1661                 return (1);
1662         }
1663         kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1664         if (wpipe->pipe_state & PIPE_DIRECTW)
1665                 kn->kn_data = 0;
1666
1667         PIPE_UNLOCK(rpipe);
1668         return (kn->kn_data >= PIPE_BUF);
1669 }