/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
static int	g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;
/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve.  When pacing, we also turn
 * off direct dispatch to also reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressures exist.  See g_io_schedule_down() for more details.
 */
static volatile u_int pace;

static uma_zone_t biozone;
/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add entries to and remove entries from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

#include <machine/atomic.h>
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 * BIO_ORDERED flag may be used by disk drivers to enforce
		 * ordering restrictions, so this flag needs to be cloned.
		 * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
		 * indicate which way the buffer is passed.
		 * Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}
void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* if provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}
/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bios that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */
int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}
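
/*
 * Example (an illustrative sketch, not compiled into the kernel): a
 * minimal classifier that tags read requests.  It assumes that func,
 * arg and link are the struct g_classifier_hook members a registrant
 * must supply, matching how hook->func(hook->arg, bp) is invoked above.
 *
 *	static int
 *	example_classify(void *arg, struct bio *bp)
 *	{
 *
 *		if (bp->bio_cmd != BIO_READ)
 *			return (0);
 *		bp->bio_classifier1 = arg;
 *		return (1);
 *	}
 *
 *	static struct g_classifier_hook example_hook = {
 *		.func = example_classify,
 *		.arg = &example_hook,
 *	};
 *
 * Returning non-zero after setting bio_classifier1 keeps
 * g_run_classifiers() from overwriting it with BIO_NOTCLASSIFIED;
 * the hook is installed with g_register_classifier(&example_hook)
 * and removed with g_unregister_classifier(&example_hook).
 */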
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, error, first;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

#ifdef GET_STACK_USAGE
	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    !g_is_geom_thread(curthread) &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
		g_bioq_lock(&g_bio_run_down);
		g_run_classifiers(bp);
		g_bioq_unlock(&g_bio_run_down);
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction(cp->stat, &bp->bio_t0);
	mtx_unlock(mtxp);

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}
void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
	 * flag means it's an expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: next two don't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

#ifdef GET_STACK_USAGE
	direct = (pp->flags & G_PF_DIRECT_SEND) &&
	    (cp->flags & G_CF_DIRECT_RECEIVE) &&
	    !g_is_geom_thread(curthread);
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, cp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp,
			    bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}
SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");
static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}
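
/*
 * The unmap half is not in this file: when a bio marked
 * BIO_TRANSIENT_MAPPING completes, the completion path (biodone())
 * is expected to tear the mapping down roughly as sketched below
 * (illustrative, not the verbatim implementation):
 *
 *	start = trunc_page((vm_offset_t)bp->bio_data);
 *	end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
 *	pmap_qremove(start, atop(end - start));
 *	vmem_free(transient_arena, start, end - start);
 *	atomic_add_int(&inflight_transient_maps, -1);
 */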
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed. Pause 1ms to
			 * give the system a chance to free up memory. We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures and
			 * the length of the outage. If there's still an outage,
			 * we'll pause again and again until it's
			 * resolved. Older versions paused longer and once per
			 * allocation failure. This was OK for a single threaded
			 * g_down, but with direct dispatch would lead to max of
			 * 10 IOPs for minutes at a time when transient memory
			 * issues prevented allocation for a batch of requests
			 * from the upper layers.
			 *
			 * XXX This pacing is really lame. It needs to be solved
			 * by other methods. This is OK only because the worst
			 * case scenario is so rare. In the worst case scenario
			 * all memory is tied up waiting for I/O to complete
			 * which can never happen since we can't allocate bios
			 * for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{

	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%jd len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}
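
/*
 * Typical consumer usage of the helpers above (an illustrative
 * sketch; "cp" stands for an attached and opened struct g_consumer):
 *
 *	void *buf;
 *	int error;
 *
 *	buf = g_read_data(cp, 0, cp->provider->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	...
 *	error = g_write_data(cp, 0, buf, cp->provider->sectorsize);
 *	g_free(buf);
 *
 * Lengths must be multiples of the provider's sectorsize and no
 * larger than MAXPHYS, per the KASSERTs above; the buffer returned
 * by g_read_data() comes from g_malloc() and is released with
 * g_free().
 */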
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}