/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
static int g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve. When pacing, we also turn
 * off direct dispatch to also reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressures exist. See g_io_schedule_down() for more details.
 */
static volatile u_int pace;
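
/*
 * Illustrative sketch (not compiled, and not part of the original file):
 * the producer/consumer relationship around the pacing hint.  A completion
 * path that hits an allocation failure sets pace; the g_down thread then
 * briefly pauses and clears it before dispatching more I/O.
 */
#if 0
        /* ...in a completion path, on ENOMEM... */
        pace = 1;

        /* ...in g_io_schedule_down(), before dispatching... */
        if (pace != 0) {
                pause("g_down", min(hz / 1000, 1));
                pace = 0;
        }
#endif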
static uma_zone_t biozone;
/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add and remove entries from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);
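
/*
 * Illustrative registration (not compiled; "example_hook" and its classify
 * function are made-up names): a class wires up a hook once, then removes
 * it when unloading.  The func/arg/link fields are the ones consumed by
 * g_run_classifiers() below.
 */
#if 0
        static struct g_classifier_hook example_hook = {
                .func = example_classify,       /* see the sketch further down */
                .arg = NULL,
        };

        (void)g_register_classifier(&example_hook);
        /* ... */
        g_unregister_classifier(&example_hook);
#endif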
#include <machine/atomic.h>
static void
g_bioq_lock(struct g_bioq *bq)
{

        mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

        mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

        mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

        TAILQ_INIT(&bq->bio_queue);
        mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
        struct bio *bp;

        bp = TAILQ_FIRST(&bq->bio_queue);
        if (bp != NULL) {
                KASSERT((bp->bio_flags & BIO_ONQUEUE),
                    ("Bio not on queue bp=%p target %p", bp, bq));
                bp->bio_flags &= ~BIO_ONQUEUE;
                TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
                bq->bio_queue_length--;
        }
        return (bp);
}

struct bio *
g_new_bio(void)
{
        struct bio *bp;

        bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        return (bp);
}

struct bio *
g_alloc_bio(void)
{
        struct bio *bp;

        bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        uma_zfree(biozone, bp);
}
struct bio *
g_clone_bio(struct bio *bp)
{
        struct bio *bp2;

        bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
        if (bp2 != NULL) {
                bp2->bio_parent = bp;
                bp2->bio_cmd = bp->bio_cmd;
                /*
                 * BIO_ORDERED flag may be used by disk drivers to enforce
                 * ordering restrictions, so this flag needs to be cloned.
                 * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
                 * indicate which way the buffer is passed.
                 * Other bio flags are not suitable for cloning.
                 */
                bp2->bio_flags = bp->bio_flags &
                    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
                bp2->bio_length = bp->bio_length;
                bp2->bio_offset = bp->bio_offset;
                bp2->bio_data = bp->bio_data;
                bp2->bio_ma = bp->bio_ma;
                bp2->bio_ma_n = bp->bio_ma_n;
                bp2->bio_ma_offset = bp->bio_ma_offset;
                bp2->bio_attribute = bp->bio_attribute;
                if (bp->bio_cmd == BIO_ZONE)
                        bcopy(&bp->bio_zone, &bp2->bio_zone,
                            sizeof(bp->bio_zone));
                /* Inherit classification info from the parent */
                bp2->bio_classifier1 = bp->bio_classifier1;
                bp2->bio_classifier2 = bp->bio_classifier2;
                bp->bio_children++;
        }
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        return (bp2);
}

struct bio *
g_duplicate_bio(struct bio *bp)
{
        struct bio *bp2;

        bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
        bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
        bp2->bio_parent = bp;
        bp2->bio_cmd = bp->bio_cmd;
        bp2->bio_length = bp->bio_length;
        bp2->bio_offset = bp->bio_offset;
        bp2->bio_data = bp->bio_data;
        bp2->bio_ma = bp->bio_ma;
        bp2->bio_ma_n = bp->bio_ma_n;
        bp2->bio_ma_offset = bp->bio_ma_offset;
        bp2->bio_attribute = bp->bio_attribute;
        bp->bio_children++;
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        return (bp2);
}

void
g_reset_bio(struct bio *bp)
{

        bzero(bp, sizeof(*bp));
}
void
g_io_init()
{

        g_bioq_init(&g_bio_run_down);
        g_bioq_init(&g_bio_run_up);
        biozone = uma_zcreate("g_bio", sizeof (struct bio),
            NULL, NULL, NULL, NULL, 0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
        struct bio *bp;
        int error;

        g_trace(G_T_BIO, "bio_getattr(%s)", attr);
        bp = g_alloc_bio();
        bp->bio_cmd = BIO_GETATTR;
        bp->bio_done = NULL;
        bp->bio_attribute = attr;
        bp->bio_length = *len;
        bp->bio_data = ptr;
        g_io_request(bp, cp);
        error = biowait(bp, "ggetattr");
        *len = bp->bio_completed;
        g_destroy_bio(bp);
        return (error);
}

int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
        struct bio *bp;
        int error;

        g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
        bp = g_alloc_bio();
        bp->bio_cmd = BIO_ZONE;
        bp->bio_done = NULL;
        /*
         * XXX KDM need to handle report zone data.
         */
        bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
        if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
                bp->bio_length =
                    zone_args->zone_params.report.entries_allocated *
                    sizeof(struct disk_zone_rep_entry);
        else
                bp->bio_length = 0;
        g_io_request(bp, cp);
        error = biowait(bp, "gzone");
        bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
        g_destroy_bio(bp);
        return (error);
}

int
g_io_flush(struct g_consumer *cp)
{
        struct bio *bp;
        int error;

        g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
        bp = g_alloc_bio();
        bp->bio_cmd = BIO_FLUSH;
        bp->bio_flags |= BIO_ORDERED;
        bp->bio_done = NULL;
        bp->bio_attribute = NULL;
        bp->bio_offset = cp->provider->mediasize;
        bp->bio_data = NULL;
        bp->bio_length = 0;
        g_io_request(bp, cp);
        error = biowait(bp, "gflush");
        g_destroy_bio(bp);
        return (error);
}
static int
g_io_check(struct bio *bp)
{
        struct g_consumer *cp;
        struct g_provider *pp;
        off_t excess;
        int error;

        cp = bp->bio_from;
        pp = bp->bio_to;

        /* Fail if access counters don't allow the operation */
        switch(bp->bio_cmd) {
        case BIO_READ:
        case BIO_GETATTR:
                if (cp->acr == 0)
                        return (EPERM);
                break;
        case BIO_WRITE:
        case BIO_DELETE:
        case BIO_FLUSH:
                if (cp->acw == 0)
                        return (EPERM);
                break;
        case BIO_ZONE:
                if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
                    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
                        if (cp->acr == 0)
                                return (EPERM);
                } else if (cp->acw == 0)
                        return (EPERM);
                break;
        default:
                return (EPERM);
        }
        /* if provider is marked for error, don't disturb. */
        if (pp->error)
                return (pp->error);
        if (cp->flags & G_CF_ORPHAN)
                return (ENXIO);

        switch(bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                /* Zero sectorsize or mediasize is probably a lack of media. */
                if (pp->sectorsize == 0 || pp->mediasize == 0)
                        return (ENXIO);
                /* Reject I/O not on sector boundary */
                if (bp->bio_offset % pp->sectorsize)
                        return (EINVAL);
                /* Reject I/O not integral sector long */
                if (bp->bio_length % pp->sectorsize)
                        return (EINVAL);
                /* Reject requests before or past the end of media. */
                if (bp->bio_offset < 0)
                        return (EIO);
                if (bp->bio_offset > pp->mediasize)
                        return (EIO);

                /* Truncate requests to the end of the provider's media. */
                excess = bp->bio_offset + bp->bio_length;
                if (excess > bp->bio_to->mediasize) {
                        KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
                            round_page(bp->bio_ma_offset +
                            bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
                            ("excess bio %p too short", bp));
                        excess -= bp->bio_to->mediasize;
                        bp->bio_length -= excess;
                        if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
                                bp->bio_ma_n = round_page(bp->bio_ma_offset +
                                    bp->bio_length) / PAGE_SIZE;
                        }
                        if (excess > 0)
                                CTR3(KTR_GEOM, "g_down truncated bio "
                                    "%p provider %s by %d", bp,
                                    bp->bio_to->name, excess);
                }

                /* Deliver zero length transfers right here. */
                if (bp->bio_length == 0) {
                        CTR2(KTR_GEOM, "g_down terminated 0-length "
                            "bp %p provider %s", bp, bp->bio_to->name);
                        return (0);
                }

                if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
                    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
                    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
                        if ((error = g_io_transient_map_bio(bp)) >= 0)
                                return (error);
                }
                break;
        default:
                break;
        }
        return (EJUSTRETURN);
}
/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bios that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */
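
/*
 * Illustrative classifier (not compiled; the names are made up for this
 * example): it tags ordered bios, and reports that it updated a field by
 * returning non-zero, which keeps g_run_classifiers() from setting
 * bio_classifier1 to BIO_NOTCLASSIFIED.
 */
#if 0
static int
example_classify(void *arg, struct bio *bp)
{

        if ((bp->bio_flags & BIO_ORDERED) == 0)
                return (0);             /* not ours; let other hooks look */
        bp->bio_classifier1 = arg;      /* any non-NULL cookie will do */
        return (1);                     /* a classifier field was updated */
}
#endif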
int
g_register_classifier(struct g_classifier_hook *hook)
{

        g_bioq_lock(&g_bio_run_down);
        TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
        g_bioq_unlock(&g_bio_run_down);

        return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
        struct g_classifier_hook *entry;

        g_bioq_lock(&g_bio_run_down);
        TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
                if (entry == hook) {
                        TAILQ_REMOVE(&g_classifier_tailq, hook, link);
                        break;
                }
        }
        g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
        struct g_classifier_hook *hook;
        int classified = 0;

        TAILQ_FOREACH(hook, &g_classifier_tailq, link)
                classified |= hook->func(hook->arg, bp);

        if (!classified)
                bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
        struct g_provider *pp;
        struct mtx *mtxp;
        int direct, error, first;
        uint8_t cmd;

        KASSERT(cp != NULL, ("NULL cp in g_io_request"));
        KASSERT(bp != NULL, ("NULL bp in g_io_request"));
        pp = cp->provider;
        KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
        KASSERT(bp->bio_driver1 == NULL,
            ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
        KASSERT(bp->bio_driver2 == NULL,
            ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
        KASSERT(bp->bio_pflags == 0,
            ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
        /*
         * Remember consumer's private fields, so we can detect if they were
         * modified by the provider.
         */
        bp->_bio_caller1 = bp->bio_caller1;
        bp->_bio_caller2 = bp->bio_caller2;
        bp->_bio_cflags = bp->bio_cflags;
#endif

        cmd = bp->bio_cmd;
        if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
                KASSERT(bp->bio_data != NULL,
                    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
        }
        if (cmd == BIO_DELETE || cmd == BIO_FLUSH) {
                KASSERT(bp->bio_data == NULL,
                    ("non-NULL bp->data in g_io_request(cmd=%hu)",
                    bp->bio_cmd));
        }
        if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
                KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
                    ("wrong offset %jd for sectorsize %u",
                    bp->bio_offset, cp->provider->sectorsize));
                KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
                    ("wrong length %jd for sectorsize %u",
                    bp->bio_length, cp->provider->sectorsize));
        }

        g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
            bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

        bp->bio_from = cp;
        bp->bio_to = pp;
        bp->bio_error = 0;
        bp->bio_completed = 0;

        KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
            ("Bio already on queue bp=%p", bp));
        if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
            ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
                binuptime(&bp->bio_t0);
        else
                getbinuptime(&bp->bio_t0);

#ifdef GET_STACK_USAGE
        direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
            (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
            !g_is_geom_thread(curthread) &&
            ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
            (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
            pace == 0;
        if (direct) {
                /* Block direct execution if less than half of stack left. */
                size_t st, su;

                GET_STACK_USAGE(st, su);
                if (su * 2 > st)
                        direct = 0;
        }
#else
        direct = 0;
#endif

        if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
                g_bioq_lock(&g_bio_run_down);
                g_run_classifiers(bp);
                g_bioq_unlock(&g_bio_run_down);
        }

        /*
         * The statistics collection is lockless, as such, but we
         * can not update one instance of the statistics from more
         * than one thread at a time, so grab the lock first.
         */
        mtxp = mtx_pool_find(mtxpool_sleep, pp);
        mtx_lock(mtxp);
        if (g_collectstats & G_STATS_PROVIDERS)
                devstat_start_transaction(pp->stat, &bp->bio_t0);
        if (g_collectstats & G_STATS_CONSUMERS)
                devstat_start_transaction(cp->stat, &bp->bio_t0);
        pp->nstart++;
        cp->nstart++;
        mtx_unlock(mtxp);

        if (direct) {
                error = g_io_check(bp);
                if (error >= 0) {
                        CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
                            "provider %s returned %d", bp, bp->bio_to->name,
                            error);
                        g_io_deliver(bp, error);
                        return;
                }
                bp->bio_to->geom->start(bp);
        } else {
                g_bioq_lock(&g_bio_run_down);
                first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
                TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
                bp->bio_flags |= BIO_ONQUEUE;
                g_bio_run_down.bio_queue_length++;
                g_bioq_unlock(&g_bio_run_down);
                /* Pass it on down. */
                if (first)
                        wakeup(&g_wait_down);
        }
}
void
g_io_deliver(struct bio *bp, int error)
{
        struct bintime now;
        struct g_consumer *cp;
        struct g_provider *pp;
        struct mtx *mtxp;
        int direct, first;

        KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
        pp = bp->bio_to;
        KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
        cp = bp->bio_from;
        if (cp == NULL) {
                bp->bio_error = error;
                bp->bio_done(bp);
                return;
        }
        KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
        KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
        /*
         * Some classes - GJournal in particular - can modify bio's
         * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
         * flag means it's an expected behaviour for that particular geom.
         */
        if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
                KASSERT(bp->bio_caller1 == bp->_bio_caller1,
                    ("bio_caller1 used by the provider %s", pp->name));
                KASSERT(bp->bio_caller2 == bp->_bio_caller2,
                    ("bio_caller2 used by the provider %s", pp->name));
                KASSERT(bp->bio_cflags == bp->_bio_cflags,
                    ("bio_cflags used by the provider %s", pp->name));
        }
#endif
        KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
        KASSERT(bp->bio_completed <= bp->bio_length,
            ("bio_completed can't be greater than bio_length"));

        g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
            bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
            (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

        KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
            ("Bio already on queue bp=%p", bp));

        /*
         * XXX: the next two don't belong here
         */
        bp->bio_bcount = bp->bio_length;
        bp->bio_resid = bp->bio_bcount - bp->bio_completed;

#ifdef GET_STACK_USAGE
        direct = (pp->flags & G_PF_DIRECT_SEND) &&
            (cp->flags & G_CF_DIRECT_RECEIVE) &&
            !g_is_geom_thread(curthread);
        if (direct) {
                /* Block direct execution if less than half of stack left. */
                size_t st, su;

                GET_STACK_USAGE(st, su);
                if (su * 2 > st)
                        direct = 0;
        }
#else
        direct = 0;
#endif

        /*
         * The statistics collection is lockless, as such, but we
         * can not update one instance of the statistics from more
         * than one thread at a time, so grab the lock first.
         */
        if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
            ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
                binuptime(&now);
        mtxp = mtx_pool_find(mtxpool_sleep, cp);
        mtx_lock(mtxp);
        if (g_collectstats & G_STATS_PROVIDERS)
                devstat_end_transaction_bio_bt(pp->stat, bp, &now);
        if (g_collectstats & G_STATS_CONSUMERS)
                devstat_end_transaction_bio_bt(cp->stat, bp, &now);
        cp->nend++;
        pp->nend++;
        mtx_unlock(mtxp);

        if (error != ENOMEM) {
                bp->bio_error = error;
                if (direct) {
                        biodone(bp);
                } else {
                        g_bioq_lock(&g_bio_run_up);
                        first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
                        TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp,
                            bio_queue);
                        bp->bio_flags |= BIO_ONQUEUE;
                        g_bio_run_up.bio_queue_length++;
                        g_bioq_unlock(&g_bio_run_up);
                        if (first)
                                wakeup(&g_wait_up);
                }
                return;
        }

        if (bootverbose)
                printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
        bp->bio_children = 0;
        bp->bio_inbed = 0;
        bp->bio_driver1 = NULL;
        bp->bio_driver2 = NULL;
        g_io_request(bp, cp);
        pace = 1;
        return;
}
SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");
static int
g_io_transient_map_bio(struct bio *bp)
{
        vm_offset_t addr;
        long size;
        u_int retried;

        KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

        size = round_page(bp->bio_ma_offset + bp->bio_length);
        KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
        addr = 0;
        retried = 0;
        atomic_add_long(&transient_maps, 1);
retry:
        if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
                if (transient_map_retries != 0 &&
                    retried >= transient_map_retries) {
                        CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
                            bp, bp->bio_to->name);
                        atomic_add_int(&transient_map_hard_failures, 1);
                        return (EDEADLK/* XXXKIB */);
                } else {
                        /*
                         * Naive attempt to quiesce the I/O to get more
                         * in-flight requests completed and defragment
                         * the transient_arena.
                         */
                        CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
                            bp, bp->bio_to->name, retried);
                        pause("g_d_tra", hz / 10);
                        retried++;
                        atomic_add_int(&transient_map_soft_failures, 1);
                        goto retry;
                }
        }
        atomic_add_int(&inflight_transient_maps, 1);
        pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
        bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
        bp->bio_flags |= BIO_TRANSIENT_MAPPING;
        bp->bio_flags &= ~BIO_UNMAPPED;
        return (EJUSTRETURN);
}
void
g_io_schedule_down(struct thread *tp __unused)
{
        struct bio *bp;
        int error;

        for(;;) {
                g_bioq_lock(&g_bio_run_down);
                bp = g_bioq_first(&g_bio_run_down);
                if (bp == NULL) {
                        CTR0(KTR_GEOM, "g_down going to sleep");
                        msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
                            PRIBIO | PDROP, "-", 0);
                        continue;
                }
                CTR0(KTR_GEOM, "g_down has work to do");
                g_bioq_unlock(&g_bio_run_down);
                if (pace != 0) {
                        /*
                         * There has been at least one memory allocation
                         * failure since the last I/O completed. Pause 1ms to
                         * give the system a chance to free up memory. We only
                         * do this once because a large number of allocations
                         * can fail in the direct dispatch case and there's no
                         * relationship between the number of these failures
                         * and the length of the outage. If there's still an
                         * outage, we'll pause again and again until it's
                         * resolved. Older versions paused longer and once per
                         * allocation failure. This was OK for a
                         * single-threaded g_down, but with direct dispatch
                         * would lead to a maximum of 10 IOPs for minutes at a
                         * time when transient memory issues prevented
                         * allocation for a batch of requests from the upper
                         * layers.
                         *
                         * XXX This pacing is really lame. It needs to be
                         * solved by other methods. This is OK only because
                         * the worst case scenario is so rare. In the worst
                         * case scenario all memory is tied up waiting for I/O
                         * to complete which can never happen since we can't
                         * allocate bios.
                         */
                        CTR0(KTR_GEOM, "g_down pacing self");
                        pause("g_down", min(hz/1000, 1));
                        pace = 0;
                }
                CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
                    bp->bio_to->name);
                error = g_io_check(bp);
                if (error >= 0) {
                        CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
                            "%s returned %d", bp, bp->bio_to->name, error);
                        g_io_deliver(bp, error);
                        continue;
                }
                THREAD_NO_SLEEPING();
                CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
                    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
                    bp->bio_length);
                bp->bio_to->geom->start(bp);
                THREAD_SLEEPING_OK();
        }
}
void
g_io_schedule_up(struct thread *tp __unused)
{
        struct bio *bp;

        for(;;) {
                g_bioq_lock(&g_bio_run_up);
                bp = g_bioq_first(&g_bio_run_up);
                if (bp == NULL) {
                        CTR0(KTR_GEOM, "g_up going to sleep");
                        msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
                            PRIBIO | PDROP, "-", 0);
                        continue;
                }
                g_bioq_unlock(&g_bio_run_up);
                THREAD_NO_SLEEPING();
                CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
                    "%jd len %ld", bp, bp->bio_to->name,
                    bp->bio_offset, bp->bio_length);
                biodone(bp);
                THREAD_SLEEPING_OK();
        }
}
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
        struct bio *bp;
        void *ptr;
        int errorc;

        KASSERT(length > 0 && length >= cp->provider->sectorsize &&
            length <= MAXPHYS, ("g_read_data(): invalid length %jd",
            (intmax_t)length));

        bp = g_alloc_bio();
        bp->bio_cmd = BIO_READ;
        bp->bio_done = NULL;
        bp->bio_offset = offset;
        bp->bio_length = length;
        ptr = g_malloc(length, M_WAITOK);
        bp->bio_data = ptr;
        g_io_request(bp, cp);
        errorc = biowait(bp, "gread");
        if (error != NULL)
                *error = errorc;
        g_destroy_bio(bp);
        if (errorc) {
                g_free(ptr);
                ptr = NULL;
        }
        return (ptr);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
        struct bio *bp;
        int error;

        KASSERT(length > 0 && length >= cp->provider->sectorsize &&
            length <= MAXPHYS, ("g_write_data(): invalid length %jd",
            (intmax_t)length));

        bp = g_alloc_bio();
        bp->bio_cmd = BIO_WRITE;
        bp->bio_done = NULL;
        bp->bio_offset = offset;
        bp->bio_length = length;
        bp->bio_data = ptr;
        g_io_request(bp, cp);
        error = biowait(bp, "gwrite");
        g_destroy_bio(bp);
        return (error);
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
        struct bio *bp;
        int error;

        KASSERT(length > 0 && length >= cp->provider->sectorsize,
            ("g_delete_data(): invalid length %jd", (intmax_t)length));

        bp = g_alloc_bio();
        bp->bio_cmd = BIO_DELETE;
        bp->bio_done = NULL;
        bp->bio_offset = offset;
        bp->bio_length = length;
        bp->bio_data = NULL;
        g_io_request(bp, cp);
        error = biowait(bp, "gdelete");
        g_destroy_bio(bp);
        return (error);
}
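
/*
 * Illustrative use of the synchronous helpers above (not compiled; "cp" is
 * assumed to be an attached consumer with read access, e.g. inside a
 * class's taste method): read the first sector and release the buffer.
 */
#if 0
        u_char *sec;
        int error;

        sec = g_read_data(cp, 0, cp->provider->sectorsize, &error);
        if (sec == NULL)
                return (error);
        /* ... inspect sec ... */
        g_free(sec);
#endif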
void
g_print_bio(struct bio *bp)
{
        const char *pname, *cmd = NULL;

        if (bp->bio_to != NULL)
                pname = bp->bio_to->name;
        else
                pname = "[unknown]";

        switch (bp->bio_cmd) {
        case BIO_GETATTR:
                cmd = "GETATTR";
                printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
                return;
        case BIO_FLUSH:
                cmd = "FLUSH";
                printf("%s[%s]", pname, cmd);
                return;
        case BIO_ZONE: {
                char *subcmd = NULL;

                cmd = "ZONE";
                switch (bp->bio_zone.zone_cmd) {
                case DISK_ZONE_OPEN:
                        subcmd = "OPEN";
                        break;
                case DISK_ZONE_CLOSE:
                        subcmd = "CLOSE";
                        break;
                case DISK_ZONE_FINISH:
                        subcmd = "FINISH";
                        break;
                case DISK_ZONE_REPORT_ZONES:
                        subcmd = "REPORT ZONES";
                        break;
                case DISK_ZONE_GET_PARAMS:
                        subcmd = "GET PARAMS";
                        break;
                default:
                        subcmd = "UNKNOWN";
                        break;
                }
                printf("%s[%s,%s]", pname, cmd, subcmd);
                return;
        }
        case BIO_READ:
                cmd = "READ";
                break;
        case BIO_WRITE:
                cmd = "WRITE";
                break;
        case BIO_DELETE:
                cmd = "DELETE";
                break;
        default:
                cmd = "UNKNOWN";
                printf("%s[%s()]", pname, cmd);
                return;
        }
        printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
            (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}