/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>

#include <machine/stdarg.h>
#include <machine/atomic.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
static int g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve.  When pacing, we also turn
 * off direct dispatch to reduce memory pressure from I/Os there,
 * at the expense of some added latency while the memory
 * pressures exist.  See g_io_schedule_down() for more details.
 */
static volatile u_int __read_mostly pace;
static uma_zone_t __read_mostly biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add entries to and remove entries from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(, g_classifier_hook) g_classifier_tailq __read_mostly =
    TAILQ_HEAD_INITIALIZER(g_classifier_tailq);
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	uma_zfree(biozone, bp);
}
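
/*
 * Usage note (sketch, not part of the original file): g_new_bio()
 * allocates with M_NOWAIT and can return NULL under memory pressure,
 * while g_alloc_bio() uses M_WAITOK and always succeeds.  Code that may
 * not sleep must use the former and handle failure:
 *
 *	struct bio *bp;
 *
 *	bp = g_new_bio();
 *	if (bp == NULL)
 *		return (ENOMEM);
 *	...
 *	g_destroy_bio(bp);
 */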
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 * BIO_ORDERED flag may be used by disk drivers to enforce
		 * ordering restrictions, so this flag needs to be cloned.
		 * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
		 * indicate which way the buffer is passed.
		 * Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		if (bp->bio_cmd == BIO_ZONE)
			bcopy(&bp->bio_zone, &bp2->bio_zone,
			    sizeof(bp->bio_zone));
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		bp2->bio_track_bp = bp->bio_track_bp;
#endif
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}
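
/*
 * Typical use (hypothetical GEOM class, sketch only): a class clones the
 * bio it received in its start routine and forwards the clone to the
 * consumer below it; g_std_done() arranges for the parent bio to be
 * completed once all of its clones are done:
 *
 *	static void
 *	example_start(struct bio *bp)
 *	{
 *		struct bio *cbp;
 *
 *		cbp = g_clone_bio(bp);
 *		if (cbp == NULL) {
 *			g_io_deliver(bp, ENOMEM);
 *			return;
 *		}
 *		cbp->bio_done = g_std_done;
 *		g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
 *	}
 *
 * g_duplicate_bio() differs in that it sleeps until memory is available,
 * so it may only be used in sleepable contexts.
 */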
void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}
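
/*
 * Example caller (sketch; "GEOM::candelete" is a standard attribute):
 *
 *	int error, candelete, len;
 *
 *	len = sizeof(candelete);
 *	error = g_io_getattr("GEOM::candelete", cp, &len, &candelete);
 *	if (error == 0 && candelete != 0)
 *		(the provider supports BIO_DELETE)
 */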
int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_ZONE;
	bp->bio_done = NULL;
	/*
	 * XXX KDM need to handle report zone data.
	 */
	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
		bp->bio_length =
		    zone_args->zone_params.report.entries_allocated *
		    sizeof(struct disk_zone_rep_entry);
	else
		bp->bio_length = 0;

	g_io_request(bp, cp);
	error = biowait(bp, "gzone");
	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
	g_destroy_bio(bp);
	return (error);
}
/*
 * Send a BIO_SPEEDUP down the stack.  This is used to tell the lower layers
 * that the upper layers have detected a resource shortage.  The lower layers
 * are advised to stop delaying I/O that they might be holding for performance
 * reasons and to schedule it (non-trims) or complete it successfully (trims)
 * as quickly as they can.  bio_length is the amount of the shortage.  This
 * call should be non-blocking.  bio_resid is used to communicate back if the
 * lower layers couldn't find bio_length worth of I/O to schedule or discard.
 * A length of 0 means to do as much as you can (schedule the h/w queues full,
 * discard all trims).  flags are a hint from the upper layers to the lower
 * layers what operation should be done.
 */
int
g_io_speedup(size_t shortage, u_int flags, size_t *resid, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	KASSERT((flags & (BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE)) != 0,
	    ("Invalid flags passed to g_io_speedup: %#x", flags));
	g_trace(G_T_BIO, "bio_speedup(%s, %zu, %#x)", cp->provider->name,
	    shortage, flags);
	bp = g_new_bio();
	if (bp == NULL)
		return (ENOMEM);
	bp->bio_cmd = BIO_SPEEDUP;
	bp->bio_length = shortage;
	bp->bio_done = NULL;
	bp->bio_flags |= flags;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	*resid = bp->bio_resid;
	g_destroy_bio(bp);
	return (error);
}
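
/*
 * Example (sketch; shortage_bytes is a hypothetical caller-computed
 * value): ask the lower layers to expedite held-back trims when space
 * runs short, then check how much of the request could not be satisfied:
 *
 *	size_t resid;
 *	int error;
 *
 *	error = g_io_speedup(shortage_bytes, BIO_SPEEDUP_TRIM, &resid, cp);
 *	if (error == 0 && resid > 0)
 *		(less than shortage_bytes worth of I/O was found)
 */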
int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}
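
/*
 * Example (sketch): flush the provider's write cache before relying on
 * previously written data being durable, e.g. after a metadata update:
 *
 *	error = g_io_flush(cp);
 *	if (error != 0)
 *		(the BIO_FLUSH failed; writes may still be volatile)
 */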
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	biotrack(bp, __func__);

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	case BIO_ZONE:
		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
			if (cp->acr == 0)
				return (EPERM);
		} else if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}
/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bios that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */
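
/*
 * Example (hypothetical hook, sketch only): a classifier that tags write
 * bios with a caller-supplied cookie and leaves other bios untouched:
 *
 *	static int
 *	example_classify(void *arg, struct bio *bp)
 *	{
 *
 *		if (bp->bio_cmd != BIO_WRITE)
 *			return (0);
 *		bp->bio_classifier1 = arg;
 *		return (1);
 *	}
 *
 *	static struct g_classifier_hook example_hook = {
 *		.func = example_classify,
 *		.arg = &example_cookie,
 *	};
 *
 *	error = g_register_classifier(&example_hook);
 *
 * All registered hooks run on each unclassified bio; only if every hook
 * returns zero is bio_classifier1 set to BIO_NOTCLASSIFIED so the bio is
 * not examined again.
 */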
int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	biotrack(bp, __func__);

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, error, first;
	uint8_t cmd;

	biotrack(bp, __func__);

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	cmd = bp->bio_cmd;
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
	}
	if (cmd == BIO_DELETE || cmd == BIO_FLUSH) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
		    bp->bio_cmd));
	}
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

#ifdef GET_STACK_USAGE
	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    !g_is_geom_thread(curthread) &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t	st, su;

		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
		g_bioq_lock(&g_bio_run_down);
		g_run_classifiers(bp);
		g_bioq_unlock(&g_bio_run_down);
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction(cp->stat, &bp->bio_t0);
	pp->nstart++;
	cp->nstart++;
	mtx_unlock(mtxp);

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}
void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	biotrack(bp, __func__);

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
	 * flag means it's an expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: next two don't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

#ifdef GET_STACK_USAGE
	direct = (pp->flags & G_PF_DIRECT_SEND) &&
		 (cp->flags & G_CF_DIRECT_RECEIVE) &&
		 !g_is_geom_thread(curthread);
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t	st, su;

		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, cp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
	cp->nend++;
	pp->nend++;
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp,
			    bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}
SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");
static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		biotrack(bp, __func__);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed.  Pause 1ms to
			 * give the system a chance to free up memory.  We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures
			 * and the length of the outage.  If there's still an
			 * outage, we'll pause again and again until it's
			 * resolved.  Older versions paused longer and once per
			 * allocation failure.  This was OK for a single
			 * threaded g_down, but with direct dispatch would lead
			 * to max of 10 IOPs for minutes at a time when
			 * transient memory issues prevented allocation for a
			 * batch of requests from the upper layers.
			 *
			 * XXX This pacing is really lame.  It needs to be
			 * solved by other methods.  This is OK only because
			 * the worst case scenario is so rare.  In the worst
			 * case scenario all memory is tied up waiting for I/O
			 * to complete which can never happen since we can't
			 * allocate bios for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_up going to sleep");
			msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		g_bioq_unlock(&g_bio_run_up);
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
		    "%jd len %ld", bp, bp->bio_to->name,
		    bp->bio_offset, bp->bio_length);
		biodone(bp);
		THREAD_SLEEPING_OK();
	}
}
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	if (errorc != 0) {
		g_free(ptr);
		ptr = NULL;
	}
	g_destroy_bio(bp);
	return (ptr);
}
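
/*
 * Example (sketch): read the first sector of a provider during tasting.
 * The buffer is allocated inside g_read_data() and must be released with
 * g_free() by the caller:
 *
 *	u_char *buf;
 *	int error;
 *
 *	buf = g_read_data(cp, 0, cp->provider->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	...
 *	g_free(buf);
 */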
/*
 * A read function for use by ffs_sbget when used by GEOM-layer routines.
 */
static int
g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size)
{
	struct g_consumer *cp;

	KASSERT(*bufp == NULL,
	    ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp));

	cp = (struct g_consumer *)devfd;
	/*
	 * Take care not to issue an invalid I/O request.  The offset of
	 * the superblock candidate must be a multiple of the provider's
	 * sector size, otherwise an FFS can't exist on the provider
	 * anyway.
	 */
	if (loc % cp->provider->sectorsize != 0)
		return (ENOENT);
	*bufp = g_read_data(cp, loc, size, NULL);
	if (*bufp == NULL)
		return (ENOENT);
	return (0);
}
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}
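
/*
 * Example (sketch): write a sector-sized buffer back; offset and length
 * must both be multiples of the provider's sector size:
 *
 *	error = g_write_data(cp, 0, buf, cp->provider->sectorsize);
 */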
/*
 * A write function for use by ffs_sbput when used by GEOM-layer routines.
 */
static int
g_use_g_write_data(void *devfd, off_t loc, void *buf, int size)
{

	return (g_write_data((struct g_consumer *)devfd, loc, buf, size));
}
int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}
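
/*
 * Example (sketch): hint that an entire provider's contents may be
 * discarded, e.g. when wiping an on-disk label:
 *
 *	error = g_delete_data(cp, 0, cp->provider->mediasize);
 */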
void
g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix,
    ...)
{
#ifndef PRINTF_BUFR_SIZE
#define PRINTF_BUFR_SIZE	64
#endif
	char bufr[PRINTF_BUFR_SIZE];
	struct sbuf sb, *sbp __unused;
	va_list ap;

	sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN);
	KASSERT(sbp != NULL, ("sbuf_new misused?"));

	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);

	sbuf_cat(&sb, prefix);
	g_format_bio(&sb, bp);

	va_start(ap, fmtsuffix);
	sbuf_vprintf(&sb, fmtsuffix, ap);
	va_end(ap);

	sbuf_nl_terminate(&sb);

	sbuf_finish(&sb);
	sbuf_delete(&sb);
}
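
/*
 * Example (sketch): log a failed bio with an explanatory suffix; the
 * printf-style suffix is rendered after the formatted bio:
 *
 *	g_print_bio("GEOM_EXAMPLE", bp, " failed (error=%d)", error);
 */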
void
g_format_bio(struct sbuf *sb, const struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		sbuf_printf(sb, "%s[%s(attr=%s)]", pname, cmd,
		    bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		sbuf_printf(sb, "%s[%s]", pname, cmd);
		return;
	case BIO_ZONE: {
		char *subcmd = NULL;
		cmd = "ZONE";
		switch (bp->bio_zone.zone_cmd) {
		case DISK_ZONE_OPEN:
			subcmd = "OPEN";
			break;
		case DISK_ZONE_CLOSE:
			subcmd = "CLOSE";
			break;
		case DISK_ZONE_FINISH:
			subcmd = "FINISH";
			break;
		case DISK_ZONE_REPORT_ZONES:
			subcmd = "REPORT ZONES";
			break;
		case DISK_ZONE_GET_PARAMS:
			subcmd = "GET PARAMS";
			break;
		default:
			subcmd = "UNKNOWN";
			break;
		}
		sbuf_printf(sb, "%s[%s,%s]", pname, cmd, subcmd);
		return;
	}
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		sbuf_printf(sb, "%s[%s()]", pname, cmd);
		return;
	}
	sbuf_printf(sb, "%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}