/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <machine/stdarg.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static int g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;

/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve. When pacing, we also turn
 * off direct dispatch to reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressures exist. See g_io_schedule_down() for more details.
 */
static volatile u_int __read_mostly pace;

static uma_zone_t __read_mostly biozone;

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}
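
/*
 * Editor's example (a sketch, not part of the original source): the two
 * allocators differ only in how they handle memory pressure.  g_new_bio()
 * uses M_NOWAIT and may return NULL; g_alloc_bio() uses M_WAITOK and may
 * sleep but never fails.  A synchronous request built on them follows the
 * same pattern as g_write_data() further down in this file:
 */
#if 0
static int
example_sync_write(struct g_consumer *cp, off_t offset, void *ptr,
    off_t length)
{
	struct bio *bp;
	int error;

	bp = g_alloc_bio();		/* sleeps until a bio is available */
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;		/* NULL done: biodone() wakes biowait() */
	bp->bio_offset = offset;
	bp->bio_data = ptr;
	bp->bio_length = length;
	g_io_request(bp, cp);
	error = biowait(bp, "exwrite");
	g_destroy_bio(bp);
	return (error);
}
#endif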

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 * BIO_ORDERED flag may be used by disk drivers to enforce
		 * ordering restrictions, so this flag needs to be cloned.
		 * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
		 * indicate which way the buffer is passed.
		 * Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		if (bp->bio_cmd == BIO_ZONE)
			bcopy(&bp->bio_zone, &bp2->bio_zone,
			    sizeof(bp->bio_zone));
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		bp2->bio_track_bp = bp->bio_track_bp;
#endif
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}
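
/*
 * Editor's example (hypothetical, not in the original file): the usual
 * consumer of g_clone_bio() is a class start() method, which clones the
 * incoming bio, rewrites the offset for its own layout, and forwards the
 * clone; g_std_done() completes the parent when the clone finishes.
 * "struct example_softc" and "sc->offset" stand in for whatever
 * translation state a real class keeps.
 */
#if 0
static void
example_start(struct bio *bp)
{
	struct example_softc *sc = bp->bio_to->geom->softc;
	struct bio *cbp;

	cbp = g_clone_bio(bp);			/* M_NOWAIT: may fail */
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;		/* finishes the parent bio */
	cbp->bio_offset = bp->bio_offset + sc->offset;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
}
#endif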

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}

void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}
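
/*
 * Editor's example (not in the original file): a typical g_io_getattr()
 * call against an opened consumer "cp", querying the standard
 * "GEOM::candelete" attribute.  The caller passes the buffer size in via
 * *len and gets the completed length back.
 */
#if 0
	int candelete, error, len;

	len = sizeof(candelete);
	error = g_io_getattr("GEOM::candelete", cp, &len, &candelete);
	if (error == 0 && candelete != 0)
		printf("%s supports BIO_DELETE\n", cp->provider->name);
#endif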

int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_ZONE;
	bp->bio_done = NULL;
	/*
	 * XXX KDM need to handle report zone data.
	 */
	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
		bp->bio_length =
		    zone_args->zone_params.report.entries_allocated *
		    sizeof(struct disk_zone_rep_entry);
	else
		bp->bio_length = 0;

	g_io_request(bp, cp);
	error = biowait(bp, "gzone");
	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
	g_destroy_bio(bp);
	return (error);
}

/*
 * Send a BIO_SPEEDUP down the stack. This is used to tell the lower layers that
 * the upper layers have detected a resource shortage. The lower layers are
 * advised to stop delaying I/O that they might be holding for performance
 * reasons and to schedule it (non-trims) or complete it successfully (trims) as
 * quickly as they can. bio_length is the amount of the shortage. This call
 * should be non-blocking. bio_resid is used to communicate back if the lower
 * layers couldn't find bio_length worth of I/O to schedule or discard. A length
 * of 0 means to do as much as you can (schedule the h/w queues full, discard
 * all trims). flags are a hint from the upper layers to the lower layers as to
 * what operation should be done.
 */
int
g_io_speedup(size_t shortage, u_int flags, size_t *resid, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	KASSERT((flags & (BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE)) != 0,
	    ("Invalid flags passed to g_io_speedup: %#x", flags));
	g_trace(G_T_BIO, "bio_speedup(%s, %zu, %#x)", cp->provider->name,
	    shortage, flags);
	bp = g_new_bio();
	if (bp == NULL)
		return (ENOMEM);
	bp->bio_cmd = BIO_SPEEDUP;
	bp->bio_length = shortage;
	bp->bio_done = NULL;
	bp->bio_flags |= flags;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	*resid = bp->bio_resid;
	g_destroy_bio(bp);
	return (error);
}
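
/*
 * Editor's example (not in the original file): an upper layer reacting to
 * a space or memory shortage would ask the layers below to hurry both
 * queued trims and writes:
 */
#if 0
	size_t resid;
	int error;

	error = g_io_speedup(shortage, BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE,
	    &resid, cp);
	if (error == 0 && resid > 0)
		printf("%zu bytes of the shortage were not addressed\n",
		    resid);
#endif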

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	biotrack(bp, __func__);

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
	case BIO_SPEEDUP:
		if (cp->acw == 0)
			return (EPERM);
		break;
	case BIO_ZONE:
		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
			if (cp->acr == 0)
				return (EPERM);
		} else if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (EJUSTRETURN);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int direct, error, first;
	uint8_t cmd;

	biotrack(bp, __func__);

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	cmd = bp->bio_cmd;
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
	}
	if (cmd == BIO_DELETE || cmd == BIO_FLUSH) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
		    bp->bio_cmd));
	}
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction(cp->stat, &bp->bio_t0);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction(pp->stat, &bp->bio_t0);

	atomic_add_int(&cp->nstart, 1);

#ifdef GET_STACK_USAGE
	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    !g_is_geom_thread(curthread) &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t st, su;

		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}
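
/*
 * Editor's example (a fragment, not in the original file): asynchronous
 * use of g_io_request() with a private completion callback instead of a
 * biowait() rendezvous.  The locals bp, cp, offset, ptr and length are
 * assumed to exist in the submitting function.
 */
#if 0
static void
example_done(struct bio *bp)
{

	if (bp->bio_error != 0)
		printf("example: I/O error %d\n", bp->bio_error);
	g_destroy_bio(bp);
}

	/* ...in the submission path: */
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = example_done;	/* called from biodone() */
	bp->bio_offset = offset;
	bp->bio_data = ptr;
	bp->bio_length = length;
	g_io_request(bp, cp);
#endif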

void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	biotrack(bp, __func__);

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
	 * flag means it's an expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two don't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

#ifdef GET_STACK_USAGE
	direct = (pp->flags & G_PF_DIRECT_SEND) &&
	    (cp->flags & G_CF_DIRECT_RECEIVE) &&
	    !g_is_geom_thread(curthread);
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t st, su;

		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, cp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
	cp->nend++;
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp,
			    bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");

static int
g_io_transient_map_bio(struct bio *bp)
{
	vmem_addr_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		}
		/*
		 * Naive attempt to quiesce the I/O to get more
		 * in-flight requests completed and defragment
		 * the transient_arena.
		 */
		CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
		    bp, bp->bio_to->name, retried);
		pause("g_d_tra", hz / 10);
		retried++;
		atomic_add_int(&transient_map_soft_failures, 1);
		goto retry;
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}
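
/*
 * Editor's example (assumption-laden sketch, not in the original file):
 * what an unmapped request looks like from the submitting side.  The
 * vm_page_t array "ma"/"npages" is assumed to be wired by the caller;
 * the transient mapping above only kicks in when the destination
 * provider lacks G_PF_ACCEPT_UNMAPPED.
 */
#if 0
	bp->bio_flags |= BIO_UNMAPPED;
	bp->bio_ma = ma;			/* wired vm_page_t array */
	bp->bio_ma_n = npages;
	bp->bio_ma_offset = offset & PAGE_MASK;	/* start within first page */
	bp->bio_data = unmapped_buf;		/* poison, never dereferenced */
	g_io_request(bp, cp);
#endif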

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		biotrack(bp, __func__);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed. Pause 1ms to
			 * give the system a chance to free up memory. We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures and
			 * the length of the outage. If there's still an outage,
			 * we'll pause again and again until it's
			 * resolved. Older versions paused longer and once per
			 * allocation failure. This was OK for a single threaded
			 * g_down, but with direct dispatch would lead to max of
			 * 10 IOPs for minutes at a time when transient memory
			 * issues prevented allocation for a batch of requests
			 * from the upper layers.
			 *
			 * XXX This pacing is really lame. It needs to be solved
			 * by other methods. This is OK only because the worst
			 * case scenario is so rare. In the worst case scenario
			 * all memory is tied up waiting for I/O to complete
			 * which can never happen since we can't allocate bios
			 * for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_up going to sleep");
			msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		g_bioq_unlock(&g_bio_run_up);
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
		    "%jd len %ld", bp, bp->bio_to->name,
		    bp->bio_offset, bp->bio_length);
		biodone(bp);
		THREAD_SLEEPING_OK();
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}
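
/*
 * Editor's example (not in the original file): the common tasting idiom
 * built on g_read_data().  The buffer is allocated by g_read_data()
 * itself and must be released with g_free():
 */
#if 0
	u_char *buf;
	int error;

	buf = g_read_data(cp, 0, cp->provider->sectorsize, &error);
	if (buf == NULL)
		return (error);
	/* ...inspect the on-disk label in buf... */
	g_free(buf);
#endif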

/*
 * A read function for use by ffs_sbget when used by GEOM-layer routines.
 */
int
g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size)
{
	struct g_consumer *cp;

	KASSERT(*bufp == NULL,
	    ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp));

	cp = (struct g_consumer *)devfd;
	/*
	 * Take care not to issue an invalid I/O request. The offset of
	 * the superblock candidate must be a multiple of the provider's
	 * sector size, otherwise an FFS can't exist on the provider
	 * anyway.
	 */
	if (loc % cp->provider->sectorsize != 0)
		return (EINVAL);
	*bufp = g_read_data(cp, loc, size, NULL);
	if (*bufp == NULL)
		return (EIO);
	return (0);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

/*
 * A write function for use by ffs_sbput when used by GEOM-layer routines.
 */
int
g_use_g_write_data(void *devfd, off_t loc, void *buf, int size)
{

	return (g_write_data((struct g_consumer *)devfd, loc, buf, size));
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

void
g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix,
    ...)
{
#ifndef PRINTF_BUFR_SIZE
#define PRINTF_BUFR_SIZE 64
#endif
	char bufr[PRINTF_BUFR_SIZE];
	struct sbuf sb, *sbp __unused;
	va_list ap;

	sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN);
	KASSERT(sbp != NULL, ("sbuf_new misused?"));

	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);

	sbuf_cat(&sb, prefix);
	g_format_bio(&sb, bp);

	va_start(ap, fmtsuffix);
	sbuf_vprintf(&sb, fmtsuffix, ap);
	va_end(ap);

	sbuf_nl_terminate(&sb);
	sbuf_finish(&sb);
	sbuf_delete(&sb);
}
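
/*
 * Editor's example (not in the original file): g_print_bio() is meant for
 * one-line diagnostics about a request, e.g. on completion with an error:
 */
#if 0
	if (bp->bio_error != 0)
		g_print_bio("GEOM: ", bp, " error=%d", bp->bio_error);
#endif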

void
g_format_bio(struct sbuf *sb, const struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		sbuf_printf(sb, "%s[%s(attr=%s)]", pname, cmd,
		    bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		sbuf_printf(sb, "%s[%s]", pname, cmd);
		return;
	case BIO_ZONE: {
		char *subcmd = NULL;
		cmd = "ZONE";
		switch (bp->bio_zone.zone_cmd) {
		case DISK_ZONE_OPEN:
			subcmd = "OPEN";
			break;
		case DISK_ZONE_CLOSE:
			subcmd = "CLOSE";
			break;
		case DISK_ZONE_FINISH:
			subcmd = "FINISH";
			break;
		case DISK_ZONE_RWP:
			subcmd = "RWP";
			break;
		case DISK_ZONE_REPORT_ZONES:
			subcmd = "REPORT ZONES";
			break;
		case DISK_ZONE_GET_PARAMS:
			subcmd = "GET PARAMS";
			break;
		default:
			subcmd = "UNKNOWN";
			break;
		}
		sbuf_printf(sb, "%s[%s,%s]", pname, cmd, subcmd);
		return;
	}
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		sbuf_printf(sb, "%s[%s()]", pname, cmd);
		return;
	}
	sbuf_printf(sb, "%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}