 * SPDX-License-Identifier: (Beerware AND BSD-3-Clause)
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * from: Utah Hdr: vn.c 1.13 94/04/02
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
#include "opt_rootdevname.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/bus.h>
#define	MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */
#define	MD_PROVIDERGONE 0x40000		/* Safe to free the softc */

#define	MD_NSECT (10000 * 2)
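/*
 * Editor's note: MD_NSECT looks like a default sector count for md
 * devices (an assumption; its use site is not shown in this excerpt).
 * 10000 * 2 = 20000 sectors of DEV_BSIZE (512) bytes is 10240000
 * bytes, a little under 10 MB.
 */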
	unsigned	md_unit;	/* unit number */
	enum md_types	md_type;	/* type of disk */
	off_t		md_mediasize;	/* size of disk in bytes */
	unsigned	md_sectorsize;	/* sectorsize */
	unsigned	md_options;	/* options */
	int		md_fwheads;	/* firmware heads */
	int		md_fwsectors;	/* firmware sectors */
	char		*md_file;	/* pathname of file to mount */
	enum uio_seg	md_file_seg;	/* location of md_file */
	char		*md_label;	/* label of the device (userspace) */
	int		*md_units;	/* pointer to units array (kernel) */
	size_t		md_units_nitems; /* items in md_units array */
#ifdef COMPAT_FREEBSD32
	enum md_types	md_type;
	unsigned	md_sectorsize;
} __attribute__((__packed__));
CTASSERT((sizeof(struct md_ioctl32)) == 436);

#define	MDIOCATTACH_32	_IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
#define	MDIOCDETACH_32	_IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
#define	MDIOCQUERY_32	_IOC_NEWTYPE(MDIOCQUERY, struct md_ioctl32)
#define	MDIOCLIST_32	_IOC_NEWTYPE(MDIOCLIST, struct md_ioctl32)
#define	MDIOCRESIZE_32	_IOC_NEWTYPE(MDIOCRESIZE, struct md_ioctl32)
#endif /* COMPAT_FREEBSD32 */
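/*
 * Editor's note: _IOC_NEWTYPE() re-encodes each native ioctl number
 * with sizeof(struct md_ioctl32) in its size field while keeping the
 * direction and command bits, so the *_32 compat commands copy the
 * packed 436-byte layout asserted above in and out for 32-bit callers.
 */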
static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object
 * file.  Applications that patch the object with the image can determine
 * the size by looking at the oldmfs section size within the kernel.
 */
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#elif defined(MD_ROOT_MEM)
/* MD region already mapped in memory */
u_char *mfs_root;
int mfs_root_size;
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
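/*
 * Editor's note: in the weak-symbol branch above, an embedded image is
 * expected to be bracketed at link time by the mfs_root and
 * mfs_root_end symbols, so the pointer difference yields the image
 * size in bytes without a separate size constant.
 */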
static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
static g_provgone_t g_md_providergone;

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
};

struct g_class g_md_class = {
	.version = G_VERSION,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
	.providergone = g_md_providergone,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);
static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define	NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define	NMASK	(NINDIR-1)
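/*
 * Editor's note, a worked example assuming a 4 KB PAGE_SIZE and 8-byte
 * pointers: NINDIR = 4096 / 8 = 512 slots per indirection node, NMASK =
 * 511, and each layer of the tree therefore consumes log2(512) = 9 bits
 * of the sector number (the nshift computed in g_md_init()).
 */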
static uma_zone_t md_pbuf_zone;

	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */

	/* MD_PRELOAD related fields */

	/* MD_VNODE related fields */
	char label[PATH_MAX];

	/* MD_SWAP related fields */
static struct indir *
new_indir(u_int shift)
	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {

del_indir(struct indir *ip)
	free(ip->array, M_MDSECT);

destroy_indir(struct md_s *sc, struct indir *ip)
	for (i = 0; i < NINDIR; i++) {
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
static struct indir *
dimension(off_t size)
	while (rcnt > NINDIR) {

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->shift = layer * nshift;
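	/*
	 * Editor's note, a worked example: with NINDIR = 512 (nshift = 9),
	 * a 1 GB device of 512-byte sectors has 2097152 sectors.  The loop
	 * above divides 2097152 -> 4096 -> 8 and stops, so layer is 2 and
	 * the root node gets shift = 2 * 9 = 18 bits.
	 */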
/*
 * Read a given sector
 */
s_read(struct indir *ip, off_t offset)
	printf("s_read(%jd)\n", (intmax_t)offset);
	for (cip = ip; cip != NULL;) {
		idx = (offset >> cip->shift) & NMASK;
		up = cip->array[idx];
		cip = (struct indir *)up;
	idx = offset & NMASK;
	return (cip->array[idx]);
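/*
 * Editor's note, tracing the lookup above for sector 270149 on the
 * two-layer tree from the previous example: the root (shift 18) indexes
 * slot (270149 >> 18) & 511 = 1, the next layer (shift 9) indexes slot
 * (270149 >> 9) & 511 = 15, and the leaf entry is array[270149 & 511] =
 * array[325], which holds 0, a fill byte, or a pointer to sector
 * storage (see mdstart_malloc() below).
 */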
/*
 * Write a given sector, prune the tree if the value is 0
 */
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
	struct indir *cip, *lip[10];

	printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
		idx = (offset >> cip->shift) & NMASK;
		up = cip->array[idx];
			cip = (struct indir *)up;
		/* Allocate branch */
		cip->array[idx] =
		    (uintptr_t)new_indir(cip->shift - nshift);
		if (cip->array[idx] == 0)
		up = cip->array[idx];
		cip = (struct indir *)up;
	idx = offset & NMASK;
	up = cip->array[idx];
	cip->array[idx] = ptr;
	if (cip->used != 0 || li == 1)
	while (cip->used == 0 && cip != ip) {
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		lip[li]->array[idx] = 0;
g_md_access(struct g_provider *pp, int r, int w, int e)
	sc = pp->geom->softc;
	if (r <= 0 && w <= 0 && e <= 0)
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {

g_md_start(struct bio *bp)
	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5
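/*
 * Editor's note: these opcodes select the operation that
 * md_malloc_move_ma() and md_malloc_move_vlist() perform while walking
 * a sector that may span several VM pages or scatter/gather segments:
 * zero it, fill it with a constant byte, copy it out of (READ) or into
 * (WRITE) backing storage, or check that it consists of one repeated
 * byte (CMP, used by the BIO_WRITE path under MD_COMPRESS to find
 * compressible sectors).
 */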
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
	int error, i, ma_offs1, sz, first_read;

	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
		    (md_malloc_wait ? 0 : SFB_NOWAIT));
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		case MD_MALLOC_MOVE_ZERO:
		case MD_MALLOC_MOVE_FILL:
		case MD_MALLOC_MOVE_READ:
			cpu_flush_dcache(p, sz);
		case MD_MALLOC_MOVE_WRITE:
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				} else if (*p != first) {
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
		*ma_offs %= PAGE_SIZE;
		ptr = (char *)ptr + sz;
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	int ma_offs, seg_len;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		case MD_MALLOC_MOVE_ZERO:
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
		case MD_MALLOC_MOVE_CMP:
			/* Confirm all following bytes match the first */
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
		if (ma_offs == vlist->ds_len) {
		ptr = (uint8_t *)ptr + seg_len;
mdstart_malloc(struct md_s *sc, struct bio *bp)
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;

	switch (bp->bio_cmd) {

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
		ma_offs = bp->bio_ma_offset;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
				error = md_malloc_move_ma(&m, &ma_offs,
				    sc->sectorsize, NULL, 0,
				    MD_MALLOC_MOVE_ZERO);
			} else if (vlist != NULL) {
				error = md_malloc_move_vlist(&vlist,
				    &ma_offs, sc->sectorsize, NULL, 0,
				    MD_MALLOC_MOVE_ZERO);
				bzero(dst, sc->sectorsize);
		} else if (osp <= 255) {
				error = md_malloc_move_ma(&m, &ma_offs,
				    sc->sectorsize, NULL, osp,
				    MD_MALLOC_MOVE_FILL);
			} else if (vlist != NULL) {
				error = md_malloc_move_vlist(&vlist,
				    &ma_offs, sc->sectorsize, NULL, osp,
				    MD_MALLOC_MOVE_FILL);
				memset(dst, osp, sc->sectorsize);
				error = md_malloc_move_ma(&m, &ma_offs,
				    sc->sectorsize, (void *)osp, 0,
				    MD_MALLOC_MOVE_READ);
			} else if (vlist != NULL) {
				error = md_malloc_move_vlist(&vlist,
				    &ma_offs, sc->sectorsize,
				    (void *)osp, 0,
				    MD_MALLOC_MOVE_READ);
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				error1 = md_malloc_move_ma(&m, &ma_offs,
				    sc->sectorsize, &uc, 0,
				    MD_MALLOC_MOVE_CMP);
				i = error1 == 0 ? sc->sectorsize : 0;
			} else if (vlist != NULL) {
				error1 = md_malloc_move_vlist(&vlist,
				    &ma_offs, sc->sectorsize, &uc, 0,
				    MD_MALLOC_MOVE_CMP);
				i = error1 == 0 ? sc->sectorsize : 0;
				for (i = 1; i < sc->sectorsize; i++) {
			if (i == sc->sectorsize) {
				error = s_write(sc->indir, secno, uc);
				sp = (uintptr_t)uma_zalloc(sc->uma,
				    md_malloc_wait ? M_WAITOK :
				    M_NOWAIT);
					error = md_malloc_move_ma(&m,
					    &ma_offs, sc->sectorsize,
					    (void *)sp, 0,
					    MD_MALLOC_MOVE_WRITE);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(
					    &vlist, &ma_offs,
					    sc->sectorsize, (void *)sp,
					    0, MD_MALLOC_MOVE_WRITE);
					bcopy(dst, (void *)sp,
					    sc->sectorsize);
				error = s_write(sc->indir, secno, sp);
					error = md_malloc_move_ma(&m,
					    &ma_offs, sc->sectorsize,
					    (void *)osp, 0,
					    MD_MALLOC_MOVE_WRITE);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(
					    &vlist, &ma_offs,
					    sc->sectorsize, (void *)osp,
					    0, MD_MALLOC_MOVE_WRITE);
					bcopy(dst, (void *)osp,
					    sc->sectorsize);
			uma_zfree(sc->uma, (void*)osp);
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;
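/*
 * Editor's note on the encoding used above: each entry returned by
 * s_read() is 0 for a never-written sector, a value in 1..255 for a
 * sector whose bytes are all that one value (the MD_COMPRESS case,
 * detected with MD_MALLOC_MOVE_CMP), or a pointer to a full sector
 * allocated from sc->uma.  For example, a 512-byte sector of all 0x41
 * bytes is stored as the single value 0x41 and expanded again on read
 * by the MD_MALLOC_MOVE_FILL path.
 */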
mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
	seg_len = omin(len, vlist->ds_len - offset);
	bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
	    seg_len);
	src = (uint8_t *)src + seg_len;

mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
	seg_len = omin(len, vlist->ds_len - offset);
	bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
	    seg_len);
	dst = (uint8_t *)dst + seg_len;
mdstart_preload(struct md_s *sc, struct bio *bp)
	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
			bcopy(p, bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
			bcopy(bp->bio_data, p, bp->bio_length);
mdstart_vnode(struct md_s *sc, struct bio *bp)
	bus_dma_segment_t *vlist;
	off_t iolen, iostart, len, zerosize;

	switch (bp->bio_cmd) {
		auio.uio_rw = UIO_READ;
		auio.uio_rw = UIO_WRITE;

	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */
	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		vn_finished_write(mp);

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
			piov->iov_base = __DECONST(void *, zero_region);
			piov->iov_len = zerosize;
			len -= piov->iov_len;
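	/*
	 * Editor's note, a worked example for the BIO_DELETE emulation
	 * above, assuming ZERO_REGION_SIZE is 2 MB (as on amd64) and a
	 * 512-byte sector: zerosize = 2097152 - (2097152 % 512) =
	 * 2097152, so a 10 MB delete request builds
	 * howmany(10485760, 2097152) = 5 iovecs, each pointing at the
	 * shared zero_region.
	 */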
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		vlist = (bus_dma_segment_t *)bp->bio_data;
			piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
			    ma_offs);
			piov->iov_len = vlist->ds_len - ma_offs;
			if (piov->iov_len > len)
				piov->iov_len = len;
			len -= piov->iov_len;
		auio.uio_iovcnt = piov - auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = uma_zalloc(md_pbuf_zone, M_WAITOK);
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_resid = iolen;
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
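	/*
	 * Editor's note on the BIO_UNMAPPED branch above, a worked
	 * example assuming MAXPHYS = 128 KB and 4 KB pages: for
	 * len = 70000 and ma_offs = 512, round_page(70000 + 512) =
	 * 73728 covers npages = 18 pages, and iolen =
	 * min(18 * 4096 - 512, 70000) = 70000, so the whole request
	 * fits in one pass through the mapped pbuf window.
	 */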
	iostart = auio.uio_offset;
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, 0, sc->cred);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		vn_finished_write(mp);
			sc->flags &= ~MD_VERIFY;

	/* Unless MD_CACHE is set, try to avoid double-caching the data. */
	if (error == 0 && (sc->flags & MD_CACHE) == 0)
		VOP_ADVISE(vp, iostart, auio.uio_offset - 1,
		    POSIX_FADV_DONTNEED);

		pmap_qremove((vm_offset_t)pb->b_data, npages);
		bp->bio_resid -= iolen;
		uma_zfree(md_pbuf_zone, pb);

	bp->bio_resid = auio.uio_resid;
md_swap_page_free(vm_page_t m)

mdstart_swap(struct md_s *sc, struct bio *bp)
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
		return (EOPNOTSUPP);

	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
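	/*
	 * Editor's note, a worked example with 4 KB pages: for
	 * bio_offset = 6144 and bio_length = 3072, offs = 6144 % 4096 =
	 * 2048, lastp = (6144 + 3072 - 1) / 4096 = 2, and lastend =
	 * 9215 % 4096 + 1 = 1024, so the loop below touches page 1 from
	 * byte 2048 up to 4096 and page 2 from byte 0 up to 1024
	 * (2048 + 1024 = 3072 bytes in total).
	 */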
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				m->valid = VM_PAGE_BITS_ALL;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
			} else if (rv == VM_PAGER_FAIL)
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			m->valid = VM_PAGE_BITS_ALL;
			if (m->dirty != VM_PAGE_BITS_ALL) {
				vm_pager_page_unswapped(m);
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
			if (rv == VM_PAGER_ERROR) {
				md_swap_page_free(m);
			} else if (rv == VM_PAGER_FAIL) {
				md_swap_page_free(m);
			/* Page is valid. */
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				if (m->dirty != VM_PAGE_BITS_ALL) {
					vm_pager_page_unswapped(m);
				vm_pager_page_unswapped(m);
				md_swap_page_free(m);
		if (vm_page_active(m))
			vm_page_reference(m);
		else
			vm_page_activate(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;

	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
mdstart_null(struct md_s *sc, struct bio *bp)
	switch (bp->bio_cmd) {
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
md_kthread(void *arg)
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
		bp = bioq_takefirst(&sc->bio_queue);
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    g_handleattr_int(bp, "GEOM::fwheads",
			    g_handleattr_int(bp, "GEOM::candelete", 1))
			else if (sc->ident[0] != '\0' &&
			    g_handleattr_str(bp, "GEOM::ident", sc->ident))
			else if (g_handleattr_int(bp, "MNT::verified", isv))
			error = sc->start(sc, bp);

		if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
			/*
			 * Devstat uses (bio_bcount, bio_resid) for
			 * determining the length of the completed part of
			 * the i/o.  g_io_deliver() will translate from
			 * bio_completed to that, but it also destroys the
			 * bio so we must do our own translation.
			 */
			bp->bio_bcount = bp->bio_length;
			bp->bio_resid = (error == -1 ? bp->bio_bcount : 0);
			devstat_end_transaction_bio(sc->devstat, bp);
		bp->bio_completed = bp->bio_length;
		g_io_deliver(bp, error);
static struct md_s *
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
		unit = alloc_unr(md_uh);
		unit = alloc_unr_specific(md_uh, unit);

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
		LIST_REMOVE(sc, list);
		mtx_destroy(&sc->stat_mtx);
		mtx_destroy(&sc->queue_mtx);
		free_unr(md_uh, sc->unit);
mdinit(struct md_s *sc)
	struct g_provider *pp;

	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
mdcreate_malloc(struct md_s *sc, struct md_req *mdr)
	if (mdr->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
	if (mdr->md_sectorsize != 0 && !powerof2(mdr->md_sectorsize))

	/* Compression doesn't make sense if we have reserved space */
	if (mdr->md_options & MD_RESERVE)
		mdr->md_options &= ~MD_COMPRESS;
	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	if (mdr->md_options & MD_RESERVE) {
		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
				error = s_write(sc->indir, u, sp);
mdsetcred(struct md_s *sc, struct ucred *cred)

	/*
	 * Set credentials in our softc
	 */
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));
		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
	struct nameidata nd;

	fname = mdr->md_file;
	if (mdr->md_file_seg == UIO_USERSPACE) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
	} else if (mdr->md_file_seg == UIO_SYSSPACE)
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read-only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE)
	    | ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju",
	    (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid);
	sc->flags = mdr->md_options & (MD_ASYNC | MD_CACHE | MD_FORCE |
	    MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		VOP_UNLOCK(nd.ni_vp, 0);
		(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
g_md_providergone(struct g_provider *pp)
	struct md_s *sc = pp->geom->softc;

	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_PROVIDERGONE;
	mtx_unlock(&sc->queue_mtx);

mddestroy(struct md_s *sc, struct thread *td)
		g_wither_geom(sc->gp, ENXIO);
	g_topology_unlock();

	mtx_lock(&sc->queue_mtx);
	while (!(sc->flags & MD_PROVIDERGONE))
		msleep(&sc->flags, &sc->queue_mtx, PRIBIO, "mddestroy", 0);
	mtx_unlock(&sc->queue_mtx);

		devstat_remove_entry(sc->devstat);
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	if (sc->cred != NULL)
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
		destroy_indir(sc, sc->indir);
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
mdresize(struct md_s *sc, struct md_req *mdr)
	vm_pindex_t oldpages, newpages;

		if (mdr->md_mediasize <= 0 ||
		    (mdr->md_mediasize % PAGE_SIZE) != 0)
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdr->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if ((mdr->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		return (EOPNOTSUPP);

	sc->mediasize = mdr->md_mediasize;
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)

	/*
	 * Range check.  Disallow negative sizes and sizes not being a
	 * multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */
	if ((mdr->md_options & MD_VERIFY) != 0)
	npage = mdr->md_mediasize / PAGE_SIZE;
	if (mdr->md_fwsectors != 0)
		sc->fwsectors = mdr->md_fwsectors;
	if (mdr->md_fwheads != 0)
		sc->fwheads = mdr->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
	sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE);
	if (mdr->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
	error = mdsetcred(sc, td->td_ucred);
		vm_object_deallocate(sc->object);
mdcreate_null(struct md_s *sc, struct md_req *mdr, struct thread *td)

	/*
	 * Range check.  Disallow negative sizes and sizes not being a
	 * multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
kern_mdattach_locked(struct thread *td, struct md_req *mdr)
	sx_assert(&md_sx, SA_XLOCKED);

	switch (mdr->md_type) {
	if (mdr->md_sectorsize == 0)
		sectsize = DEV_BSIZE;
	else
		sectsize = mdr->md_sectorsize;
	if (sectsize > MAXPHYS || mdr->md_mediasize < sectsize)
	if (mdr->md_options & MD_AUTOUNIT)
		sc = mdnew(-1, &error, mdr->md_type);
		if (mdr->md_unit > INT_MAX)
		sc = mdnew(mdr->md_unit, &error, mdr->md_type);
	if (mdr->md_label != NULL)
		error = copyinstr(mdr->md_label, sc->label,
		    sizeof(sc->label), NULL);
	if (mdr->md_options & MD_AUTOUNIT)
		mdr->md_unit = sc->unit;
	sc->mediasize = mdr->md_mediasize;
	sc->sectorsize = sectsize;
		sc->start = mdstart_malloc;
		error = mdcreate_malloc(sc, mdr);
		/*
		 * We disallow attaching preloaded memory disks via
		 * ioctl.  Preloaded memory disks are automatically
		 * attached in g_md_init().
		 */
		sc->start = mdstart_vnode;
		error = mdcreate_vnode(sc, mdr, td);
		sc->start = mdstart_swap;
		error = mdcreate_swap(sc, mdr, td);
		sc->start = mdstart_null;
		error = mdcreate_null(sc, mdr, td);

	/* Prune off any residual fractional sector */
	i = sc->mediasize % sc->sectorsize;
kern_mdattach(struct thread *td, struct md_req *mdr)
	error = kern_mdattach_locked(td, mdr);

kern_mddetach_locked(struct thread *td, struct md_req *mdr)
	sx_assert(&md_sx, SA_XLOCKED);

	if (mdr->md_mediasize != 0 ||
	    (mdr->md_options & ~MD_FORCE) != 0)
	sc = mdfind(mdr->md_unit);
	if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
	    !(mdr->md_options & MD_FORCE))
	return (mddestroy(sc, td));

kern_mddetach(struct thread *td, struct md_req *mdr)
	error = kern_mddetach_locked(td, mdr);

kern_mdresize_locked(struct md_req *mdr)
	sx_assert(&md_sx, SA_XLOCKED);

	if ((mdr->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
	sc = mdfind(mdr->md_unit);
	if (mdr->md_mediasize < sc->sectorsize)
	if (mdr->md_mediasize < sc->mediasize &&
	    !(sc->flags & MD_FORCE) &&
	    !(mdr->md_options & MD_FORCE))
	return (mdresize(sc, mdr));

kern_mdresize(struct md_req *mdr)
	error = kern_mdresize_locked(mdr);
kern_mdquery_locked(struct md_req *mdr)
	sx_assert(&md_sx, SA_XLOCKED);

	sc = mdfind(mdr->md_unit);
	mdr->md_type = sc->type;
	mdr->md_options = sc->flags;
	mdr->md_mediasize = sc->mediasize;
	mdr->md_sectorsize = sc->sectorsize;
	if (mdr->md_label != NULL) {
		error = copyout(sc->label, mdr->md_label,
		    strlen(sc->label) + 1);
	if (sc->type == MD_VNODE ||
	    (sc->type == MD_PRELOAD && mdr->md_file != NULL))
		error = copyout(sc->file, mdr->md_file,
		    strlen(sc->file) + 1);

kern_mdquery(struct md_req *mdr)
	error = kern_mdquery_locked(mdr);
1885 sx_assert(&md_sx, SA_XLOCKED);
1888 * Write the number of md devices to mdr->md_units[0].
1889 * Write the unit number of the first (mdr->md_units_nitems - 2)
1890 * units to mdr->md_units[1::(mdr->md_units - 2)] and terminate the
1893 * XXX: There is currently no mechanism to retrieve unit
1894 * numbers for more than (MDNPAD - 2) units.
1896 * XXX: Due to the use of LIST_INSERT_HEAD in mdnew(), the
1897 * list of visible unit numbers not stable.
1900 LIST_FOREACH(sc, &md_softc_list, list) {
1901 if (i < mdr->md_units_nitems - 1)
1902 mdr->md_units[i] = sc->unit;
1905 mdr->md_units[MIN(i, mdr->md_units_nitems - 1)] = -1;
1906 mdr->md_units[0] = i - 1;
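/*
 * Editor's note, a worked example assuming i starts at 1 (its
 * initialization is elided here): with three attached devices of units
 * 5, 1 and 0 and md_units_nitems = 97, the loop stores the unit
 * numbers in md_units[1..3], i ends at 4, the -1 sentinel lands in
 * md_units[4], and md_units[0] = 4 - 1 = 3, the device count.
 */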
kern_mdlist(struct md_req *mdr)
	error = kern_mdlist_locked(mdr);

/* Copy members that are not userspace pointers. */
#define	MD_IOCTL2REQ(mdio, mdr) do {					\
	(mdr)->md_unit = (mdio)->md_unit;				\
	(mdr)->md_type = (mdio)->md_type;				\
	(mdr)->md_mediasize = (mdio)->md_mediasize;			\
	(mdr)->md_sectorsize = (mdio)->md_sectorsize;			\
	(mdr)->md_options = (mdio)->md_options;				\
	(mdr)->md_fwheads = (mdio)->md_fwheads;				\
	(mdr)->md_fwsectors = (mdio)->md_fwsectors;			\
	(mdr)->md_units = &(mdio)->md_pad[0];				\
	(mdr)->md_units_nitems = nitems((mdio)->md_pad);		\

/* Copy members that might have been updated */
#define	MD_REQ2IOCTL(mdr, mdio) do {					\
	(mdio)->md_unit = (mdr)->md_unit;				\
	(mdio)->md_type = (mdr)->md_type;				\
	(mdio)->md_mediasize = (mdr)->md_mediasize;			\
	(mdio)->md_sectorsize = (mdr)->md_sectorsize;			\
	(mdio)->md_options = (mdr)->md_options;				\
	(mdio)->md_fwheads = (mdr)->md_fwheads;				\
	(mdio)->md_fwsectors = (mdr)->md_fwsectors;			\
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	bzero(&mdr, sizeof(mdr));
		struct md_ioctl *mdio = (struct md_ioctl *)addr;
		if (mdio->md_version != MDIOVERSION)
		MD_IOCTL2REQ(mdio, &mdr);
		mdr.md_file = mdio->md_file;
		mdr.md_file_seg = UIO_USERSPACE;
		/* If the file is adjacent to the md_ioctl, it is in kernel
		 * memory. */
		if ((void *)mdio->md_file == (void *)(mdio + 1))
			mdr.md_file_seg = UIO_SYSSPACE;
		mdr.md_label = mdio->md_label;
#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
	case MDIOCDETACH_32:
	case MDIOCRESIZE_32:
	case MDIOCLIST_32: {
		struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
		if (mdio->md_version != MDIOVERSION)
		MD_IOCTL2REQ(mdio, &mdr);
		mdr.md_file = (void *)(uintptr_t)mdio->md_file;
		mdr.md_file_seg = UIO_USERSPACE;
		mdr.md_label = (void *)(uintptr_t)mdio->md_label;
		/* Fall through to handler switch. */
#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
		error = kern_mdattach(td, &mdr);
#ifdef COMPAT_FREEBSD32
	case MDIOCDETACH_32:
		error = kern_mddetach(td, &mdr);
#ifdef COMPAT_FREEBSD32
	case MDIOCRESIZE_32:
		error = kern_mdresize(&mdr);
#ifdef COMPAT_FREEBSD32
		error = kern_mdquery(&mdr);
#ifdef COMPAT_FREEBSD32
		error = kern_mdlist(&mdr);
		struct md_ioctl *mdio = (struct md_ioctl *)addr;
		MD_REQ2IOCTL(&mdr, mdio);

#ifdef COMPAT_FREEBSD32
	case MDIOCATTACH_32:
	case MDIOCQUERY_32: {
		struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
		MD_REQ2IOCTL(&mdr, mdio);
		/* Other commands do not alter mdr. */
md_preloaded(u_char *image, size_t length, const char *name)
	sc = mdnew(-1, &error, MD_PRELOAD);
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_len = length;
	sc->start = mdstart_preload;
		strlcpy(sc->file, name, sizeof(sc->file));
	if (sc->unit == 0) {
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#ifdef MD_ROOT_READONLY
		sc->flags |= MD_READONLY;
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
g_md_init(struct g_class *mp __unused)
	u_char *ptr, *name, *type;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;
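	/*
	 * Editor's note: with NINDIR = 512 the loop above halves i ten
	 * times (512, 256, ..., 1), leaving nshift = 9 = log2(512), the
	 * per-layer shift used by the indir tree.
	 */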
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
	if (mfs_root_size != 0) {
		md_preloaded(mfs_root, mfs_root_size, NULL);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			md_preloaded(ptr, len, name);
	md_pbuf_zone = pbuf_zsecond_create("mdpbuf", nswbuf / 10);
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
	if (indent == NULL) {
		sbuf_printf(sb, " u %d", mp->unit);
		sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
		sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
		sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
		sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
		sbuf_printf(sb, " t %s", type);
		if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
		    (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
			sbuf_printf(sb, " file %s", mp->file);
		sbuf_printf(sb, " label %s", mp->label);
	} else {
		sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
		    mp->unit);
		sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
		    indent, (uintmax_t) mp->sectorsize);
		sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
		    indent, (uintmax_t) mp->fwheads);
		sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
		    indent, (uintmax_t) mp->fwsectors);
		if (mp->ident[0] != '\0') {
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s", mp->ident);
			sbuf_printf(sb, "</ident>\n");
		}
		sbuf_printf(sb, "%s<length>%ju</length>\n",
		    indent, (uintmax_t) mp->mediasize);
		sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
		    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    (mp->flags & MD_READONLY) == 0 ? "read-write":
		    "read-only");
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    type);
		if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
		    (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
			sbuf_printf(sb, "%s<file>", indent);
			g_conf_printf_escaped(sb, "%s", mp->file);
			sbuf_printf(sb, "</file>\n");
		}
		if (mp->type == MD_VNODE)
			sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
			    (mp->flags & MD_CACHE) == 0 ? "off": "on");
		sbuf_printf(sb, "%s<label>", indent);
		g_conf_printf_escaped(sb, "%s", mp->label);
		sbuf_printf(sb, "</label>\n");
g_md_fini(struct g_class *mp __unused)
	if (status_dev != NULL)
		destroy_dev(status_dev);
	uma_zdestroy(md_pbuf_zone);
	delete_unrhdr(md_uh);