 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such fall under the following copyright:
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * from: Utah Hdr: vn.c 1.13 94/04/02
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <geom/geom.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */
#define MD_NSECT	(10000 * 2)
static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
	u_char start[MD_ROOT_SIZE*1024];
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
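/*
 * Illustrative, hypothetical userland sketch (not part of this driver)
 * of how a patching tool can exploit the contiguous markers above; the
 * helper name and the use of memmem(3) are assumptions, and only the
 * marker strings come from the structure itself.
 *
 *	#include <string.h>
 *
 *	static unsigned char *
 *	find_root_window(unsigned char *obj, size_t objlen, size_t *winlen)
 *	{
 *		const char *s = "MFS Filesystem goes here";
 *		const char *e = "MFS Filesystem had better STOP here";
 *		unsigned char *start, *end;
 *
 *		start = memmem(obj, objlen, s, strlen(s) + 1);
 *		end = memmem(obj, objlen, e, strlen(e) + 1);
 *		if (start == NULL || end == NULL || end <= start)
 *			return (NULL);
 *		*winlen = (size_t)(end - start);
 *		return (start);
 *	}
 */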
static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
static struct cdev *status_dev = NULL;
static struct sx md_sx;
static d_ioctl_t mdctlioctl;
static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
struct g_class g_md_class = {
	.version =	G_VERSION,
	.access =	g_md_access,
	.dumpconf =	g_md_dumpconf,
DECLARE_GEOM_CLASS(g_md_class, g_md);
static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);
#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
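/*
 * Worked example for the two macros above, assuming a 4 KB PAGE_SIZE
 * and 8-byte pointers: NINDIR = 4096 / 8 = 512 slots per indir page and
 * NMASK = 0x1ff, so every tree layer consumes 9 bits of the sector
 * number and a three-layer tree spans 512^3 = 134,217,728 sectors.
 */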
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;
	/* MD_MALLOC related fields */
	/* MD_PRELOAD related fields */
	/* MD_VNODE related fields */
	/* MD_SWAP related fields */
static struct indir *
new_indir(u_int shift)
	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
del_indir(struct indir *ip)
	free(ip->array, M_MDSECT);
destroy_indir(struct md_s *sc, struct indir *ip)
	for (i = 0; i < NINDIR; i++) {
			destroy_indir(sc, (struct indir *)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
static struct indir *
dimension(off_t size)
	while (rcnt > NINDIR) {
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
 * XXX: the top layer is probably not fully populated, so we allocate
 * too much space for ip->array in here.
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->shift = layer * nshift;
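/*
 * Worked example, assuming NINDIR = 512 (so nshift = 9) and that the
 * elided while-loop body bumps layer and divides rcnt by NINDIR as in
 * the stock vn(4)-derived code: for a 1,000,000-sector device rcnt
 * shrinks 1000000 -> 1953 -> 3, so layer = 2 and the root indir gets
 * shift = 2 * 9 = 18.
 */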
 * Read a given sector
s_read(struct indir *ip, off_t offset)
		printf("s_read(%jd)\n", (intmax_t)offset);
	for (cip = ip; cip != NULL;) {
		idx = (offset >> cip->shift) & NMASK;
		up = cip->array[idx];
		cip = (struct indir *)up;
	idx = offset & NMASK;
	return (cip->array[idx]);
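/*
 * Note on the returned uintptr_t, as used by the callers in this file:
 * 0 means the sector was never written, values 1..255 encode a sector
 * filled entirely with that byte (the MD_COMPRESS case), and any larger
 * value is a pointer to a real sector buffer from sc->uma.
 */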
 * Write a given sector, prune the tree if the value is 0
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
	struct indir *cip, *lip[10];
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
		idx = (offset >> cip->shift) & NMASK;
		up = cip->array[idx];
			cip = (struct indir *)up;
			/* Allocate branch */
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
			up = cip->array[idx];
			cip = (struct indir *)up;
	idx = offset & NMASK;
	up = cip->array[idx];
	cip->array[idx] = ptr;
	if (cip->used != 0 || li == 1)
	while (cip->used == 0 && cip != ip) {
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		lip[li]->array[idx] = 0;
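/*
 * Example of the pruning above: writing ptr == 0 for the only live
 * sector under a leaf indir drops that leaf's used count to zero, so
 * the loop walks the lip[] path back toward the root, zeroing the
 * parent slot of each indir page that has just become empty.
 */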
g_md_access(struct g_provider *pp, int r, int w, int e)
	sc = pp->geom->softc;
		if (r <= 0 && w <= 0 && e <= 0)
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
g_md_start(struct bio *bp)
	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
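/*
 * Note: g_md_start() only queues the request; the per-unit worker
 * thread created in mdnew() (md_kthread() below) dequeues it, runs it
 * through sc->start(), and completes the bio with g_io_deliver().
 */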
mdstart_malloc(struct md_s *sc, struct bio *bp)
	off_t secno, nsec, uc;
	switch (bp->bio_cmd) {
	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
				bzero(dst, sc->sectorsize);
				memset(dst, osp, sc->sectorsize);
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				for (i = 1; i < sc->sectorsize; i++)
			if (i == sc->sectorsize) {
					error = s_write(sc->indir, secno, uc);
					sp = (uintptr_t)uma_zalloc(sc->uma,
						bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
					bcopy(dst, (void *)osp, sc->sectorsize);
				uma_zfree(sc->uma, (void *)osp);
		dst += sc->sectorsize;
mdstart_preload(struct md_s *sc, struct bio *bp)
	switch (bp->bio_cmd) {
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
mdstart_vnode(struct md_s *sc, struct bio *bp)
	int error, vfslocked;
	switch (bp->bio_cmd) {
 * If an error occurs, we set BIO_ERROR but we do not set
 * B_INVAL because (for a write anyway), the buffer is
	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
 * When reading, set IO_DIRECT to try to avoid double-caching
 * the data.  When writing, IO_DIRECT is not optimal.
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		vn_finished_write(mp);
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
mdstart_swap(struct md_s *sc, struct bio *bp)
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	switch (bp->bio_cmd) {
 * offs is the offset at which to start operating on the
 * next (i.e., first) page.  lastp is the last page on
 * which we're going to operate.  lastend is the ending
 * position within that last page (i.e., PAGE_SIZE if
 * we're operating on complete aligned pages).
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
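/*
 * Worked example, assuming PAGE_SIZE = 4096: for bio_offset = 6144 and
 * bio_length = 10240 the request ends at byte 16384, so offs = 2048,
 * the first page index is 1, lastp = 16383 / 4096 = 3 and
 * lastend = 4096; the loop below touches pages 1..3, starting 2048
 * bytes into page 1 and running to the end of page 3.
 */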
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
			bzero((void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		if (bp->bio_cmd == BIO_WRITE)
		vm_page_unlock_queues();
		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
			printf("wire_count %d busy %d flags %x hold_count %d "
			    "act_count %d queue %d valid %d dirty %d @ %ju\n",
			    m->wire_count, m->busy, m->flags, m->hold_count,
			    m->act_count, m->queue, m->valid, m->dirty,
			    (uintmax_t)i);
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
md_kthread(void *arg)
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
		bp = bioq_takefirst(&sc->bio_queue);
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    g_handleattr_int(bp, "GEOM::fwheads",
			error = sc->start(sc, bp);
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
mdnew(int unit, int *errp, enum md_types type)
	struct md_s *sc, *sc2;
	LIST_FOREACH(sc2, &md_softc_list, list) {
		if (unit == sc2->unit) {
		if (unit == -1 && sc2->unit > max)
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
		LIST_REMOVE(sc, list);
		mtx_destroy(&sc->queue_mtx);
mdinit(struct md_s *sc)
	struct g_provider *pp;
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	g_error_provider(pp, 0);
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	if (mdio->md_options & MD_RESERVE) {
		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
				error = s_write(sc->indir, u, sp);
mdsetcred(struct md_s *sc, struct ucred *cred)
 * Set credentials in our softc.
	sc->cred = crhold(cred);
 * Horrible kludge to establish credentials for NFS  XXX.
	tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = tmpbuf;
	aiov.iov_len = sc->sectorsize;
	auio.uio_iov = &aiov;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
	VOP_UNLOCK(sc->vnode, 0);
	free(tmpbuf, M_TEMP);
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
	int error, flags, vfslocked;
	error = copyinstr(mdio->md_file, sc->file, sizeof(sc->file), NULL);
	flags = FREAD | FWRITE;
 * If the user specified that this is a read-only device, unset the
 * FWRITE mask before trying to open the backing store.
	if ((mdio->md_options & MD_READONLY) != 0)
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;
	error = mdsetcred(sc, td->td_ucred);
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
	VFS_UNLOCK_GIANT(vfslocked);
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
mddestroy(struct md_s *sc, struct thread *td)
		sc->gp->softc = NULL;
		g_wither_geom(sc->gp, ENXIO);
		devstat_remove_entry(sc->devstat);
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD | FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	if (sc->cred != NULL)
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
		destroy_indir(sc, sc->indir);
		uma_zdestroy(sc->uma);
	LIST_REMOVE(sc, list);
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
 * Range check.  Disallow zero-length media and any size that is
 * not a multiple of the page size.
	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
 * Allocate an OBJT_SWAP object.
 * Note the truncation.
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
	error = mdsetcred(sc, td->td_ucred);
		vm_object_deallocate(sc->object);
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
	struct md_ioctl *mdio;
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);
	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
 * We assert the version number in the individual ioctl
 * handlers instead of out here because (a) it is possible we
 * may add another ioctl in the future which doesn't read an
 * mdio, and (b) the correct return value for an unknown ioctl
 * is ENOIOCTL, not EINVAL.
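/*
 * Hypothetical userland sketch (one would normally just run
 * mdconfig(8)) of exercising this interface through /dev/mdctl; the
 * helper name is invented, while the ioctl, the flags and the fields
 * are the real ones from <sys/mdioctl.h> handled below.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mdioctl.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int
 *	attach_swap_md(void)
 *	{
 *		struct md_ioctl mdio = { 0 };
 *		int fd, unit;
 *
 *		if ((fd = open("/dev/" MDCTL_NAME, O_RDWR)) < 0)
 *			return (-1);
 *		mdio.md_version = MDIOVERSION;
 *		mdio.md_type = MD_SWAP;
 *		mdio.md_options = MD_AUTOUNIT;
 *		mdio.md_mediasize = 1024 * 1024;
 *		unit = (ioctl(fd, MDIOCATTACH, &mdio) < 0) ? -1 :
 *		    mdio.md_unit;
 *		close(fd);
 *		return (unit);
 *	}
 */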
	switch (mdio->md_type) {
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
			sc->sectorsize = mdio->md_sectorsize;
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
		sc = mdfind(mdio->md_unit);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
		return (mddestroy(sc, td));
		sc = mdfind(mdio->md_unit);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
				mdio->md_pad[i++] = sc->unit;
		mdio->md_pad[0] = i - 1;
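/*
 * Layout of the MDIOCLIST reply built above: md_pad[0] carries the
 * number of units returned, md_pad[1..] carry the unit numbers, and a
 * -1 in the final slot marks a list truncated at MDNPAD entries.
 */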
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
	error = xmdctlioctl(dev, cmd, addr, flags, td);
md_preloaded(u_char *image, size_t length)
	sc = mdnew(-1, &error, MD_PRELOAD);
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_len = length;
	sc->start = mdstart_preload;
		rootdevnames[0] = "ufs:/dev/md0";
g_md_init(struct g_class *mp __unused)
	u_char *ptr, *name, *type;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	/* XXX: are preload_* static or do they need Giant? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		md_preloaded(ptr, len);
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
	if (indent == NULL) {
		sbuf_printf(sb, " u %d", mp->unit);
		sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
		sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
		sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
		sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
		sbuf_printf(sb, " t %s", type);
		if (mp->type == MD_VNODE && mp->vnode != NULL)
			sbuf_printf(sb, " file %s", mp->file);
		sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
		sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
		    indent, (uintmax_t) mp->sectorsize);
		sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
		    indent, (uintmax_t) mp->fwheads);
		sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
		    indent, (uintmax_t) mp->fwsectors);
		sbuf_printf(sb, "%s<length>%ju</length>\n",
		    indent, (uintmax_t) mp->mediasize);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		if (mp->type == MD_VNODE && mp->vnode != NULL)
			sbuf_printf(sb, "%s<file>%s</file>\n",
g_md_fini(struct g_class *mp __unused)
	if (status_dev != NULL)
		destroy_dev(status_dev);