/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/vmparam.h>
#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif
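/*
 * At the usual DEV_BSIZE (512-byte) sector size this default works out
 * to 10000 * 2 = 20000 sectors, i.e. roughly 9.8 MB.
 */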
static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0, "");
#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif
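/*
 * Example (assuming the usual kernel config spelling): building with
 *	options MD_ROOT
 *	options MD_ROOT_SIZE=4096
 * reserves a 4096 kB (4 MB) region here, since start[] is sized in kB.
 */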
static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = MD_NAME,
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
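/*
 * With 4 KB pages and 8-byte pointers NINDIR is 4096 / 8 = 512, so each
 * indirection node fans out 512 ways and NMASK (511) extracts one 9-bit
 * index from a sector number.  (The values differ on other page sizes.)
 */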
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};
static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
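/*
 * For example, a 1 GB device with 512-byte sectors holds 2^21 sectors;
 * with NINDIR == 512 (2^9) the loop below settles on a three-level tree
 * (shifts 18, 9 and 0), enough to address up to 2^27 sectors.
 */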
static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}
/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}
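/*
 * Pruning sketch: writing 0 to the only used slot of a leaf drops that
 * leaf's "used" count to zero, so the loop above frees the leaf, clears
 * its slot in the parent, and repeats upward until it reaches a node
 * that is still in use (or the root).
 */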
static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
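/*
 * Note on the encoding used above: the indirection tree stores three
 * kinds of leaf values: 0 for a never-written sector, 1..255 for a
 * sector filled entirely with that byte (the MD_COMPRESS case), and
 * any larger value as a pointer to a malloc'ed sector buffer.
 */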
static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}
static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
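	/*
	 * Worked example with PAGE_SIZE == 4096: a request with
	 * bio_offset 6144 and bio_length 10240 spans bytes 6144..16383,
	 * so offs = 2048, the loop starts at page 1, lastp = 3 and
	 * lastend = 4096 (the final page is used in full).
	 */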
	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				vm_page_wakeup(m);
				break;
			}
			if (len != PAGE_SIZE) {
				bzero((void *)(sf_buf_kva(sf) + offs), len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		sf_buf_free(sf);

		if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
			printf("wire_count %d busy %d flags %x hold_count %d "
			    "act_count %d queue %d valid %d dirty %d @ %ju\n",
			    m->wire_count, m->busy, m->flags, m->hold_count,
			    m->act_count, m->queue, m->valid, m->dirty,
			    (uintmax_t)i);

		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_wakeup(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}
static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}
static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}
/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	if (mdio->md_base == 0)
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}
static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}
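/*
 * Typical userland usage (sketch; see mdconfig(8)):
 *	mdconfig -a -t malloc -o reserve -s 64m
 * creates a malloc-backed md device and preallocates all of its sectors
 * up front via the MD_RESERVE path above.
 */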
static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error;

	/*
	 * Set credentials in our softc
	 */
	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
		return (error);
	}
	return (0);
}
static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags, vfslocked;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
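/*
 * Typical userland usage (sketch; see mdconfig(8)):
 *	mdconfig -a -t vnode -f /path/to/disk.img
 * attaches a file-backed md device whose I/O is serviced by
 * mdstart_vnode() above.
 */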
static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}
static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */
	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}
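/*
 * Typical userland usage (sketch; see mdconfig(8)):
 *	mdconfig -a -t swap -s 128m
 * creates a swap-backed md device; pages are allocated lazily unless
 * "-o reserve" forces swap_pager_reserve() as above.
 */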
static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
		else
			sc->sectorsize = mdio->md_sectorsize;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;
		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	};
}
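/*
 * Layout of the MDIOCLIST reply, as built above: md_pad[0] holds the
 * number of entries, md_pad[1..n] hold the unit numbers, and a -1
 * entry marks truncation when more than MDNPAD - 2 devices exist.
 */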
static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}
static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;
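	/*
	 * E.g. with NINDIR == 512 the loop halves i ten times
	 * (512, 256, ..., 1, 0), leaving nshift == 9 == log2(512).
	 */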
	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
			    MD_NAME, mdunits, name, len, ptr);
			sx_xlock(&md_sx);
			md_preloaded(ptr, len);
			sx_xunlock(&md_sx);
		}
	}
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}
static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	const char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}
static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}