/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#define MD_NSECT	(10000 * 2)

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif
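
/*
 * Editor's note (not part of the driver): a host-side tool that embeds a
 * filesystem image between the markers above might locate them roughly as
 * sketched below.  The two marker strings are the only assumed interface;
 * patch_mfs_root() and its arguments are hypothetical names, and memmem(3)
 * is assumed to be available on the build host.
 */
#if 0
static int
patch_mfs_root(u_char *obj, size_t objlen, const u_char *img, size_t imglen)
{
	u_char *s, *e;

	/* Find the start marker, then the end marker somewhere after it. */
	s = memmem(obj, objlen, "MFS Filesystem goes here", 24);
	if (s == NULL)
		return (-1);
	e = memmem(s, objlen - (s - obj),
	    "MFS Filesystem had better STOP here", 35);
	if (e == NULL || imglen > (size_t)(e - s))
		return (-1);	/* image must fit between the markers */
	memcpy(s, img, imglen);
	return (0);
}
#endif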

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
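
/*
 * Editor's note, a worked example: with 4 KiB pages and 8-byte pointers,
 * NINDIR is 4096 / 8 = 512 slots per indirection node, NMASK is 0x1ff, and
 * nshift (log2(NINDIR), computed in g_md_init() below) is 9.
 */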
static int nshift;

static int md_vnode_pbuf_freecnt;

/* Structure describing one layer of indirection. */
struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
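/*
 * Editor's note, a worked example: a 1 GiB device with 512-byte sectors has
 * 2^21 sectors.  With NINDIR == 512 (= 2^9), the loop below peels off two
 * layers (2^21 -> 2^12 -> 2^3), so the top node gets shift 2 * 9 = 18 and
 * the resulting tree is three levels deep.
 */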
static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */
static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}
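
/*
 * Editor's note: with the example tree above (nshift == 9), looking up
 * sector 0x12345 walks the top node at shift 18 (idx 0), then the shift-9
 * node (idx (0x12345 >> 9) & 0x1ff == 0x91), then the leaf, where
 * idx 0x12345 & 0x1ff == 0x145 yields the stored sector pointer.
 */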

/*
 * Write a given sector, prune the tree if the value is 0
 */
static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
		} else {
			/* leafnode */
			idx = offset & NMASK;
			up = cip->array[idx];
			if (up != 0)
				cip->used--;
			cip->array[idx] = ptr;
			if (ptr != 0)
				cip->used++;
			break;
		}
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5
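
/*
 * Editor's note: these opcodes select what md_malloc_move() does with the
 * unmapped bio pages.  ZERO and FILL store a constant into the pages, READ
 * copies a stored sector out to them, WRITE copies them into sector storage,
 * and CMP scans a sector-to-be-written for a uniform fill byte so that
 * mdstart_malloc() can compress it down to a single value.
 */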

static int
md_malloc_move(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	first = 0;
	first_read = 0;
	/* } */
	mp1 = *mp;
	ma_offs1 = *ma_offs;

	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*(u_char *)ptr = *p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		m = NULL;
		ma_offs = 0;
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
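/*
 * Editor's note, a worked example under an assumed ZERO_REGION_SIZE of
 * 64 KiB: with a 512-byte sector size, zerosize below is
 * 65536 - (65536 % 512) = 65536, so a BIO_DELETE is zero-filled in
 * sector-aligned writes of up to 64 KiB per iteration.
 */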
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		return (error);
	}

	KASSERT(bp->bio_length <= MAXPHYS, ("bio_length %jd",
	    (uintmax_t)bp->bio_length));

	if ((bp->bio_flags & BIO_UNMAPPED) == 0) {
		pb = NULL;
		aiov.iov_base = bp->bio_data;
	} else {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		pmap_qenter((vm_offset_t)pb->b_data, bp->bio_ma, bp->bio_ma_n);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    bp->bio_ma_offset);
	}
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;

	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pmap_qremove((vm_offset_t)pb->b_data, bp->bio_ma_n);
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & BIO_UNMAPPED) == 0 ? 0 : bp->bio_ma_offset;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
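/*
 * Editor's note, a worked example: bio_offset 6144 and bio_length 10240
 * with 4 KiB pages give offs = 6144 % 4096 = 2048, a first page index of
 * 6144 / 4096 = 1, lastp = 16383 / 4096 = 3, and
 * lastend = 16383 % 4096 + 1 = 4096 (the last page is used to its end).
 */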
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		if (bp->bio_cmd == BIO_WRITE) {
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (bp == NULL) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				continue;
			error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);
	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */
	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read-only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */
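/*
 * Editor's note, a worked example: a requested mediasize of 1 MiB with
 * 4 KiB pages gives npage = 1048576 / 4096 = 256; any fractional trailing
 * page would be dropped by the integer division below.
 */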
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;
		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL)
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	size_t len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;
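
	/*
	 * Editor's note: with NINDIR == 512 this loop leaves nshift == 9,
	 * i.e. the per-level shift used by s_read() and s_write() above.
	 */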

	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start), NULL);
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant? */
	mod = NULL;
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (indent == NULL) {
		sbuf_printf(sb, " u %d", mp->unit);
		sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
		sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
		sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
		sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
		sbuf_printf(sb, " t %s", type);
		if (mp->type == MD_VNODE && mp->vnode != NULL)
			sbuf_printf(sb, " file %s", mp->file);
	} else {
		sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
		    mp->unit);
		sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
		    indent, (uintmax_t) mp->sectorsize);
		sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
		    indent, (uintmax_t) mp->fwheads);
		sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
		    indent, (uintmax_t) mp->fwsectors);
		sbuf_printf(sb, "%s<length>%ju</length>\n",
		    indent, (uintmax_t) mp->mediasize);
		sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
		    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    (mp->flags & MD_READONLY) == 0 ? "read-write":
		    "read-only");
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    type);
		if (mp->type == MD_VNODE && mp->vnode != NULL)
			sbuf_printf(sb, "%s<file>%s</file>\n",
			    indent, mp->file);
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}