/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 * from: @(#)vn.c 8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */
#include "opt_rootdevname.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/bus.h>

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define MD_EXITING	0x20000		/* Worker thread is exiting. */

#define MD_NSECT (10000 * 2)

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define MD_ROOT_FSTYPE	"ufs"
#endif

/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * We put the mfs_root symbol into the oldmfs section of the kernel object file.
 * Applications that patch the object with the image can determine
 * the size looking at the oldmfs section size within the kernel.
 */
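/*
 * For example (a sketch; the image name is hypothetical), a root image
 * no larger than MD_ROOT_SIZE*1024 bytes can be spliced into an already
 * linked kernel with GNU binutils:
 *
 *	objcopy --update-section oldmfs=myroot.img kernel kernel.patched
 */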
u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
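/*
 * When no image is linked in, both weak symbols resolve to the same
 * address and mfs_root_size evaluates to 0; the
 *
 *	if (mfs_root_size != 0)
 *
 * test in g_md_init() then quietly skips attaching a preloaded disk.
 */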

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
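/*
 * Sketch of the resulting index math (assuming 4 KB pages and 64-bit
 * pointers, so NINDIR == 512 and NMASK == 0x1ff): each level of the
 * indirection tree below resolves 9 bits of the sector number, e.g.
 *
 *	idx = (offset >> cip->shift) & NMASK;
 */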

static int md_vnode_pbuf_freecnt;

	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */

	/* MD_PRELOAD related fields */

	/* MD_VNODE related fields */

	/* MD_SWAP related fields */

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
static void
del_indir(struct indir *ip)
{
	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	for (i = 0; i < NINDIR; i++) {
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
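/*
 * Worked example (assuming 4 KB pages and 64-bit pointers, so NINDIR == 512
 * and nshift == 9): a 1 GB device with 512-byte sectors has 2^21 sectors;
 * the while loop below runs twice (2^21 -> 2^12 -> 2^3), leaving layer == 2
 * and a top-level ip->shift of 2 * 9 = 18.  A lookup then peels off 9 bits
 * per level: (offset >> 18), (offset >> 9), (offset >> 0), each masked
 * with NMASK.
 */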
static struct indir *
dimension(off_t size)
{
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->shift = layer * nshift;

/*
 * Read a given sector
 */
s_read(struct indir *ip, off_t offset)
	printf("s_read(%jd)\n", (intmax_t)offset);
	for (cip = ip; cip != NULL;) {
		idx = (offset >> cip->shift) & NMASK;
		up = cip->array[idx];
		cip = (struct indir *)up;
	idx = offset & NMASK;
	return (cip->array[idx]);

/*
 * Write a given sector, prune the tree if the value is 0
 */
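/*
 * Usage sketch (secno and sp hypothetical, mirroring the mdstart_malloc()
 * call sites below): a sector is stored with
 *
 *	error = s_write(sc->indir, secno, sp);
 *
 * and erased with
 *
 *	error = s_write(sc->indir, secno, 0);
 *
 * where the second form is what allows empty indir nodes to be pruned.
 */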
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
	struct indir *cip, *lip[10];

	printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
		idx = (offset >> cip->shift) & NMASK;
		up = cip->array[idx];
		cip = (struct indir *)up;
			/* Allocate branch */
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
			up = cip->array[idx];
			cip = (struct indir *)up;
	idx = offset & NMASK;
	up = cip->array[idx];
	cip->array[idx] = ptr;
	if (cip->used != 0 || li == 1)
	while (cip->used == 0 && cip != ip) {
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		lip[li]->array[idx] = 0;

g_md_access(struct g_provider *pp, int r, int w, int e)
	sc = pp->geom->softc;
	if (r <= 0 && w <= 0 && e <= 0)
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {

g_md_start(struct bio *bp)
	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);

#define MD_MALLOC_MOVE_ZERO	1
#define MD_MALLOC_MOVE_FILL	2
#define MD_MALLOC_MOVE_READ	3
#define MD_MALLOC_MOVE_WRITE	4
#define MD_MALLOC_MOVE_CMP	5
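/*
 * Example (mirrors the BIO_READ path in mdstart_malloc() below): zero-fill
 * one sector of an unmapped bio, advancing the (m, ma_offs) page cursor as
 * a side effect:
 *
 *	error = md_malloc_move_ma(&m, &ma_offs, sc->sectorsize,
 *	    NULL, 0, MD_MALLOC_MOVE_ZERO);
 */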
md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
	int error, i, ma_offs1, sz, first_read;

	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
		    (md_malloc_wait ? 0 : SFB_NOWAIT));
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		case MD_MALLOC_MOVE_ZERO:
		case MD_MALLOC_MOVE_FILL:
		case MD_MALLOC_MOVE_READ:
			cpu_flush_dcache(p, sz);
		case MD_MALLOC_MOVE_WRITE:
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				} else if (*p != first) {
			KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
		*ma_offs %= PAGE_SIZE;
		ptr = (char *)ptr + sz;

	if (op == MD_MALLOC_MOVE_CMP && error != 0) {

md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
    unsigned len, void *ptr, u_char fill, int op)
	bus_dma_segment_t *vlist;
	uint8_t *p, *end, first;
	int ma_offs, seg_len;

	for (; len != 0; len -= seg_len) {
		seg_len = imin(vlist->ds_len - ma_offs, len);
		p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
		case MD_MALLOC_MOVE_ZERO:
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, seg_len);
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, seg_len);
			cpu_flush_dcache(p, seg_len);
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, seg_len);
		case MD_MALLOC_MOVE_CMP:
			/* Confirm all following bytes match the first */
			KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
		if (ma_offs == vlist->ds_len) {
		ptr = (uint8_t *)ptr + seg_len;

mdstart_malloc(struct md_s *sc, struct bio *bp)
	bus_dma_segment_t *vlist;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;

	switch (bp->bio_cmd) {

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;
		ma_offs = bp->bio_ma_offset;
		KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
	} else if (vlist != NULL) {
		ma_offs = bp->bio_ma_offset;

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;

		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
				error = md_malloc_move_ma(&m, &ma_offs,
				    sc->sectorsize, NULL, 0,
				    MD_MALLOC_MOVE_ZERO);
			} else if (vlist != NULL) {
				error = md_malloc_move_vlist(&vlist,
				    &ma_offs, sc->sectorsize, NULL, 0,
				    MD_MALLOC_MOVE_ZERO);
				bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				error = md_malloc_move_ma(&m, &ma_offs,
				    sc->sectorsize, NULL, osp,
				    MD_MALLOC_MOVE_FILL);
			} else if (vlist != NULL) {
				error = md_malloc_move_vlist(&vlist,
				    &ma_offs, sc->sectorsize, NULL, osp,
				    MD_MALLOC_MOVE_FILL);
				memset(dst, osp, sc->sectorsize);
				error = md_malloc_move_ma(&m, &ma_offs,
				    sc->sectorsize, (void *)osp, 0,
				    MD_MALLOC_MOVE_READ);
			} else if (vlist != NULL) {
				error = md_malloc_move_vlist(&vlist,
				    &ma_offs, sc->sectorsize,
				    MD_MALLOC_MOVE_READ);
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				error1 = md_malloc_move_ma(&m, &ma_offs,
				    sc->sectorsize, &uc, 0,
				i = error1 == 0 ? sc->sectorsize : 0;
			} else if (vlist != NULL) {
				error1 = md_malloc_move_vlist(&vlist,
				    &ma_offs, sc->sectorsize, &uc, 0,
				i = error1 == 0 ? sc->sectorsize : 0;
				for (i = 1; i < sc->sectorsize; i++) {
			if (i == sc->sectorsize) {
				error = s_write(sc->indir, secno, uc);
				sp = (uintptr_t)uma_zalloc(sc->uma,
				    md_malloc_wait ? M_WAITOK :
					error = md_malloc_move_ma(&m,
					    &ma_offs, sc->sectorsize,
					    MD_MALLOC_MOVE_WRITE);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(
					    sc->sectorsize, (void *)sp,
					    0, MD_MALLOC_MOVE_WRITE);
					bcopy(dst, (void *)sp,
				error = s_write(sc->indir, secno, sp);
					error = md_malloc_move_ma(&m,
					    &ma_offs, sc->sectorsize,
					    MD_MALLOC_MOVE_WRITE);
				} else if (vlist != NULL) {
					error = md_malloc_move_vlist(
					    sc->sectorsize, (void *)osp,
					    0, MD_MALLOC_MOVE_WRITE);
					bcopy(dst, (void *)osp,
			uma_zfree(sc->uma, (void*)osp);
		if (!notmapped && vlist == NULL)
			dst += sc->sectorsize;

mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
	seg_len = omin(len, vlist->ds_len - offset);
	bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
	src = (uint8_t *)src + seg_len;

mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
	while (offset >= vlist->ds_len) {
		offset -= vlist->ds_len;
	seg_len = omin(len, vlist->ds_len - offset);
	bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
	dst = (uint8_t *)dst + seg_len;

mdstart_preload(struct md_s *sc, struct bio *bp)
	p = sc->pl_ptr + bp->bio_offset;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, bp->bio_length);
		} else {
			bcopy(p, bp->bio_data, bp->bio_length);
		}
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
	case BIO_WRITE:
		if ((bp->bio_flags & BIO_VLIST) != 0) {
			mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
			    bp->bio_ma_offset, p, bp->bio_length);
		} else {
			bcopy(bp->bio_data, p, bp->bio_length);
		}

mdstart_vnode(struct md_s *sc, struct bio *bp)
	bus_dma_segment_t *vlist;
	off_t iolen, len, zerosize;

	switch (bp->bio_cmd) {
		auio.uio_rw = UIO_READ;
		auio.uio_rw = UIO_WRITE;
	ma_offs = bp->bio_ma_offset;
	len = bp->bio_length;

	/*
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */
	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		vn_finished_write(mp);
	}

	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_resid = bp->bio_length;
	auio.uio_segflg = UIO_SYSSPACE;

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * Emulate BIO_DELETE by writing zeros.
		 */
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
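		/*
		 * E.g. (sizes hypothetical): with a 64 KB zero region and
		 * 512-byte sectors, zerosize = 65536 - (65536 % 512) = 65536,
		 * so a 1 MB BIO_DELETE is described by
		 * howmany(1048576, 65536) = 16 iovecs, all pointing into
		 * zero_region.
		 */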
		piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
		piov->iov_base = __DECONST(void *, zero_region);
		piov->iov_len = zerosize;
		len -= piov->iov_len;
	} else if ((bp->bio_flags & BIO_VLIST) != 0) {
		piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
		vlist = (bus_dma_segment_t *)bp->bio_data;
		piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
		    ma_offs);
		piov->iov_len = vlist->ds_len - ma_offs;
		if (piov->iov_len > len)
			piov->iov_len = len;
		len -= piov->iov_len;
		auio.uio_iovcnt = piov - auio.uio_iov;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
		    PAGE_MASK))));
		iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
		KASSERT(iolen > 0, ("zero iolen"));
		pmap_qenter((vm_offset_t)pb->b_data,
		    &bp->bio_ma[atop(ma_offs)], npages);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    (ma_offs & PAGE_MASK));
		aiov.iov_len = iolen;
		auio.uio_iov = &aiov;
		auio.uio_resid = iolen;
	} else {
		aiov.iov_base = bp->bio_data;
		aiov.iov_len = bp->bio_length;
		auio.uio_iov = &aiov;
	}

	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (auio.uio_rw == UIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		vn_finished_write(mp);
		sc->flags &= ~MD_VERIFY;
	}

	pmap_qremove((vm_offset_t)pb->b_data, npages);
	bp->bio_resid -= iolen;
	relpbuf(pb, &md_vnode_pbuf_freecnt);
	bp->bio_resid = auio.uio_resid;

mdstart_swap(struct md_s *sc, struct bio *bp)
	vm_pindex_t i, lastp;
	bus_dma_segment_t *vlist;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {

	ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
	    bp->bio_ma_offset : 0;
	vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
	    (bus_dma_segment_t *)bp->bio_data : NULL;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
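	/*
	 * Worked example (values hypothetical, PAGE_SIZE == 4096): for
	 * bio_offset = 1536 and bio_length = 8192, offs = 1536, the first
	 * page index is 0, lastp = 9727 / 4096 = 2 and lastend =
	 * 9727 % 4096 + 1 = 1536, so the loop below handles 2560, 4096 and
	 * 1536 bytes of pages 0, 1 and 2 respectively
	 * (2560 + 4096 + 1536 == 8192).
	 */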
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
			rv = vm_pager_get_pages(sc->object, &m, 1,
			if (rv == VM_PAGER_ERROR) {
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid.  Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
				    vlist, ma_offs, len);
				cpu_flush_dcache(p, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
			if (rv == VM_PAGER_ERROR) {
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
			} else if ((bp->bio_flags & BIO_VLIST) != 0) {
				physcopyin_vlist(vlist, ma_offs,
				    VM_PAGE_TO_PHYS(m) + offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1,
			if (rv == VM_PAGER_ERROR) {
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE) {
			vm_pager_page_unswapped(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);

mdstart_null(struct md_s *sc, struct bio *bp)
	switch (bp->bio_cmd) {
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);

md_kthread(void *arg)
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
		bp = bioq_takefirst(&sc->bio_queue);
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			int isv = ((sc->flags & MD_VERIFY) != 0);

			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    g_handleattr_int(bp, "GEOM::fwheads",
			    g_handleattr_int(bp, "GEOM::candelete", 1))
			else if (g_handleattr_int(bp, "MNT::verified", isv))

		error = sc->start(sc, bp);

		bp->bio_completed = bp->bio_length;
		if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
			devstat_end_transaction_bio(sc->devstat, bp);
		g_io_deliver(bp, error);

static struct md_s *
mdfind(int unit)
{
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error) {
		LIST_REMOVE(sc, list);
		mtx_destroy(&sc->stat_mtx);
		mtx_destroy(&sc->queue_mtx);
		free_unr(md_uh, sc->unit);

mdinit(struct md_s *sc)
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	pp->flags |= G_PF_ACCEPT_UNMAPPED;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);

mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	if (mdio->md_options & MD_RESERVE) {
		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			error = s_write(sc->indir, u, sp);

mdsetcred(struct md_s *sc, struct ucred *cred)
	/*
	 * Set credentials in our softc.
	 */
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = tmpbuf;
	aiov.iov_len = sc->sectorsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
	VOP_UNLOCK(sc->vnode, 0);
	free(tmpbuf, M_TEMP);

mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
	struct nameidata nd;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read-only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE) |
	    ((mdio->md_options & MD_VERIFY) ? O_VERIFY : 0);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC | MD_VERIFY);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		VOP_UNLOCK(nd.ni_vp, 0);
		(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);

mddestroy(struct md_s *sc, struct thread *td)
	sc->gp->softc = NULL;
	g_topology_lock();
	g_wither_geom(sc->gp, ENXIO);
	g_topology_unlock();
	devstat_remove_entry(sc->devstat);
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);

mdresize(struct md_s *sc, struct md_ioctl *mdio)
	vm_pindex_t oldpages, newpages;

	if (mdio->md_mediasize <= 0 ||
	    (mdio->md_mediasize % PAGE_SIZE) != 0)
	oldpages = OFF_TO_IDX(round_page(sc->mediasize));
	newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
	if (newpages < oldpages) {
		VM_OBJECT_WLOCK(sc->object);
		vm_object_page_remove(sc->object, newpages, 0, 0);
		swap_pager_freespace(sc->object, newpages,
		    oldpages - newpages);
		swap_release_by_cred(IDX_TO_OFF(oldpages -
		    newpages), sc->cred);
		sc->object->charge = IDX_TO_OFF(newpages);
		sc->object->size = newpages;
		VM_OBJECT_WUNLOCK(sc->object);
	} else if (newpages > oldpages) {
		res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
		    oldpages), sc->cred);
		if ((mdio->md_options & MD_RESERVE) ||
		    (sc->flags & MD_RESERVE)) {
			error = swap_pager_reserve(sc->object,
			    oldpages, newpages - oldpages);
				swap_release_by_cred(
				    IDX_TO_OFF(newpages - oldpages),
				    sc->cred);
		VM_OBJECT_WLOCK(sc->object);
		sc->object->charge = IDX_TO_OFF(newpages);
		sc->object->size = newpages;
		VM_OBJECT_WUNLOCK(sc->object);

	return (EOPNOTSUPP);

	sc->mediasize = mdio->md_mediasize;
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();

mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)

	/*
	 * Range check.  Disallow negative sizes and sizes
	 * that are not a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */
	if ((mdio->md_options & MD_VERIFY) != 0)
	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		vm_object_deallocate(sc->object);

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	/*
	 * Range check.  Disallow negative sizes and sizes
	 * that are not a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
	struct md_ioctl *mdio;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	switch (mdio->md_type) {

		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;

			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);

			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl.  Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */

			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);

			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);

			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;

		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));

		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));

		sc = mdfind(mdio->md_unit);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE ||
		    (sc->type == MD_PRELOAD && mdio->md_file != NULL))
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);

		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;

mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
	error = xmdctlioctl(dev, cmd, addr, flags, td);

md_preloaded(u_char *image, size_t length, const char *name)
	sc = mdnew(-1, &error, MD_PRELOAD);
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_len = length;
	sc->start = mdstart_preload;
	strlcpy(sc->file, name, sizeof(sc->file));
#if defined(MD_ROOT) && !defined(ROOTDEVNAME)
	rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	if (name != NULL)
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	else
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);

g_md_init(struct g_class *mp __unused)
	u_char *ptr, *name, *type;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
	if (mfs_root_size != 0) {
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			md_preloaded(ptr, len, name);
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,

g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
	if (indent == NULL) {
		sbuf_printf(sb, " u %d", mp->unit);
		sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
		sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
		sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
		sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
		sbuf_printf(sb, " t %s", type);
		if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
		    (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
			sbuf_printf(sb, " file %s", mp->file);
	} else {
		sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
		sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
		    indent, (uintmax_t) mp->sectorsize);
		sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
		    indent, (uintmax_t) mp->fwheads);
		sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
		    indent, (uintmax_t) mp->fwsectors);
		sbuf_printf(sb, "%s<length>%ju</length>\n",
		    indent, (uintmax_t) mp->mediasize);
		sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
		    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    (mp->flags & MD_READONLY) == 0 ? "read-write":
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
		    (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
			sbuf_printf(sb, "%s<file>", indent);
			g_conf_printf_escaped(sb, "%s", mp->file);
			sbuf_printf(sb, "</file>\n");
		}
	}

g_md_fini(struct g_class *mp __unused)
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);