2 * Copyright (c) 1999 Boris Popov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
34 #include <sys/mount.h>
35 #include <sys/namei.h>
36 #include <sys/vnode.h>
37 #include <sys/dirent.h>
38 #include <sys/sysctl.h>
41 #include <vm/vm_param.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_extern.h>
44 #include <vm/vm_object.h>
45 #include <vm/vm_pager.h>
46 #include <vm/vnode_pager.h>
48 #include <netncp/ncp.h>
49 #include <netncp/ncp_conn.h>
50 #include <netncp/ncp_subr.h>
51 #include <netncp/ncp_ncp.h>
53 #include <fs/nwfs/nwfs.h>
54 #include <fs/nwfs/nwfs_node.h>
55 #include <fs/nwfs/nwfs_subr.h>
/*
 * Tunable: when nonzero, nwfs_readvdir() also instantiates vnodes and
 * seeds the name cache for entries it returns (see cache_enter() use
 * below), so a following lookup is served locally.
 */
57 static int nwfs_fastlookup = 1;
59 SYSCTL_DECL(_vfs_nwfs);
60 SYSCTL_INT(_vfs_nwfs, OID_AUTO, fastlookup, CTLFLAG_RW, &nwfs_fastlookup, 0, "");
/* Free-count handle for the shared physical-buffer pool; defined elsewhere. */
63 extern int nwfs_pbuf_freecnt;
/* Fixed on-disk-style size of one directory entry as returned by readdir. */
65 #define DE_SIZE (sizeof(struct dirent))
/*
 * nwfs_readvdir: back end for VOP_READDIR on an nwfs directory vnode.
 * Walks a server-side search context, converting each NetWare entry
 * into a fixed-size struct dirent and copying it out through 'uio'.
 * Returns 0 or an errno.
 *
 * NOTE(review): this chunk is a gappy extraction -- the declarations of
 * i, error, dp, fid, newvp, eofflag and several braces/returns are in
 * elided lines; embedded numbers are original-file line numbers.
 */
69 nwfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred) {
70 struct nwmount *nmp = VTONWFS(vp);
73 struct nwnode *np = VTONW(vp);
74 struct nw_entry_info fattr;
76 struct componentname cn;
80 NCPVNDEBUG("dirname='%s'\n",np->n_name);
/* Reject undersized buffers and negative offsets up front. */
81 if (uio->uio_resid < DE_SIZE || (uio->uio_offset < 0))
/* The directory "cookie" is simply the entry index: offset / DE_SIZE. */
85 i = uio->uio_offset / DE_SIZE; /* offset in directory */
/* Open a fresh server-side search context for this directory. */
87 error = ncp_initsearch(vp, uio->uio_td, cred);
89 NCPVNDEBUG("cannot initialize search, error=%d",error);
/* Emit one fixed-size dirent per iteration while the buffer has room. */
94 for (; uio->uio_resid >= DE_SIZE; i++) {
95 bzero((char *) &dp, DE_SIZE);
96 dp.d_reclen = DE_SIZE;
/* Entries 0 and 1 are the synthetic "." and ".." records. */
100 dp.d_fileno = (i == 0) ? np->n_fid.f_id : np->n_parent.f_id;
101 if (!dp.d_fileno) dp.d_fileno = NWFS_ROOT_INO;
/* Terminates the "." / ".." name; the dot fill is presumably in the
 * elided lines -- verify against the full source. */
105 dp.d_name[i + 1] = '\0';
/* Real entries (i >= 2): fetch the next match from the server. */
109 error = ncp_search_for_file_or_subdir(nmp, &np->n_seq, &fattr, uio->uio_td, cred);
/* Codes >= 0x80 appear to mean end-of-search rather than a hard error
 * -- TODO confirm against ncp_search_for_file_or_subdir(). */
110 if (error && error < 0x80) break;
111 dp.d_fileno = fattr.dirEntNum;
112 dp.d_type = (fattr.attributes & aDIR) ? DT_DIR : DT_REG;
113 dp.d_namlen = fattr.nameLen;
114 bcopy(fattr.entryName, dp.d_name, dp.d_namlen);
115 dp.d_name[dp.d_namlen] = '\0';
117 if (error && eofflag) {
/* fastlookup: pre-create the vnode and seed the name cache so a
 * subsequent lookup of this entry avoids a server round trip. */
124 if (nwfs_fastlookup && !error && i > 1) {
125 fid.f_id = fattr.dirEntNum;
126 fid.f_parent = np->n_fid.f_id;
127 error = nwfs_nget(vp->v_mount, fid, &fattr, vp, &newvp);
129 VTONW(newvp)->n_ctime = VTONW(newvp)->n_vattr.va_ctime.tv_sec;
130 cn.cn_nameptr = dp.d_name;
131 cn.cn_namelen = dp.d_namlen;
132 cache_enter(vp, newvp, &cn);
/* Copy the completed dirent out to the caller's buffer. */
141 if ((error = uiomove(&dp, DE_SIZE, uio)))
/* Record how far we got so the next readdir call resumes correctly. */
145 uio->uio_offset = i * DE_SIZE;
/*
 * nwfs_readvnode: read from a regular file or directory vnode.
 * Directories are delegated to nwfs_readvdir(); regular files are read
 * straight from the NetWare connection after cache-consistency checks.
 * Returns 0 or an errno.
 *
 * NOTE(review): gappy extraction -- declarations of error, biosize,
 * vattr, td and several braces are in elided lines.
 */
150 nwfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred) {
151 struct nwmount *nmp = VFSTONWFS(vp->v_mount);
152 struct nwnode *np = VTONW(vp);
/* Only plain files and directories can be read through this path. */
157 if (vp->v_type != VREG && vp->v_type != VDIR) {
158 printf("%s: vn types other than VREG or VDIR are unsupported !\n",__func__);
161 if (uiop->uio_resid == 0) return 0;
162 if (uiop->uio_offset < 0) return EINVAL;
163 /* if (uiop->uio_offset + uiop->uio_resid > nmp->nm_maxfilesize)
/* Directory reads go through the readdir translator. */
166 if (vp->v_type == VDIR) {
167 error = nwfs_readvdir(vp, uiop, cred);
168 biosize = NWFSTOCONN(nmp)->buffer_size;
/* Locally modified node: drop the cached attributes and refetch so
 * n_mtime reflects the server's view before we trust cached data. */
171 if (np->n_flag & NMODIFIED) {
172 nwfs_attr_cacheremove(vp);
173 error = VOP_GETATTR(vp, &vattr, cred);
174 if (error) return (error);
175 np->n_mtime = vattr.va_mtime.tv_sec;
177 error = VOP_GETATTR(vp, &vattr, cred);
178 if (error) return (error);
/* File changed on the server: invalidate our cached buffers first. */
179 if (np->n_mtime != vattr.va_mtime.tv_sec) {
180 error = nwfs_vinvalbuf(vp, td);
181 if (error) return (error);
182 np->n_mtime = vattr.va_mtime.tv_sec;
/* Finally, read the data over the NCP connection. */
185 error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
/*
 * nwfs_writevnode: write to a regular file vnode over the NCP
 * connection, honoring IO_APPEND/IO_SYNC semantics and growing the
 * cached file size (and the VM object) on extension.  Returns 0 or an
 * errno.
 *
 * NOTE(review): gappy extraction -- the K&R parameter declarations and
 * locals (error, td, vattr) are in elided lines; embedded numbers are
 * original-file line numbers.
 */
190 nwfs_writevnode(vp, uiop, cred, ioflag)
196 struct nwmount *nmp = VTONWFS(vp);
197 struct nwnode *np = VTONW(vp);
199 /* struct vattr vattr;*/
/* Only plain files can be written through this path. */
202 if (vp->v_type != VREG) {
203 printf("%s: vn types other than VREG unsupported !\n",__func__);
206 NCPVNDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
207 if (uiop->uio_offset < 0) return EINVAL;
208 /* if (uiop->uio_offset + uiop->uio_resid > nmp->nm_maxfilesize)
/* Append or synchronous writes: flush any locally cached dirty data
 * first so the write lands in the right order / at the right offset. */
211 if (ioflag & (IO_APPEND | IO_SYNC)) {
212 if (np->n_flag & NMODIFIED) {
213 nwfs_attr_cacheremove(vp);
214 error = nwfs_vinvalbuf(vp, td);
215 if (error) return (error);
217 if (ioflag & IO_APPEND) {
218 /* We can relay only on local information about file size,
219 * because until file is closed NetWare will not return
220 * the correct size. */
222 nwfs_attr_cacheremove(vp);
223 error = VOP_GETATTR(vp, &vattr, cred);
224 if (error) return (error);
/* Position the write at the (locally tracked) end of file. */
226 uiop->uio_offset = np->n_size;
229 if (uiop->uio_resid == 0) return 0;
/* Enforce the process file-size resource limit (RLIMIT_FSIZE). */
231 if (vn_rlimit_fsize(vp, uiop, td))
234 error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
235 NCPVNDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
/* If the write extended the file, update the cached size and tell the
 * VM system so pager operations see the new length. */
237 if (uiop->uio_offset > np->n_size) {
238 np->n_vattr.va_size = np->n_size = uiop->uio_offset;
239 vnode_pager_setsize(vp, np->n_size);
/*
 * NOTE(review): gappy extraction -- the function prologue (declarations
 * of uiop, io, np, nmp, error) and the enclosing comment delimiters are
 * in elided lines; embedded numbers are original-file line numbers.
 */
246 * Do an I/O operation to/from a cache block.
249 nwfs_doio(vp, bp, cr, td)
263 nmp = VFSTONWFS(vp->v_mount);
/* Describe the buffer's data as a single-segment system-space uio. */
266 uiop->uio_iovcnt = 1;
267 uiop->uio_segflg = UIO_SYSSPACE;
269 if (bp->b_iocmd == BIO_READ) {
270 io.iov_len = uiop->uio_resid = bp->b_bcount;
271 io.iov_base = bp->b_data;
272 uiop->uio_rw = UIO_READ;
273 switch (vp->v_type) {
/* Regular file: read at the block's byte offset (b_blkno is counted
 * in DEV_BSIZE units). */
275 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
276 error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
/* Short read: zero the tail so stale buffer contents never leak. */
279 if (uiop->uio_resid) {
280 int left = uiop->uio_resid;
281 int nread = bp->b_bcount - left;
283 bzero((char *)bp->b_data + nread, left);
/* NOTE(review): the nfs_* readdir code below looks copied verbatim
 * from the NFS client; in the full file it is presumably compiled out
 * (e.g. under "#ifdef notyet" in the elided lines) -- verify before
 * touching it. */
287 nfsstats.readdir_bios++;
288 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
289 if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
290 error = nfs_readdirplusrpc(vp, uiop, cr);
291 if (error == NFSERR_NOTSUPP)
292 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
294 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
295 error = nfs_readdirrpc(vp, uiop, cr);
296 if (error == 0 && uiop->uio_resid == bp->b_bcount)
297 bp->b_flags |= B_INVAL;
301 printf("nwfs_doio: type %x unexpected\n",vp->v_type);
/* Read failed: flag the buffer so the error propagates to callers. */
305 bp->b_ioflags |= BIO_ERROR;
/* Write path: clamp the dirty region to the file size, then push it. */
309 if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
310 bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
312 if (bp->b_dirtyend > bp->b_dirtyoff) {
313 io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
314 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
315 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
316 uiop->uio_rw = UIO_WRITE;
317 error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
320 * For an interrupted write, the buffer is still valid
321 * and the write hasn't been pushed to the server yet,
322 * so we can't set BIO_ERROR and report the interruption
323 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
324 * is not relevant, so the rpc attempt is essentially
325 * a noop. For the case of a V3 write rpc not being
326 * committed to stable storage, the block is still
327 * dirty and requires either a commit rpc or another
328 * write rpc with iomode == NFSV3WRITE_FILESYNC before
329 * the block is reused. This is indicated by setting
330 * the B_DELWRI and B_NEEDCOMMIT flags.
333 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
/* Keep the buffer dirty/cached so the interrupted write is retried. */
337 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
338 if ((bp->b_flags & B_ASYNC) == 0)
339 bp->b_flags |= B_EINTR;
340 if ((bp->b_flags & B_PAGING) == 0) {
342 bp->b_flags &= ~B_DONE;
344 if ((bp->b_flags & B_ASYNC) == 0)
345 bp->b_flags |= B_EINTR;
/* Hard write error: flag the buffer and discard the dirty range. */
349 bp->b_ioflags |= BIO_ERROR;
350 bp->b_error /*= np->n_error */= error;
351 /* np->n_flag |= NWRITEERR;*/
353 bp->b_dirtyoff = bp->b_dirtyend = 0;
/* Report how much of the request was left undone. */
361 bp->b_resid = uiop->uio_resid;
/*
 * NOTE(review): gappy extraction -- the function name line, local
 * declarations (vp, object, bp, kva, iov, uio, pages, m, ...) and the
 * #ifndef NWFS_RWCACHE / #else structure are partially elided; embedded
 * numbers are original-file line numbers.
 */
367 * Vnode op for VM getpages.
368 * Wish wish .... get rid from multiple IO routines
372 struct vop_getpages_args /* {
377 vm_ooffset_t a_offset;
/* NOTE(review): this line is a garbled merge of the non-RWCACHE branch
 * ("return vop_stdgetpages(ap->a_vp, ap->a_m, ap->a_count, ...)") --
 * restore from the full source before compiling. */
381 return vop_stdgetpages(ap);(ap->a_vp, ap->a_m, ap->a_count,
383 int i, error, nextoff, size, toff, npages, count;
397 td = curthread; /* XXX */
398 cred = td->td_ucred; /* XXX */
400 nmp = VFSTONWFS(vp->v_mount);
/* Without a backing VM object there is nothing to page into. */
404 if ((object = vp->v_object) == NULL) {
405 printf("nwfs_getpages: called with non-merged cache vnode??\n");
406 return VM_PAGER_ERROR;
/* Borrow a pbuf and map the target pages into its KVA window. */
409 bp = getpbuf(&nwfs_pbuf_freecnt);
410 npages = btoc(count);
411 kva = (vm_offset_t) bp->b_data;
412 pmap_qenter(kva, pages, npages);
/* Build a system-space uio reading 'count' bytes at the pages' file
 * offset straight into the mapped window. */
414 iov.iov_base = (caddr_t) kva;
418 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
419 uio.uio_resid = count;
420 uio.uio_segflg = UIO_SYSSPACE;
421 uio.uio_rw = UIO_READ;
424 error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio,cred);
425 pmap_qremove(kva, npages);
427 relpbuf(bp, &nwfs_pbuf_freecnt);
429 VM_OBJECT_LOCK(object);
/* Total failure (nothing read): free every page except the one the
 * fault handler explicitly requested, then report the error. */
430 if (error && (uio.uio_resid == count)) {
431 printf("nwfs_getpages: error %d\n",error);
432 for (i = 0; i < npages; i++) {
433 if (ap->a_reqpage != i) {
434 vm_page_lock(pages[i]);
435 vm_page_free(pages[i]);
436 vm_page_unlock(pages[i]);
439 VM_OBJECT_UNLOCK(object);
440 return VM_PAGER_ERROR;
/* Mark pages valid according to how many bytes actually arrived. */
443 size = count - uio.uio_resid;
445 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
447 nextoff = toff + PAGE_SIZE;
/* Fully covered page: entirely valid. */
450 if (nextoff <= size) {
451 m->valid = VM_PAGE_BITS_ALL;
452 KASSERT(m->dirty == 0,
453 ("nwfs_getpages: page %p is dirty", m));
/* Partially covered page: valid up to the next DEV_BSIZE boundary. */
455 int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
456 vm_page_set_valid(m, 0, nvalid);
457 KASSERT((m->dirty & vm_page_bits(0, nvalid)) == 0,
458 ("nwfs_getpages: page %p is dirty", m));
/* Read-ahead pages (not the requested one) are finished here. */
461 if (i != ap->a_reqpage)
462 vm_page_readahead_finish(m);
464 VM_OBJECT_UNLOCK(object);
466 #endif /* NWFS_RWCACHE */
/*
 * NOTE(review): gappy extraction -- both the non-RWCACHE wrapper and
 * the RWCACHE implementation of putpages are visible in part; the
 * #ifndef/#else structure, function name lines and several locals are
 * elided.  Embedded numbers are original-file line numbers.
 */
470 * Vnode op for VM putpages.
471 * possible bug: all IO done in sync mode
472 * Note that vop_close always invalidate pages before close, so it's
473 * not necessary to open vnode.
477 struct vop_putpages_args /* {
483 vm_ooffset_t a_offset;
487 struct vnode *vp = ap->a_vp;
492 td = curthread; /* XXX */
493 cred = td->td_ucred; /* XXX */
/* Non-RWCACHE variant: bracket the generic pager path with an
 * open/close pair so the file handle is valid for writing. */
494 VOP_OPEN(vp, FWRITE, cred, td, NULL);
495 error = vop_stdputpages(ap);
496 VOP_CLOSE(vp, FWRITE, cred, td);
503 int i, npages, count;
509 td = curthread; /* XXX */
510 cred = td->td_ucred; /* XXX */
511 /* VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
513 nmp = VFSTONWFS(vp->v_mount);
/* Assume failure for every page until the write succeeds. */
516 rtvals = ap->a_rtvals;
517 npages = btoc(count);
519 for (i = 0; i < npages; i++) {
520 rtvals[i] = VM_PAGER_ERROR;
/* Borrow a pbuf and map the dirty pages into its KVA window. */
523 bp = getpbuf(&nwfs_pbuf_freecnt);
524 kva = (vm_offset_t) bp->b_data;
525 pmap_qenter(kva, pages, npages);
/* Single synchronous write of 'count' bytes at the pages' offset. */
527 iov.iov_base = (caddr_t) kva;
531 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
532 uio.uio_resid = count;
533 uio.uio_segflg = UIO_SYSSPACE;
534 uio.uio_rw = UIO_WRITE;
536 NCPVNDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);
538 error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
539 /* VOP_CLOSE(vp, FWRITE, cred, td);*/
540 NCPVNDEBUG("paged write done: %d\n", error);
542 pmap_qremove(kva, npages);
543 relpbuf(bp, &nwfs_pbuf_freecnt);
/* Mark the pages actually written as clean/OK in rtvals. */
546 vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
548 #endif /* NWFS_RWCACHE */
/*
 * NOTE(review): gappy extraction -- the K&R parameter declarations,
 * locals (error), loop braces and trailing wakeup/return are in elided
 * lines; embedded numbers are original-file line numbers.
 */
551 * Flush and invalidate all dirty buffers. If another process is already
552 * doing the flush, just wait for completion.
555 nwfs_vinvalbuf(vp, td)
559 struct nwnode *np = VTONW(vp);
560 /* struct nwmount *nmp = VTONWFS(vp);*/
/* A doomed (being-recycled) vnode has nothing left to flush. */
563 if (vp->v_iflag & VI_DOOMED)
/* Single-flusher protocol: if another thread holds NFLUSHINPROG, sleep
 * on n_flag until it finishes, checking for connection interrupts. */
566 while (np->n_flag & NFLUSHINPROG) {
567 np->n_flag |= NFLUSHWANT;
568 error = tsleep(&np->n_flag, PRIBIO + 2, "nwfsvinv", 2 * hz);
569 error = ncp_chkintr(NWFSTOCONN(VTONWFS(vp)), td);
573 np->n_flag |= NFLUSHINPROG;
/* Push any dirty VM pages synchronously before invalidating buffers. */
575 if (vp->v_bufobj.bo_object != NULL) {
576 VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
577 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
578 VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
581 error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
/* Interrupted by a signal: drop the in-progress flag, wake any waiter,
 * and bail out so the caller can retry. */
583 if (error == ERESTART || error == EINTR) {
584 np->n_flag &= ~NFLUSHINPROG;
585 if (np->n_flag & NFLUSHWANT) {
586 np->n_flag &= ~NFLUSHWANT;
/* Retry the invalidation (loop structure partially elided). */
591 error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
/* Success: clear modified/in-progress state and wake any waiter. */
593 np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
594 if (np->n_flag & NFLUSHWANT) {
595 np->n_flag &= ~NFLUSHWANT;