 * Copyright 1998, 2000 Marshall Kirk McKusick.
 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 * Further information about soft updates can be obtained from:
 *	Marshall Kirk McKusick	http://www.mckusick.com/softdep/
 *	1614 Oxford Street	mckusick@mckusick.com
 *	Berkeley, CA 94709-1608	+1-510-843-9542
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_quota.h"
 * For now we want the safety net that the DEBUG flag provides.
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <geom/geom.h>
#define	KTR_SUJ	0 /* Define to KTR_SPARE. */
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	panic("softdep_flushfiles called");
softdep_mount(devvp, mp, fs, cred)
softdep_uninitialize()
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
softdep_setup_inomapdep(bp, ip, newinum, mode)
	panic("softdep_setup_inomapdep called");
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	ufs2_daddr_t newblkno;
	panic("softdep_setup_blkmapdep called");
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	panic("softdep_setup_allocdirect called");
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	panic("softdep_setup_allocext called");
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	panic("softdep_setup_allocindir_page called");
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	ufs2_daddr_t newblkno;
	panic("softdep_setup_allocindir_meta called");
softdep_journal_freeblocks(ip, cred, length, flags)
	panic("softdep_journal_freeblocks called");
softdep_journal_fsync(ip)
	panic("softdep_journal_fsync called");
softdep_setup_freeblocks(ip, length, flags)
	panic("softdep_setup_freeblocks called");
softdep_freefile(pvp, ino, mode)
	panic("softdep_freefile called");
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *newdirbp;
	panic("softdep_setup_directory_add called");
softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
	panic("softdep_change_directoryentry_offset called");
softdep_setup_remove(bp, dp, ip, isrmdir)
	panic("softdep_setup_remove called");
softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
	panic("softdep_setup_directory_change called");
softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
	struct workhead *wkhd;
	panic("%s called", __FUNCTION__);
softdep_setup_inofree(mp, bp, ino, wkhd)
	struct workhead *wkhd;
	panic("%s called", __FUNCTION__);
softdep_setup_unlink(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_setup_link(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_revert_link(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_setup_rmdir(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_revert_rmdir(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_setup_create(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_revert_create(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_setup_mkdir(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_revert_mkdir(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_setup_dotdot_link(dp, ip)
	panic("%s called", __FUNCTION__);
softdep_prealloc(vp, waitok)
	panic("%s called", __FUNCTION__);
softdep_journal_lookup(mp, vpp)
softdep_change_linkcnt(ip)
	panic("softdep_change_linkcnt called");
softdep_load_inodeblock(ip)
	panic("softdep_load_inodeblock called");
softdep_update_inodeblock(ip, bp, waitfor)
	panic("softdep_update_inodeblock called");
	struct vnode *vp;	/* the "in_core" copy of the inode */
softdep_fsync_mountdev(vp)
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
softdep_sync_metadata(struct vnode *vp)
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
	panic("softdep_slowdown called");
softdep_releasefile(ip)
	struct inode *ip;	/* inode with the zero effective link count */
	panic("softdep_releasefile called");
softdep_request_cleanup(fs, vp, cred, resource)
softdep_check_suspend(struct mount *mp,
    int secondary_writes,
    int secondary_accwrites)
	(void) softdep_accdeps;
	bo = &devvp->v_bufobj;
	ASSERT_BO_LOCKED(bo);
	while (mp->mnt_secondary_writes != 0) {
		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
		    (PUSER - 1) | PDROP, "secwr", 0);
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
softdep_get_depcounts(struct mount *mp,
    int *softdepactiveaccp)
	*softdepactiveaccp = 0;
softdep_buf_append(bp, wkhd)
	struct workhead *wkhd;
	panic("softdep_buf_appendwork called");
softdep_inode_append(ip, cred, wkhd)
	struct workhead *wkhd;
	panic("softdep_inode_appendwork called");
softdep_freework(wkhd)
	struct workhead *wkhd;
	panic("softdep_freework called");
FEATURE(softupdates, "FFS soft-updates support");
 * These definitions need to be adapted to the system to which
 * this file is being ported.
#define	M_SOFTDEP_FLAGS	(M_WAITOK)
#define	D_BMSAFEMAP	2
#define	D_ALLOCDIRECT	4
#define	D_ALLOCINDIR	6
#define	D_NEWDIRBLK	13
#define	D_FREEWORK	14
#define	D_JFREEBLK	20
#define	D_JFREEFRAG	21
#define	D_SENTINEL	27
#define	D_LAST		D_SENTINEL
unsigned long dep_current[D_LAST + 1];
unsigned long dep_total[D_LAST + 1];
unsigned long dep_write[D_LAST + 1];
static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
    "soft updates stats");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
    "total dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
    "current dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
    "current dependencies written");
#define	SOFTDEP_TYPE(type, str, long)					\
	static MALLOC_DEFINE(M_ ## type, #str, long);			\
	SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
	    &dep_total[D_ ## type], 0, "");				\
	SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD,	\
	    &dep_current[D_ ## type], 0, "");				\
	SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD,	\
	    &dep_write[D_ ## type], 0, "");
SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
    "Block or frag allocated from cyl group map");
SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
SOFTDEP_TYPE(FREEWORK, freework, "free an inode block");
SOFTDEP_TYPE(FREEDEP, freedep, "track a block free");
SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");
static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");
static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
static struct malloc_type *memtype[] = {
static LIST_HEAD(mkdirlist, mkdir) mkdirlisthd;
#define	DtoM(type) (memtype[type])
 * Names of malloc types.
#define	TYPENAME(type)							\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
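/*
 * A minimal sketch (not part of the original file) of how the table above
 * is meant to be consumed, assuming the invariant memtype[D_XXX] == M_XXX
 * holds: a work item is always released through the malloc type found at
 * its own type index, mirroring what workitem_free() does later in this
 * file.  EXAMPLE_ONLY is a hypothetical guard; the sketch is not compiled.
 */
#ifdef EXAMPLE_ONLY
static void
workitem_release_example(struct worklist *item, int type)
{

	if (item->wk_type != type)
		panic("workitem_release_example: type mismatch %s != %s",
		    TYPENAME(item->wk_type), TYPENAME(type));
	free(item, DtoM(type));
}
#endif /* EXAMPLE_ONLY */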
 * End system adaptation definitions.
#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)
 * Forward declarations.
struct inodedep_hashhead;
struct newblk_hashhead;
struct pagedep_hashhead;
struct bmsafemap_hashhead;
 * Private journaling structures.
	struct jseglst	jb_segs;	/* TAILQ of current segments. */
	struct jseg	*jb_writeseg;	/* Next write to complete. */
	struct jseg	*jb_oldestseg;	/* Oldest segment with valid entries. */
	struct jextent	*jb_extent;	/* Extent array. */
	uint64_t	jb_nextseq;	/* Next sequence number. */
	uint64_t	jb_oldestwrseq;	/* Oldest written sequence number. */
	uint8_t		jb_needseg;	/* Need a forced segment. */
	uint8_t		jb_suspended;	/* Did journal suspend writes? */
	int		jb_avail;	/* Available extents. */
	int		jb_used;	/* Last used extent. */
	int		jb_head;	/* Allocator head. */
	int		jb_off;		/* Allocator extent offset. */
	int		jb_blocks;	/* Total disk blocks covered. */
	int		jb_free;	/* Total disk blocks free. */
	int		jb_min;		/* Minimum free space. */
	int		jb_low;		/* Low on space. */
	int		jb_age;		/* Insertion time of oldest rec. */
	ufs2_daddr_t	je_daddr;	/* Disk block address. */
	int		je_blocks;	/* Disk block count. */
 * Internal function prototypes.
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *);
static	struct buf *getdirtybuf(struct buf *, struct mtx *, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	void unlinked_inodedep(struct mount *, struct inodedep *);
static	void clear_unlinked_inodedep(struct inodedep *);
static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
static	int free_pagedep(struct pagedep *);
static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
static	int flush_deplist(struct allocdirectlst *, int, int *);
static	int sync_cgs(struct mount *, int);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	int handle_written_sbdep(struct sbdep *, struct buf *);
static	void initiate_write_sbdep(struct sbdep *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_indirdep(struct indirdep *, struct buf *,
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
static	void handle_written_jaddref(struct jaddref *);
static	void handle_written_jremref(struct jremref *);
static	void handle_written_jseg(struct jseg *, struct buf *);
static	void handle_written_jnewblk(struct jnewblk *);
static	void handle_written_jblkdep(struct jblkdep *);
static	void handle_written_jfreefrag(struct jfreefrag *);
static	void complete_jseg(struct jseg *);
static	void complete_jsegs(struct jseg *);
static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
static	inline void inoref_write(struct inoref *, struct jseg *,
static	void handle_allocdirect_partdone(struct allocdirect *,
static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
static	void indirdep_complete(struct indirdep *);
static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
static	void indirblk_insert(struct freework *);
static	void indirblk_remove(struct freework *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void initiate_write_indirdep(struct indirdep*, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	int handle_workitem_remove(struct dirrem *, int);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
static	void cancel_indirdep(struct indirdep *, struct buf *,
static	void free_indirdep(struct indirdep *);
static	void free_diradd(struct diradd *, struct workhead *);
static	void merge_diradd(struct inodedep *, struct diradd *);
static	void complete_diradd(struct diradd *);
static	struct diradd *diradd_lookup(struct pagedep *, int);
static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
	    struct jremref *, struct jremref *);
static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
static	void cancel_allocindir(struct allocindir *, struct buf *bp,
	    struct freeblks *, int);
static	int setup_trunc_indir(struct freeblks *, struct inode *,
	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
static	void complete_trunc_indir(struct freework *);
static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
static	void complete_mkdir(struct mkdir *);
static	void free_newdirblk(struct newdirblk *);
static	void free_jremref(struct jremref *);
static	void free_jaddref(struct jaddref *);
static	void free_jsegdep(struct jsegdep *);
static	void free_jsegs(struct jblocks *);
static	void rele_jseg(struct jseg *);
static	void free_jseg(struct jseg *, struct jblocks *);
static	void free_jnewblk(struct jnewblk *);
static	void free_jblkdep(struct jblkdep *);
static	void free_jfreefrag(struct jfreefrag *);
static	void free_freedep(struct freedep *);
static	void journal_jremref(struct dirrem *, struct jremref *,
static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
static	int cancel_jaddref(struct jaddref *, struct inodedep *,
static	void cancel_jfreefrag(struct jfreefrag *);
static	inline void setup_freedirect(struct freeblks *, struct inode *,
static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
static	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
static	void newblk_freefrag(struct newblk*);
static	void free_newblk(struct newblk *);
static	void cancel_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, struct freeblks *);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void freework_freeblock(struct freework *);
static	void freework_enqueue(struct freework *);
static	int handle_workitem_freeblocks(struct freeblks *, int);
static	int handle_complete_freeblocks(struct freeblks *, int);
static	void handle_workitem_indirblk(struct freework *);
static	void handle_written_freework(struct freework *);
static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
	    struct inodedep *, struct allocindir *, ufs_lbn_t);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t, ufs_lbn_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct freefrag *allocindir_merge(struct allocindir *,
	    struct allocindir *);
static	int bmsafemap_find(struct bmsafemap_hashhead *, struct mount *, int,
	    struct bmsafemap **);
static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
	    int cg, struct bmsafemap *);
static	int newblk_find(struct newblk_hashhead *, struct mount *, ufs2_daddr_t,
	    int, struct newblk **);
static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_find(struct inodedep_hashhead *, struct fs *, ino_t,
static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
	    int, struct pagedep **);
static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	    struct mount *mp, int, struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(struct mount *, int);
static	int process_worklist_item(struct mount *, int, int);
static	void process_removes(struct vnode *);
static	void process_truncates(struct vnode *);
static	void jwork_move(struct workhead *, struct workhead *);
static	void jwork_insert(struct workhead *, struct jsegdep *);
static	void add_to_worklist(struct worklist *, int);
static	void wake_worklist(struct worklist *);
static	void wait_worklist(struct worklist *, char *);
static	void remove_from_worklist(struct worklist *);
static	void softdep_flush(void);
static	void softdep_flushjournal(struct mount *);
static	int softdep_speedup(void);
static	void worklist_speedup(void);
static	int journal_mount(struct mount *, struct fs *, struct ucred *);
static	void journal_unmount(struct mount *);
static	int journal_space(struct ufsmount *, int);
static	void journal_suspend(struct ufsmount *);
static	int journal_unsuspend(struct ufsmount *ump);
static	void softdep_prelink(struct vnode *, struct vnode *);
static	void add_to_journal(struct worklist *);
static	void remove_from_journal(struct worklist *);
static	void softdep_process_journal(struct mount *, struct worklist *, int);
static	struct jremref *newjremref(struct dirrem *, struct inode *,
	    struct inode *ip, off_t, nlink_t);
static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
static	inline struct jsegdep *inoref_jseg(struct inoref *);
static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
static	void move_newblock_dep(struct jaddref *, struct inodedep *);
static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
	    ufs2_daddr_t, long, ufs_lbn_t);
static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static	int jwait(struct worklist *, int);
static	struct inodedep *inodedep_lookup_ip(struct inode *);
static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static	void handle_jwork(struct workhead *);
static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
static	struct jblocks *jblocks_create(void);
static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
static	void jblocks_free(struct jblocks *, struct mount *, int);
static	void jblocks_destroy(struct jblocks *);
static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);
 * Exported softdep operations.
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);
static struct mtx lk;
MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);
#define	TRY_ACQUIRE_LOCK(lk)	mtx_trylock(lk)
#define	ACQUIRE_LOCK(lk)	mtx_lock(lk)
#define	FREE_LOCK(lk)		mtx_unlock(lk)
#define	BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
#define	BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)
 * Worklist queue management.
 * These routines require that the lock be held.
#ifndef /* NOT */ DEBUG
#define	WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
#define	WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
#define	WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
#define	WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE
static	void worklist_insert(struct workhead *, struct worklist *, int);
static	void worklist_remove(struct worklist *, int);
#define	WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
#define	WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
#define	WORKLIST_REMOVE(item) worklist_remove(item, 1)
#define	WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)
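/*
 * A minimal usage sketch (not part of the original file): worklist
 * manipulation is done under the softdep mutex, so a typical caller
 * brackets the macros with ACQUIRE_LOCK/FREE_LOCK; the _UNLOCKED
 * variants exist for items not yet visible to other threads.
 * EXAMPLE_ONLY is a hypothetical guard; the sketch is not compiled.
 */
#ifdef EXAMPLE_ONLY
static void
worklist_usage_example(struct workhead *head, struct worklist *item)
{

	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(head, item);	/* item->wk_state gains ONWORKLIST */
	WORKLIST_REMOVE(item);		/* and loses it again */
	FREE_LOCK(&lk);
}
#endif /* EXAMPLE_ONLY */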
worklist_insert(head, item, locked)
	struct workhead *head;
	struct worklist *item;
		mtx_assert(&lk, MA_OWNED);
	if (item->wk_state & ONWORKLIST)
		panic("worklist_insert: %p %s(0x%X) already on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
worklist_remove(item, locked)
	struct worklist *item;
		mtx_assert(&lk, MA_OWNED);
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: %p %s(0x%X) not on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
 * Merge two jsegdeps keeping only the oldest one as newer references
 * can't be discarded until after older references.
static inline struct jsegdep *
jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
	struct jsegdep *swp;
	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
	WORKLIST_REMOVE(&two->jd_list);
 * If two freedeps are compatible, free one to reduce list size.
static inline struct freedep *
freedep_merge(struct freedep *one, struct freedep *two)
	if (one->fd_freework == two->fd_freework) {
		WORKLIST_REMOVE(&two->fd_list);
 * Move journal work from one list to another.  Duplicate freedeps and
 * jsegdeps are coalesced to keep the lists as small as possible.
jwork_move(dst, src)
	struct workhead *dst;
	struct workhead *src;
	struct freedep *freedep;
	struct jsegdep *jsegdep;
	struct worklist *wkn;
	struct worklist *wk;
	    ("jwork_move: dst == src"));
	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
		if (wk->wk_type == D_JSEGDEP)
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	mtx_assert(&lk, MA_OWNED);
	while ((wk = LIST_FIRST(src)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(dst, wk);
		if (wk->wk_type == D_JSEGDEP) {
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
jwork_insert(dst, jsegdep)
	struct workhead *dst;
	struct jsegdep *jsegdep;
	struct jsegdep *jsegdepn;
	struct worklist *wk;
	LIST_FOREACH(wk, dst, wk_list)
		if (wk->wk_type == D_JSEGDEP)
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
	jsegdepn = WK_JSEGDEP(wk);
	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
		WORKLIST_REMOVE(wk);
		free_jsegdep(jsegdepn);
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
		free_jsegdep(jsegdep);
 * Routines for tracking and managing workitems.
static	void workitem_free(struct worklist *, int);
static	void workitem_alloc(struct worklist *, int, struct mount *);
#define	WORKITEM_FREE(item, type) workitem_free((struct worklist *)(item), (type))
workitem_free(item, type)
	struct worklist *item;
	struct ufsmount *ump;
	mtx_assert(&lk, MA_OWNED);
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: %s(0x%X) still on list",
		    TYPENAME(item->wk_type), item->wk_state);
	if (item->wk_type != type)
		panic("workitem_free: type mismatch %s != %s",
		    TYPENAME(item->wk_type), TYPENAME(type));
	if (item->wk_state & IOWAITING)
	ump = VFSTOUFS(item->wk_mp);
	if (--ump->softdep_deps == 0 && ump->softdep_req)
		wakeup(&ump->softdep_deps);
	dep_current[type]--;
	free(item, DtoM(type));
workitem_alloc(item, type, mp)
	struct worklist *item;
	struct ufsmount *ump;
	item->wk_type = type;
	dep_current[type]++;
	ump->softdep_deps++;
	ump->softdep_accdeps++;
 * Workitem queue management
static int max_softdeps;	/* maximum number of structs before slowdown */
static int maxindirdeps = 50;	/* max number of indirdeps before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout softdep_callout;
static int req_pending;
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
static int req_clear_remove;	/* syncer process flush some freeblks */
static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */
 * runtime statistics
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
static int stat_journal_min;	/* Times hit journal min threshold */
static int stat_journal_low;	/* Times hit journal low threshold */
static int stat_journal_wait;	/* Times blocked in jwait(). */
static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
static int stat_cleanup_failures; /* Number of cleanup requests that failed */
SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
    &max_softdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
SYSCTL_INT(_debug_softdep, OID_AUTO, maxindirdeps, CTLFLAG_RW,
    &maxindirdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
    &stat_worklist_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
    &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
    &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
    &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
    &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
    &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
    &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
    &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
    &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
    &stat_dir_entry, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
    &stat_jaddref, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
    &stat_jnewblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
    &stat_journal_low, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
    &stat_journal_min, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
    &stat_journal_wait, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
    &stat_jwait_filepage, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
    &stat_jwait_freeblks, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
    &stat_jwait_inode, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
    &stat_jwait_newblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
    &stat_cleanup_blkrequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
    &stat_cleanup_inorequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
    &stat_cleanup_high_delay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
    &stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
    &stat_cleanup_failures, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
    &softdep_flushcache, 0, "");
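/*
 * Illustrative note (not part of the original file): the knobs and
 * counters above appear under the debug.softdep sysctl tree, so they
 * can be inspected or tuned from userland with sysctl(8), e.g.:
 *
 *	sysctl debug.softdep.max_softdeps
 *	sysctl debug.softdep.tickdelay=2
 *	sysctl debug.softdep.flushcache=1
 */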
SYSCTL_DECL(_vfs_ffs);
LIST_HEAD(bmsafemap_hashhead, bmsafemap) *bmsafemap_hashtbl;
static u_long	bmsafemap_hash;	/* size of hash table - 1 */
static int compute_summary_at_mount = 0; /* Whether to recompute the summary at mount time */
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
    &compute_summary_at_mount, 0, "Recompute summary at mount");
static struct proc *softdepproc;
static struct kproc_desc softdep_kp = {
SYSINIT(sdproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	struct ufsmount *ump;
	td->td_pflags |= TDP_NORUNNINGBUF;
		kproc_suspend_check(softdepproc);
		vfslocked = VFS_LOCK_GIANT((struct mount *)NULL);
		 * If requested, try removing inode or removal dependencies.
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		if (req_clear_remove) {
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		VFS_UNLOCK_GIANT(vfslocked);
		remaining = progress = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			if (MOUNTEDSOFTDEP(mp) == 0)
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
			vfslocked = VFS_LOCK_GIANT(mp);
			progress += softdep_process_worklist(mp, 0);
			remaining += ump->softdep_on_worklist;
			VFS_UNLOCK_GIANT(vfslocked);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
		mtx_unlock(&mountlist_mtx);
		if (remaining && progress)
		msleep(&req_pending, &lk, PVM, "sdflush", hz);
worklist_speedup(void)
	mtx_assert(&lk, MA_OWNED);
	if (req_pending == 0) {
		wakeup(&req_pending);
softdep_speedup(void)
	return speedup_syncer();
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
#define	WK_HEAD		0x0001	/* Add to HEAD. */
#define	WK_NODELAY	0x0002	/* Process immediately. */
add_to_worklist(wk, flags)
	struct worklist *wk;
	struct ufsmount *ump;
	mtx_assert(&lk, MA_OWNED);
	ump = VFSTOUFS(wk->wk_mp);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_worklist: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST;
	if (ump->softdep_on_worklist == 0) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	} else if (flags & WK_HEAD) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	ump->softdep_on_worklist += 1;
	if (flags & WK_NODELAY)
 * Remove the item to be processed. If we are removing the last
 * item on the list, we need to recalculate the tail pointer.
remove_from_worklist(wk)
	struct worklist *wk;
	struct ufsmount *ump;
	ump = VFSTOUFS(wk->wk_mp);
	WORKLIST_REMOVE(wk);
	if (ump->softdep_worklist_tail == wk)
		ump->softdep_worklist_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	ump->softdep_on_worklist -= 1;
	struct worklist *wk;
	if (wk->wk_state & IOWAITING) {
		wk->wk_state &= ~IOWAITING;
wait_worklist(wk, wmesg)
	struct worklist *wk;
	wk->wk_state |= IOWAITING;
	msleep(wk, &lk, PVM, wmesg, 0);
 * Process that runs once per second to handle items in the background queue.
 * Note that we ensure that items are handled in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
softdep_process_worklist(mp, full)
	struct thread *td = curthread;
	struct ufsmount *ump;
	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	starttime = time_second;
	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
	while (ump->softdep_on_worklist > 0) {
		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
		 * If requested, try removing inode or removal dependencies.
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		if (req_clear_remove) {
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		if (should_yield()) {
			kern_yield(PRI_UNCHANGED);
		 * Never allow processing to run for more than one
		 * second. Otherwise the other mountpoints may get
		 * excessively backlogged.
		if (!full && starttime != time_second)
		journal_unsuspend(ump);
 * Process all removes associated with a vnode if we are running out of
 * journal space. Any other process which attempts to flush these will
 * be unable as we have the vnodes locked.
	struct inodedep *inodedep;
	struct dirrem *dirrem;
	mtx_assert(&lk, MA_OWNED);
	inum = VTOI(vp)->i_number;
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
			 * If another thread is trying to lock this vnode
			 * it will fail but we must wait for it to do so
			 * before we can proceed.
			if (dirrem->dm_state & INPROGRESS) {
				wait_worklist(&dirrem->dm_list, "pwrwait");
			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
			    (COMPLETE | ONWORKLIST))
		remove_from_worklist(&dirrem->dm_list);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_removes: suspended filesystem");
		handle_workitem_remove(dirrem, 0);
		vn_finished_secondary_write(mp);
 * Process all truncations associated with a vnode if we are running out
 * of journal space. This is called when the vnode lock is already held
 * and no other process can clear the truncation. This function returns
 * a value greater than zero if it did any work.
process_truncates(vp)
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	mtx_assert(&lk, MA_OWNED);
	inum = VTOI(vp)->i_number;
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written. */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				    &freeblks->fb_jblkdephd)->jb_list,
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
			/* Freeblks is waiting on an inode write. */
			if ((freeblks->fb_state & COMPLETE) == 0) {
			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
			    (ALLCOMPLETE | ONWORKLIST)) {
				remove_from_worklist(&freeblks->fb_list);
				freeblks->fb_state |= INPROGRESS;
				if (vn_start_secondary_write(NULL, &mp,
					panic("process_truncates: "
					    "suspended filesystem");
				handle_workitem_freeblocks(freeblks, 0);
				vn_finished_secondary_write(mp);
			if (freeblks->fb_cgwait)
			sync_cgs(mp, MNT_WAIT);
			ffs_sync_snap(mp, MNT_WAIT);
		if (freeblks == NULL)
 * Process one item on the worklist.
process_worklist_item(mp, target, flags)
	struct worklist sentinel;
	struct worklist *wk;
	struct ufsmount *ump;
	mtx_assert(&lk, MA_OWNED);
	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	if (curthread->td_pflags & TDP_COWINPROGRESS)
	PHOLD(curproc);	/* Don't let the stack go away. */
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
	    wk = LIST_NEXT(&sentinel, wk_list)) {
		if (wk->wk_type == D_SENTINEL) {
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
		if (wk->wk_state & INPROGRESS)
			panic("process_worklist_item: %p already in progress.",
		wk->wk_state |= INPROGRESS;
		remove_from_worklist(wk);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_worklist_item: suspended filesystem");
		switch (wk->wk_type) {
			/* removal of a directory entry */
			error = handle_workitem_remove(WK_DIRREM(wk), flags);
			/* releasing blocks and/or fragments from a file */
			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
			/* releasing a fragment when replaced as a file grows */
			handle_workitem_freefrag(WK_FREEFRAG(wk));
			/* releasing an inode when its link count drops to 0 */
			handle_workitem_freefile(WK_FREEFILE(wk));
			panic("%s_process_worklist: Unknown type %s",
			    "softdep", TYPENAME(wk->wk_type));
		vn_finished_secondary_write(mp);
		if (++matchcnt == target)
	 * We have to retry the worklist item later. Wake up any
	 * waiters who may be able to complete it immediately and
	 * add the item back to the head so we don't try to execute
	wk->wk_state &= ~INPROGRESS;
	add_to_worklist(wk, WK_HEAD);
	LIST_REMOVE(&sentinel, wk_list);
	/* Sentinel could've become the tail from remove_from_worklist. */
	if (ump->softdep_worklist_tail == &sentinel)
		ump->softdep_worklist_tail =
		    (struct worklist *)sentinel.wk_list.le_prev;
 * Move dependencies from one buffer to another.
softdep_move_dependencies(oldbp, newbp)
	struct worklist *wk, *wktail;
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wk->wk_type == D_BMSAFEMAP &&
		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
			LIST_INSERT_AFTER(wktail, wk, wk_list);
 * Purge the work list of all items associated with a particular mount point.
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	struct vnode *devvp;
	int count, error = 0;
	struct ufsmount *ump;
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. We continue until no more worklist dependencies
	ump = VFSTOUFS(oldmnt);
	devvp = ump->um_devvp;
	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, MNT_WAIT, td);
		VOP_UNLOCK(devvp, 0);
softdep_waitidle(struct mount *mp)
	struct ufsmount *ump;
	for (i = 0; i < 10 && ump->softdep_deps; i++) {
		ump->softdep_req = 1;
		if (ump->softdep_on_worklist)
			panic("softdep_waitidle: work added after flush.");
		msleep(&ump->softdep_deps, &lk, PVM, "softdeps", 1);
	ump->softdep_req = 0;
		printf("softdep_waitidle: Failed to flush worklist for %p\n",
 * Flush all vnodes and worklist items associated with a specified mount point.
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	struct ufsmount *ump;
	int error, early, depcount, loopcnt, retry_flush_count, retry;
	retry_flush_count = 3;
	 * Alternately flush the vnodes associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	for (; loopcnt > 0; loopcnt--) {
		 * Do another flush in case any vnodes were brought in
		 * as part of the cleanup operations.
		early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
		    MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
		if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
		if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
	 * If we are unmounting then it is an error to fail. If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = softdep_waitidle(oldmnt);
	if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0,
		    ("softdep_flushfiles: !MNTK_NOINSMNTQ"));
		morework = oldmnt->mnt_nvnodelistsize > 0;
		ump = VFSTOUFS(oldmnt);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] != NULLVP)
			if (--retry_flush_count > 0) {
		MNT_IUNLOCK(oldmnt);
 * Structure hashing.
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced. It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures. Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
#define	DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define	NODELAY		0x0002	/* cannot do background work */
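/*
 * A minimal sketch (not part of the original file) of the lookup pattern
 * shared by the pagedep/inodedep/newblk routines below: search the hash
 * chain under the softdep lock; if the entry is missing and DEPALLOC was
 * passed, drop the lock, allocate, retake the lock, and search again in
 * case another thread won the race.  lookup_pattern_example() and the
 * EXAMPLE_ONLY guard are hypothetical.
 */
#ifdef EXAMPLE_ONLY
static int
lookup_pattern_example(struct pagedep_hashhead *pagedephd, ino_t ino,
	ufs_lbn_t lbn, struct mount *mp, int flags, struct pagedep **result)
{
	struct pagedep *pagedep;

	mtx_assert(&lk, MA_OWNED);
	if (pagedep_find(pagedephd, ino, lbn, mp, flags, result))
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	FREE_LOCK(&lk);
	pagedep = malloc(sizeof(struct pagedep), M_PAGEDEP,
	    M_SOFTDEP_FLAGS | M_ZERO);
	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
	ACQUIRE_LOCK(&lk);
	if (pagedep_find(pagedephd, ino, lbn, mp, flags, result)) {
		/* Lost the race; discard the duplicate. */
		WORKITEM_FREE(pagedep, D_PAGEDEP);
		return (1);
	}
	/* ... initialize fields and hash the entry, as pagedep_lookup does. */
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	*result = pagedep;
	return (0);
}
#endif /* EXAMPLE_ONLY */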
 * Structures and routines associated with pagedep caching.
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long	pagedep_hash;		/* size of hash table - 1 */
#define	PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp)
	struct pagedep_hashhead *pagedephd;
	struct pagedep **pagedeppp;
	struct pagedep *pagedep;
	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
		if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_list.wk_mp) {
			*pagedeppp = pagedep;
 * Look up a pagedep. Return 1 if found, 0 otherwise.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp)
	struct pagedep **pagedeppp;
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct worklist *wk;
	mtx_assert(&lk, MA_OWNED);
	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
		if (wk->wk_type == D_PAGEDEP) {
			*pagedeppp = WK_PAGEDEP(wk);
	pagedephd = PAGEDEP_HASH(mp, ino, lbn);
	ret = pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp);
		if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp)
			WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list);
	if ((flags & DEPALLOC) == 0)
	pagedep = malloc(sizeof(struct pagedep),
	    M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
	ret = pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp);
		 * This should never happen since we only create pagedeps
		 * with the vnode lock held.  Could be an assert.
		WORKITEM_FREE(pagedep, D_PAGEDEP);
	pagedep->pd_ino = ino;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
	*pagedeppp = pagedep;
 * Structures and routines associated with inodedep caching.
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long	inodedep_hash;	/* size of hash table - 1 */
#define	INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
inodedep_find(inodedephd, fs, inum, inodedeppp)
	struct inodedep_hashhead *inodedephd;
	struct inodedep **inodedeppp;
	struct inodedep *inodedep;
	LIST_FOREACH(inodedep, inodedephd, id_hash)
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
		*inodedeppp = inodedep;
 * Look up an inodedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
inodedep_lookup(mp, inum, flags, inodedeppp)
	struct inodedep **inodedeppp;
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	mtx_assert(&lk, MA_OWNED);
	fs = VFSTOUFS(mp)->um_fs;
	inodedephd = INODEDEP_HASH(fs, inum);
	if (inodedep_find(inodedephd, fs, inum, inodedeppp))
	if ((flags & DEPALLOC) == 0)
	 * If we are over our limit, try to improve the situation.
	if (dep_current[D_INODEDEP] > max_softdeps && (flags & NODELAY) == 0)
		request_cleanup(mp, FLUSH_INODES);
	inodedep = malloc(sizeof(struct inodedep),
	    M_INODEDEP, M_SOFTDEP_FLAGS);
	workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
	if (inodedep_find(inodedephd, fs, inum, inodedeppp)) {
		WORKITEM_FREE(inodedep, D_INODEDEP);
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino1 = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_savedextsize = -1;
	inodedep->id_savednlink = -1;
	inodedep->id_bmsafemap = NULL;
	inodedep->id_mkdiradd = NULL;
	LIST_INIT(&inodedep->id_dirremhd);
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoreflst);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	TAILQ_INIT(&inodedep->id_extupdt);
	TAILQ_INIT(&inodedep->id_newextupdt);
	TAILQ_INIT(&inodedep->id_freeblklst);
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	*inodedeppp = inodedep;
 * Structures and routines associated with newblk caching.
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long	newblk_hash;		/* size of hash table - 1 */
#define	NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
newblk_find(newblkhd, mp, newblkno, flags, newblkpp)
	struct newblk_hashhead *newblkhd;
	ufs2_daddr_t newblkno;
	struct newblk **newblkpp;
	struct newblk *newblk;
	LIST_FOREACH(newblk, newblkhd, nb_hash) {
		if (newblkno != newblk->nb_newblkno)
		if (mp != newblk->nb_list.wk_mp)
		 * If we're creating a new dependency don't match those that
		 * have already been converted to allocdirects. This is for
		if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK)
 * Look up a newblk. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
newblk_lookup(mp, newblkno, flags, newblkpp)
	ufs2_daddr_t newblkno;
	struct newblk **newblkpp;
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;
	newblkhd = NEWBLK_HASH(VFSTOUFS(mp)->um_fs, newblkno);
	if (newblk_find(newblkhd, mp, newblkno, flags, newblkpp))
	if ((flags & DEPALLOC) == 0)
	newblk = malloc(sizeof(union allblk), M_NEWBLK,
	    M_SOFTDEP_FLAGS | M_ZERO);
	workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
	if (newblk_find(newblkhd, mp, newblkno, flags, newblkpp)) {
		WORKITEM_FREE(newblk, D_NEWBLK);
	newblk->nb_freefrag = NULL;
	LIST_INIT(&newblk->nb_indirdeps);
	LIST_INIT(&newblk->nb_newdirblk);
	LIST_INIT(&newblk->nb_jwork);
	newblk->nb_state = ATTACHED;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
 * Structures and routines associated with freed indirect block caching.
struct freeworklst *indir_hashtbl;
u_long	indir_hash;		/* size of hash table - 1 */
#define	INDIR_HASH(mp, blkno) \
	(&indir_hashtbl[((((register_t)(mp)) >> 13) + (blkno)) & indir_hash])
 * Lookup an indirect block in the indir hash table.  The freework is
 * removed and potentially freed.  The caller must do a blocking journal
 * write before writing to the blkno.
indirblk_lookup(mp, blkno)
	struct freework *freework;
	struct freeworklst *wkhd;
	wkhd = INDIR_HASH(mp, blkno);
	TAILQ_FOREACH(freework, wkhd, fw_next) {
		if (freework->fw_blkno != blkno)
		if (freework->fw_list.wk_mp != mp)
		indirblk_remove(freework);
 * Insert an indirect block represented by freework into the indirblk
 * hash table so that it may prevent the block from being re-used prior
 * to the journal being written.
indirblk_insert(freework)
	struct freework *freework;
	struct jblocks *jblocks;
	jblocks = VFSTOUFS(freework->fw_list.wk_mp)->softdep_jblocks;
	jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst);
	LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs);
	TAILQ_INSERT_HEAD(INDIR_HASH(freework->fw_list.wk_mp,
	    freework->fw_blkno), freework, fw_next);
	freework->fw_state &= ~DEPCOMPLETE;
indirblk_remove(freework)
	struct freework *freework;
	LIST_REMOVE(freework, fw_segs);
	TAILQ_REMOVE(INDIR_HASH(freework->fw_list.wk_mp,
	    freework->fw_blkno), freework, fw_next);
	freework->fw_state |= DEPCOMPLETE;
	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
		WORKITEM_FREE(freework, D_FREEWORK);
2356 * Executed during filesystem initialization before
2357 * mounting any filesystems.
2360 softdep_initialize()
2364 LIST_INIT(&mkdirlisthd);
2365 max_softdeps = desiredvnodes * 4;
2366 pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, &pagedep_hash);
2367 inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
2368 newblk_hashtbl = hashinit(desiredvnodes / 5, M_NEWBLK, &newblk_hash);
2369 bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, &bmsafemap_hash);
2370 i = 1 << (ffs(desiredvnodes / 10) - 1);
2371 indir_hashtbl = malloc(i * sizeof(indir_hashtbl[0]), M_FREEWORK,
2374 for (i = 0; i <= indir_hash; i++)
2375 TAILQ_INIT(&indir_hashtbl[i]);
2377 /* Initialize the bioops hack. */
2378 bioops.io_start = softdep_disk_io_initiation;
2379 bioops.io_complete = softdep_disk_write_complete;
2380 bioops.io_deallocate = softdep_deallocate_dependencies;
2381 bioops.io_countdeps = softdep_count_dependencies;
2383 /* Initialize the callout with an mtx. */
2384 callout_init_mtx(&softdep_callout, &lk, 0);
2388 * Executed after all filesystems have been unmounted during
2389 * filesystem module unload.
2392 softdep_uninitialize()
2395 callout_drain(&softdep_callout);
2396 hashdestroy(pagedep_hashtbl, M_PAGEDEP, pagedep_hash);
2397 hashdestroy(inodedep_hashtbl, M_INODEDEP, inodedep_hash);
2398 hashdestroy(newblk_hashtbl, M_NEWBLK, newblk_hash);
2399 hashdestroy(bmsafemap_hashtbl, M_BMSAFEMAP, bmsafemap_hash);
2400 free(indir_hashtbl, M_FREEWORK);
2404 * Called at mount time to notify the dependency code that a
2405 * filesystem wishes to use it.
2408 softdep_mount(devvp, mp, fs, cred)
2409 struct vnode *devvp;
2414 struct csum_total cstotal;
2415 struct ufsmount *ump;
2421 mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
2422 if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
2423 mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
2424 MNTK_SOFTDEP | MNTK_NOASYNC;
2428 LIST_INIT(&ump->softdep_workitem_pending);
2429 LIST_INIT(&ump->softdep_journal_pending);
2430 TAILQ_INIT(&ump->softdep_unlinked);
2431 LIST_INIT(&ump->softdep_dirtycg);
2432 ump->softdep_worklist_tail = NULL;
2433 ump->softdep_on_worklist = 0;
2434 ump->softdep_deps = 0;
2435 if ((fs->fs_flags & FS_SUJ) &&
2436 (error = journal_mount(mp, fs, cred)) != 0) {
2437 printf("Failed to start journal: %d\n", error);
2441 * When doing soft updates, the counters in the
2442 * superblock may have gotten out of sync. Recomputation
2443 * can take a long time and can be deferred for background
2444 * fsck. However, the old behavior of scanning the cylinder
2445 * groups and recalculating them at mount time is available
2446 * by setting vfs.ffs.compute_summary_at_mount to one.
2448 if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
2450 bzero(&cstotal, sizeof cstotal);
2451 for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
2452 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
2453 fs->fs_cgsize, cred, &bp)) != 0) {
2457 cgp = (struct cg *)bp->b_data;
2458 cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
2459 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
2460 cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
2461 cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
2462 fs->fs_cs(fs, cyl) = cgp->cg_cs;
2466 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
2467 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
2469 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
2479 mp->mnt_flag &= ~MNT_SOFTDEP;
2480 if (MOUNTEDSUJ(mp) == 0) {
2484 mp->mnt_flag &= ~MNT_SUJ;
2486 journal_unmount(mp);
2489 static struct jblocks *
2490 jblocks_create(void)
2492 struct jblocks *jblocks;
2494 jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO);
2495 TAILQ_INIT(&jblocks->jb_segs);
2496 jblocks->jb_avail = 10;
2497 jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2498 M_JBLOCKS, M_WAITOK | M_ZERO);
2504 jblocks_alloc(jblocks, bytes, actual)
2505 struct jblocks *jblocks;
2510 struct jextent *jext;
2514 blocks = bytes / DEV_BSIZE;
2515 jext = &jblocks->jb_extent[jblocks->jb_head];
2516 freecnt = jext->je_blocks - jblocks->jb_off;
2518 jblocks->jb_off = 0;
2519 if (++jblocks->jb_head > jblocks->jb_used)
2520 jblocks->jb_head = 0;
2521 jext = &jblocks->jb_extent[jblocks->jb_head];
2522 freecnt = jext->je_blocks;
2524 if (freecnt > blocks)
2526 *actual = freecnt * DEV_BSIZE;
2527 daddr = jext->je_daddr + jblocks->jb_off;
2528 jblocks->jb_off += freecnt;
2529 jblocks->jb_free -= freecnt;
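/*
 * Worked example (illustrative numbers): if the current extent holds 100
 * blocks and jb_off is 90, a request for 8192 bytes (16 blocks) is
 * truncated to the 10 blocks left in the extent; *actual is set to 5120
 * and je_daddr + 90 is returned.  The next allocation rolls over to the
 * following extent.
 */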
2535 jblocks_free(jblocks, mp, bytes)
2536 struct jblocks *jblocks;
2541 jblocks->jb_free += bytes / DEV_BSIZE;
2542 if (jblocks->jb_suspended)
2548 jblocks_destroy(jblocks)
2549 struct jblocks *jblocks;
2552 if (jblocks->jb_extent)
2553 free(jblocks->jb_extent, M_JBLOCKS);
2554 free(jblocks, M_JBLOCKS);
2558 jblocks_add(jblocks, daddr, blocks)
2559 struct jblocks *jblocks;
2563 struct jextent *jext;
2565 jblocks->jb_blocks += blocks;
2566 jblocks->jb_free += blocks;
2567 jext = &jblocks->jb_extent[jblocks->jb_used];
2568 /* Adding the first block. */
2569 if (jext->je_daddr == 0) {
2570 jext->je_daddr = daddr;
2571 jext->je_blocks = blocks;
2574 /* Extending the last extent. */
2575 if (jext->je_daddr + jext->je_blocks == daddr) {
2576 jext->je_blocks += blocks;
2579 /* Adding a new extent. */
2580 if (++jblocks->jb_used == jblocks->jb_avail) {
2581 jblocks->jb_avail *= 2;
2582 jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2583 M_JBLOCKS, M_WAITOK | M_ZERO);
2584 memcpy(jext, jblocks->jb_extent,
2585 sizeof(struct jextent) * jblocks->jb_used);
2586 free(jblocks->jb_extent, M_JBLOCKS);
2587 jblocks->jb_extent = jext;
2589 jext = &jblocks->jb_extent[jblocks->jb_used];
2590 jext->je_daddr = daddr;
2591 jext->je_blocks = blocks;
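/*
 * Illustrative sequence (hypothetical addresses): jblocks_add(jb, 1000, 8)
 * records extent (1000, 8); jblocks_add(jb, 1008, 8) is contiguous, so the
 * extent simply grows to (1000, 16); jblocks_add(jb, 2000, 8) starts a new
 * extent, doubling the extent array first when it is full.
 */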
2596 softdep_journal_lookup(mp, vpp)
2600 struct componentname cnp;
2605 error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp);
2608 bzero(&cnp, sizeof(cnp));
2609 cnp.cn_nameiop = LOOKUP;
2610 cnp.cn_flags = ISLASTCN;
2611 cnp.cn_thread = curthread;
2612 cnp.cn_cred = curthread->td_ucred;
2613 cnp.cn_pnbuf = SUJ_FILE;
2614 cnp.cn_nameptr = SUJ_FILE;
2615 cnp.cn_namelen = strlen(SUJ_FILE);
2616 error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
2620 error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
2625 * Open and verify the journal file.
2628 journal_mount(mp, fs, cred)
2633 struct jblocks *jblocks;
2641 error = softdep_journal_lookup(mp, &vp);
2643 printf("Failed to find journal. Use tunefs to create one\n");
2647 if (ip->i_size < SUJ_MIN) {
2651 bcount = lblkno(fs, ip->i_size); /* Only use whole blocks. */
2652 jblocks = jblocks_create();
2653 for (i = 0; i < bcount; i++) {
2654 error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
2657 jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
2660 jblocks_destroy(jblocks);
2663 jblocks->jb_low = jblocks->jb_free / 3; /* Reserve 33%. */
2664 jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
2665 VFSTOUFS(mp)->softdep_jblocks = jblocks;
2669 mp->mnt_flag |= MNT_SUJ;
2670 mp->mnt_flag &= ~MNT_SOFTDEP;
2673 * Only validate the journal contents if the
2674 * filesystem is clean, otherwise we write the logs
2675 * but they'll never be used. If the filesystem was
2676 * still dirty when we mounted it the journal is
2677 * invalid and a new journal can only be valid if it
2678 * starts from a clean mount.
2681 DIP_SET(ip, i_modrev, fs->fs_mtime);
2682 ip->i_flags |= IN_MODIFIED;
2694 struct ufsmount *ump;
2697 if (ump->softdep_jblocks)
2698 jblocks_destroy(ump->softdep_jblocks);
2699 ump->softdep_jblocks = NULL;
2703 * Called when a journal record is ready to be written. Space is allocated
2704 * and the journal entry is created when the journal is flushed to stable store.
2709 struct worklist *wk;
2711 struct ufsmount *ump;
2713 mtx_assert(&lk, MA_OWNED);
2714 ump = VFSTOUFS(wk->wk_mp);
2715 if (wk->wk_state & ONWORKLIST)
2716 panic("add_to_journal: %s(0x%X) already on list",
2717 TYPENAME(wk->wk_type), wk->wk_state);
2718 wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2719 if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2720 ump->softdep_jblocks->jb_age = ticks;
2721 LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2723 LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2724 ump->softdep_journal_tail = wk;
2725 ump->softdep_on_journal += 1;
2729 * Remove an arbitrary item from the journal worklist, maintaining the tail
2730 * pointer. This happens when a new operation obviates the need to
2731 * journal an old operation.
2734 remove_from_journal(wk)
2735 struct worklist *wk;
2737 struct ufsmount *ump;
2739 mtx_assert(&lk, MA_OWNED);
2740 ump = VFSTOUFS(wk->wk_mp);
2743 struct worklist *wkn;
2745 LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
2749 panic("remove_from_journal: %p is not in journal", wk);
2753 * We emulate a TAILQ to save space in most structures which do not
2754 * require TAILQ semantics. Here we must update the tail position
2755 * when removing the tail which is not the final entry. This works
2756 * only if the worklist linkage is at the beginning of the structure.
2758 if (ump->softdep_journal_tail == wk)
2759 ump->softdep_journal_tail =
2760 (struct worklist *)wk->wk_list.le_prev;
2762 WORKLIST_REMOVE(wk);
2763 ump->softdep_on_journal -= 1;
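/*
 * Sketch of the layout assumption behind the cast above (field names
 * from this file): le_prev points at the previous entry's le_next
 * pointer, which sits at offset zero of its worklist, which in turn
 * sits at offset zero of every structure linked here:
 *
 *	struct worklist { LIST_ENTRY(worklist) wk_list; ... };
 *
 * so (struct worklist *)wk->wk_list.le_prev resolves to the previous
 * item itself.
 */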
2767 * Check for journal space as well as dependency limits so the prelink
2768 * code can throttle both journaled and non-journaled filesystems.
2769 * Threshold is 0 for low and 1 for min.
2772 journal_space(ump, thresh)
2773 struct ufsmount *ump;
2776 struct jblocks *jblocks;
2779 jblocks = ump->softdep_jblocks;
2780 if (jblocks == NULL)
2783 * We use a tighter restriction here to prevent request_cleanup(),
2784 * running in other threads, from running into locks we currently hold.
2786 if (dep_current[D_INODEDEP] > (max_softdeps / 10) * 9)
2789 thresh = jblocks->jb_min;
2791 thresh = jblocks->jb_low;
2792 avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
2793 avail = jblocks->jb_free - avail;
2795 return (avail > thresh);
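/*
 * The space computation above, worked through (illustrative numbers,
 * assuming JREC_SIZE is 32 bytes): 1000 pending journal records on a
 * 512-byte device reserve (1000 * 32) / 512 = 62 blocks, so with 4000
 * free journal blocks avail = 4000 - 62 = 3938 is what gets compared
 * against the requested threshold.
 */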
2799 journal_suspend(ump)
2800 struct ufsmount *ump;
2802 struct jblocks *jblocks;
2806 jblocks = ump->softdep_jblocks;
2808 if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
2810 mp->mnt_kern_flag |= MNTK_SUSPEND;
2811 mp->mnt_susp_owner = FIRST_THREAD_IN_PROC(softdepproc);
2813 jblocks->jb_suspended = 1;
2818 journal_unsuspend(struct ufsmount *ump)
2820 struct jblocks *jblocks;
2824 jblocks = ump->softdep_jblocks;
2826 if (jblocks != NULL && jblocks->jb_suspended &&
2827 journal_space(ump, jblocks->jb_min)) {
2828 jblocks->jb_suspended = 0;
2830 mp->mnt_susp_owner = curthread;
2831 vfs_write_resume(mp);
2839 * Called before any allocation function to be certain that there is
2840 * sufficient space in the journal prior to creating any new records.
2841 * Since in the case of block allocation we may have multiple locked
2842 * buffers at the time of the actual allocation we can not block
2843 * when the journal records are created. Doing so would create a deadlock
2844 * if any of these buffers needed to be flushed to reclaim space. Instead
2845 * we require a sufficiently large amount of available space such that
2846 * each thread in the system could have passed this allocation check and
2847 * still have sufficient free space. With 20% of a minimum journal size
2848 * of 1MB we have 6553 records available.
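 * The arithmetic, spelled out (assuming a 32-byte JREC_SIZE): a 1MB
 * journal holds 1048576 / 32 = 32768 records, and 20% of those is 6553.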
2851 softdep_prealloc(vp, waitok)
2855 struct ufsmount *ump;
2858 * Nothing to do if we are not running journaled soft updates.
2859 * If we currently hold the snapshot lock, we must avoid handling
2860 * other resources that could cause deadlock.
2862 if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)))
2864 ump = VFSTOUFS(vp->v_mount);
2866 if (journal_space(ump, 0)) {
2872 if (waitok == MNT_NOWAIT)
2875 * Attempt to sync this vnode once to flush any journal
2876 * work attached to it.
2878 if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
2879 ffs_syncvnode(vp, waitok, 0);
2881 process_removes(vp);
2882 process_truncates(vp);
2883 if (journal_space(ump, 0) == 0) {
2885 if (journal_space(ump, 1) == 0)
2886 journal_suspend(ump);
2894 * Before adjusting a link count on a vnode verify that we have sufficient
2895 * journal space. If not, process operations that depend on the currently
2896 * locked pair of vnodes to try to flush space as the syncer, buf daemon,
2897 * and softdep flush threads can not acquire these locks to reclaim space.
2900 softdep_prelink(dvp, vp)
2904 struct ufsmount *ump;
2906 ump = VFSTOUFS(dvp->v_mount);
2907 mtx_assert(&lk, MA_OWNED);
2909 * Nothing to do if we have sufficient journal space.
2910 * If we currently hold the snapshot lock, we must avoid
2911 * handling other resources that could cause deadlock.
2913 if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp))))
2918 ffs_syncvnode(vp, MNT_NOWAIT, 0);
2919 ffs_syncvnode(dvp, MNT_WAIT, 0);
2921 /* Process vp before dvp as it may create .. removes. */
2923 process_removes(vp);
2924 process_truncates(vp);
2926 process_removes(dvp);
2927 process_truncates(dvp);
2929 process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
2930 if (journal_space(ump, 0) == 0) {
2932 if (journal_space(ump, 1) == 0)
2933 journal_suspend(ump);
2938 jseg_write(ump, jseg, data)
2939 struct ufsmount *ump;
2943 struct jsegrec *rec;
2945 rec = (struct jsegrec *)data;
2946 rec->jsr_seq = jseg->js_seq;
2947 rec->jsr_oldest = jseg->js_oldseq;
2948 rec->jsr_cnt = jseg->js_cnt;
2949 rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
2951 rec->jsr_time = ump->um_fs->fs_mtime;
2955 inoref_write(inoref, jseg, rec)
2956 struct inoref *inoref;
2958 struct jrefrec *rec;
2961 inoref->if_jsegdep->jd_seg = jseg;
2962 rec->jr_ino = inoref->if_ino;
2963 rec->jr_parent = inoref->if_parent;
2964 rec->jr_nlink = inoref->if_nlink;
2965 rec->jr_mode = inoref->if_mode;
2966 rec->jr_diroff = inoref->if_diroff;
2970 jaddref_write(jaddref, jseg, data)
2971 struct jaddref *jaddref;
2975 struct jrefrec *rec;
2977 rec = (struct jrefrec *)data;
2978 rec->jr_op = JOP_ADDREF;
2979 inoref_write(&jaddref->ja_ref, jseg, rec);
2983 jremref_write(jremref, jseg, data)
2984 struct jremref *jremref;
2988 struct jrefrec *rec;
2990 rec = (struct jrefrec *)data;
2991 rec->jr_op = JOP_REMREF;
2992 inoref_write(&jremref->jr_ref, jseg, rec);
2996 jmvref_write(jmvref, jseg, data)
2997 struct jmvref *jmvref;
3003 rec = (struct jmvrec *)data;
3004 rec->jm_op = JOP_MVREF;
3005 rec->jm_ino = jmvref->jm_ino;
3006 rec->jm_parent = jmvref->jm_parent;
3007 rec->jm_oldoff = jmvref->jm_oldoff;
3008 rec->jm_newoff = jmvref->jm_newoff;
3012 jnewblk_write(jnewblk, jseg, data)
3013 struct jnewblk *jnewblk;
3017 struct jblkrec *rec;
3019 jnewblk->jn_jsegdep->jd_seg = jseg;
3020 rec = (struct jblkrec *)data;
3021 rec->jb_op = JOP_NEWBLK;
3022 rec->jb_ino = jnewblk->jn_ino;
3023 rec->jb_blkno = jnewblk->jn_blkno;
3024 rec->jb_lbn = jnewblk->jn_lbn;
3025 rec->jb_frags = jnewblk->jn_frags;
3026 rec->jb_oldfrags = jnewblk->jn_oldfrags;
3030 jfreeblk_write(jfreeblk, jseg, data)
3031 struct jfreeblk *jfreeblk;
3035 struct jblkrec *rec;
3037 jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3038 rec = (struct jblkrec *)data;
3039 rec->jb_op = JOP_FREEBLK;
3040 rec->jb_ino = jfreeblk->jf_ino;
3041 rec->jb_blkno = jfreeblk->jf_blkno;
3042 rec->jb_lbn = jfreeblk->jf_lbn;
3043 rec->jb_frags = jfreeblk->jf_frags;
3044 rec->jb_oldfrags = 0;
3048 jfreefrag_write(jfreefrag, jseg, data)
3049 struct jfreefrag *jfreefrag;
3053 struct jblkrec *rec;
3055 jfreefrag->fr_jsegdep->jd_seg = jseg;
3056 rec = (struct jblkrec *)data;
3057 rec->jb_op = JOP_FREEBLK;
3058 rec->jb_ino = jfreefrag->fr_ino;
3059 rec->jb_blkno = jfreefrag->fr_blkno;
3060 rec->jb_lbn = jfreefrag->fr_lbn;
3061 rec->jb_frags = jfreefrag->fr_frags;
3062 rec->jb_oldfrags = 0;
3066 jtrunc_write(jtrunc, jseg, data)
3067 struct jtrunc *jtrunc;
3071 struct jtrncrec *rec;
3073 jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3074 rec = (struct jtrncrec *)data;
3075 rec->jt_op = JOP_TRUNC;
3076 rec->jt_ino = jtrunc->jt_ino;
3077 rec->jt_size = jtrunc->jt_size;
3078 rec->jt_extsize = jtrunc->jt_extsize;
3082 jfsync_write(jfsync, jseg, data)
3083 struct jfsync *jfsync;
3087 struct jtrncrec *rec;
3089 rec = (struct jtrncrec *)data;
3090 rec->jt_op = JOP_SYNC;
3091 rec->jt_ino = jfsync->jfs_ino;
3092 rec->jt_size = jfsync->jfs_size;
3093 rec->jt_extsize = jfsync->jfs_extsize;
3097 softdep_flushjournal(mp)
3100 struct jblocks *jblocks;
3101 struct ufsmount *ump;
3103 if (MOUNTEDSUJ(mp) == 0)
3106 jblocks = ump->softdep_jblocks;
3108 while (ump->softdep_on_journal) {
3109 jblocks->jb_needseg = 1;
3110 softdep_process_journal(mp, NULL, MNT_WAIT);
3115 static void softdep_synchronize_completed(struct bio *);
3116 static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3119 softdep_synchronize_completed(bp)
3122 struct jseg *oldest;
3126 * caller1 marks the last segment written before we issued the
3127 * synchronize cache.
3129 jseg = bp->bio_caller1;
3133 * Mark all the journal entries waiting on the synchronize cache
3134 * as completed so they may continue on.
3136 while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3137 jseg->js_state |= COMPLETE;
3139 jseg = TAILQ_PREV(jseg, jseglst, js_next);
3142 * Restart deferred journal entry processing from the oldest completed segment.
3146 complete_jsegs(oldest);
3153 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3154 * barriers. The journal must be written prior to any blocks that depend
3155 * on it and the journal can not be released until the blocks have been
3156 * written. This code handles both barriers simultaneously.
3159 softdep_synchronize(bp, ump, caller1)
3161 struct ufsmount *ump;
3165 bp->bio_cmd = BIO_FLUSH;
3166 bp->bio_flags |= BIO_ORDERED;
3167 bp->bio_data = NULL;
3168 bp->bio_offset = ump->um_cp->provider->mediasize;
3170 bp->bio_done = softdep_synchronize_completed;
3171 bp->bio_caller1 = caller1;
3173 (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
3177 * Flush some journal records to disk.
3180 softdep_process_journal(mp, needwk, flags)
3182 struct worklist *needwk;
3185 struct jblocks *jblocks;
3186 struct ufsmount *ump;
3187 struct worklist *wk;
3195 int jrecmin; /* Minimum records per block. */
3196 int jrecmax; /* Maximum records per block. */
3202 if (MOUNTEDSUJ(mp) == 0)
3204 shouldflush = softdep_flushcache;
3209 jblocks = ump->softdep_jblocks;
3210 devbsize = ump->um_devvp->v_bufobj.bo_bsize;
3212 * We write anywhere between a disk block and fs block. The upper
3213 * bound is picked to prevent buffer cache fragmentation and limit
3214 * processing time per I/O.
3216 jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */
3217 jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
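/*
 * Worked example (typical values, not guaranteed): with 512-byte device
 * blocks and a 32-byte JREC_SIZE, jrecmin = (512 / 32) - 1 = 15 records
 * per device block after its segment header; with a 16K filesystem
 * block, jrecmax = (16384 / 512) * 15 = 480 records per segment write.
 */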
3220 cnt = ump->softdep_on_journal;
3222 * Criteria for writing a segment:
3223 * 1) We have a full block.
3224 * 2) We're called from jwait() and haven't found the journal entry yet.
3226 * 3) Always write if needseg is set.
3227 * 4) If we are called from process_worklist and have
3228 * not yet written anything we write a partial block
3229 * to enforce a 1 second maximum latency on journal entries.
3232 if (cnt < (jrecmax - 1) && needwk == NULL &&
3233 jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
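/*
 * The test above is the negation of the four criteria: we write when
 * the block is full (cnt >= jrecmax - 1), when jwait() is still waiting
 * on needwk, when needseg was set, or when records are pending but
 * nothing has been written yet this pass.
 */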
3237 * Verify some free journal space. softdep_prealloc() should
3238 * guarantee that we don't run out so this is indicative of
3239 * a problem with the flow control. Try to recover
3240 * gracefully in any event.
3242 while (jblocks->jb_free == 0) {
3243 if (flags != MNT_WAIT)
3245 printf("softdep: Out of journal space!\n");
3247 msleep(jblocks, &lk, PRIBIO, "jblocks", hz);
3250 jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
3251 workitem_alloc(&jseg->js_list, D_JSEG, mp);
3252 LIST_INIT(&jseg->js_entries);
3253 LIST_INIT(&jseg->js_indirs);
3254 jseg->js_state = ATTACHED;
3255 if (shouldflush == 0)
3256 jseg->js_state |= COMPLETE;
3257 else if (bio == NULL)
3258 bio = g_alloc_bio();
3259 jseg->js_jblocks = jblocks;
3260 bp = geteblk(fs->fs_bsize, 0);
3263 * If there was a race while we were allocating the block
3264 * and jseg, the entry we care about was likely written.
3265 * We bail out in both the WAIT and NOWAIT case and assume
3266 * the caller will loop if the entry it cares about is not written.
3269 cnt = ump->softdep_on_journal;
3270 if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3271 bp->b_flags |= B_INVAL | B_NOCACHE;
3272 WORKITEM_FREE(jseg, D_JSEG);
3279 * Calculate the disk block size required for the available
3280 * records rounded to the min size.
3284 else if (cnt < jrecmax)
3285 size = howmany(cnt, jrecmin) * devbsize;
3287 size = fs->fs_bsize;
3289 * Allocate a disk block for this journal data and account
3290 * for truncation of the requested size if enough contiguous
3291 * space was not available.
3293 bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3294 bp->b_lblkno = bp->b_blkno;
3295 bp->b_offset = bp->b_blkno * DEV_BSIZE;
3296 bp->b_bcount = size;
3297 bp->b_bufobj = &ump->um_devvp->v_bufobj;
3298 bp->b_flags &= ~B_INVAL;
3299 bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3301 * Initialize our jseg with cnt records. Assign the next
3302 * sequence number to it and link it in-order.
3304 cnt = MIN(cnt, (size / devbsize) * jrecmin);
3307 jseg->js_refs = cnt + 1; /* Self ref. */
3308 jseg->js_size = size;
3309 jseg->js_seq = jblocks->jb_nextseq++;
3310 if (jblocks->jb_oldestseg == NULL)
3311 jblocks->jb_oldestseg = jseg;
3312 jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3313 TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3314 if (jblocks->jb_writeseg == NULL)
3315 jblocks->jb_writeseg = jseg;
3317 * Start filling in records from the pending list.
3321 while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3325 /* Place a segment header on every device block. */
3326 if ((off % devbsize) == 0) {
3327 jseg_write(ump, jseg, data);
3329 data = bp->b_data + off;
3333 remove_from_journal(wk);
3334 wk->wk_state |= INPROGRESS;
3335 WORKLIST_INSERT(&jseg->js_entries, wk);
3336 switch (wk->wk_type) {
3338 jaddref_write(WK_JADDREF(wk), jseg, data);
3341 jremref_write(WK_JREMREF(wk), jseg, data);
3344 jmvref_write(WK_JMVREF(wk), jseg, data);
3347 jnewblk_write(WK_JNEWBLK(wk), jseg, data);
3350 jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
3353 jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
3356 jtrunc_write(WK_JTRUNC(wk), jseg, data);
3359 jfsync_write(WK_JFSYNC(wk), jseg, data);
3362 panic("process_journal: Unknown type %s",
3363 TYPENAME(wk->wk_type));
3367 data = bp->b_data + off;
3371 * Write this one buffer and continue.
3374 jblocks->jb_needseg = 0;
3375 WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
3377 BO_LOCK(bp->b_bufobj);
3378 bgetvp(ump->um_devvp, bp);
3379 BO_UNLOCK(bp->b_bufobj);
3381 * We only do the blocking wait once we find the journal
3382 * entry we're looking for.
3384 if (needwk == NULL && flags == MNT_WAIT)
3391 * If we wrote a segment, issue a synchronize cache so the journal
3392 * is reflected on disk before the data is written. Since reclaiming
3393 * journal space also requires writing a journal record this
3394 * process also enforces a barrier before reclamation.
3396 if (segwritten && shouldflush) {
3397 softdep_synchronize(bio, ump,
3398 TAILQ_LAST(&jblocks->jb_segs, jseglst));
3402 * If we've suspended the filesystem because we ran out of journal
3403 * space, either try to sync it here to make some progress or
3404 * unsuspend it if we already have.
3406 if (flags == 0 && jblocks->jb_suspended) {
3407 if (journal_unsuspend(ump))
3410 VFS_SYNC(mp, MNT_NOWAIT);
3411 ffs_sbupdate(ump, MNT_WAIT, 0);
3417 * Complete a jseg, allowing all dependencies awaiting journal writes
3418 * to proceed. Each journal dependency also attaches a jsegdep to dependent
3419 * structures so that the journal segment can be freed to reclaim space.
3425 struct worklist *wk;
3426 struct jmvref *jmvref;
3432 while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
3433 WORKLIST_REMOVE(wk);
3434 waiting = wk->wk_state & IOWAITING;
3435 wk->wk_state &= ~(INPROGRESS | IOWAITING);
3436 wk->wk_state |= COMPLETE;
3437 KASSERT(i++ < jseg->js_cnt,
3438 ("handle_written_jseg: overflow %d >= %d",
3439 i - 1, jseg->js_cnt));
3440 switch (wk->wk_type) {
3442 handle_written_jaddref(WK_JADDREF(wk));
3445 handle_written_jremref(WK_JREMREF(wk));
3448 rele_jseg(jseg); /* No jsegdep. */
3449 jmvref = WK_JMVREF(wk);
3450 LIST_REMOVE(jmvref, jm_deps);
3451 if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
3452 free_pagedep(jmvref->jm_pagedep);
3453 WORKITEM_FREE(jmvref, D_JMVREF);
3456 handle_written_jnewblk(WK_JNEWBLK(wk));
3459 handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
3462 handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
3465 rele_jseg(jseg); /* No jsegdep. */
3466 WORKITEM_FREE(wk, D_JFSYNC);
3469 handle_written_jfreefrag(WK_JFREEFRAG(wk));
3472 panic("handle_written_jseg: Unknown type %s",
3473 TYPENAME(wk->wk_type));
3479 /* Release the self reference so the structure may be freed. */
3484 * Determine which jsegs are ready for completion processing. Waits for
3485 * synchronize cache to complete as well as forcing in-order completion
3486 * of journal entries.
3489 complete_jsegs(jseg)
3492 struct jblocks *jblocks;
3495 jblocks = jseg->js_jblocks;
3497 * Don't allow out of order completions. If this isn't the first
3498 * block, wait for it to write before we're done.
3500 if (jseg != jblocks->jb_writeseg)
3502 /* Iterate through available jsegs processing their entries. */
3503 while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
3504 jblocks->jb_oldestwrseq = jseg->js_oldseq;
3505 jsegn = TAILQ_NEXT(jseg, js_next);
3506 complete_jseg(jseg);
3509 jblocks->jb_writeseg = jseg;
3511 * Attempt to free jsegs now that oldestwrseq may have advanced.
3513 free_jsegs(jblocks);
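/*
 * Illustrative scenario: if segments A, B and C were issued in that
 * order and C's write finishes first, C is deferred here because
 * jb_writeseg still points at A; once A completes, a single call walks
 * forward and processes every ALLCOMPLETE segment in order.
 */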
3517 * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle
3518 * the final completions.
3521 handle_written_jseg(jseg, bp)
3526 if (jseg->js_refs == 0)
3527 panic("handle_written_jseg: No self-reference on %p", jseg);
3528 jseg->js_state |= DEPCOMPLETE;
3530 * We'll never need this buffer again; set flags so it will be discarded.
3533 bp->b_flags |= B_INVAL | B_NOCACHE;
3534 complete_jsegs(jseg);
3537 static inline struct jsegdep *
3539 struct inoref *inoref;
3541 struct jsegdep *jsegdep;
3543 jsegdep = inoref->if_jsegdep;
3544 inoref->if_jsegdep = NULL;
3550 * Called once a jremref has made it to stable store. The jremref is marked
3551 * complete and we attempt to free it. Any pagedep writes sleeping while
3552 * waiting for the jremref to complete will be awoken by free_jremref.
3555 handle_written_jremref(jremref)
3556 struct jremref *jremref;
3558 struct inodedep *inodedep;
3559 struct jsegdep *jsegdep;
3560 struct dirrem *dirrem;
3562 /* Grab the jsegdep. */
3563 jsegdep = inoref_jseg(&jremref->jr_ref);
3565 * Remove us from the inoref list.
3567 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
3569 panic("handle_written_jremref: Lost inodedep");
3570 TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
3572 * Complete the dirrem.
3574 dirrem = jremref->jr_dirrem;
3575 jremref->jr_dirrem = NULL;
3576 LIST_REMOVE(jremref, jr_deps);
3577 jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT;
3578 jwork_insert(&dirrem->dm_jwork, jsegdep);
3579 if (LIST_EMPTY(&dirrem->dm_jremrefhd) &&
3580 (dirrem->dm_state & COMPLETE) != 0)
3581 add_to_worklist(&dirrem->dm_list, 0);
3582 free_jremref(jremref);
3586 * Called once a jaddref has made it to stable store. The dependency is
3587 * marked complete and any dependent structures are added to the inode
3588 * bufwait list to be completed as soon as it is written. If a bitmap write
3589 * depends on this entry we move the inode into the inodedephd of the
3590 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap.
3593 handle_written_jaddref(jaddref)
3594 struct jaddref *jaddref;
3596 struct jsegdep *jsegdep;
3597 struct inodedep *inodedep;
3598 struct diradd *diradd;
3599 struct mkdir *mkdir;
3601 /* Grab the jsegdep. */
3602 jsegdep = inoref_jseg(&jaddref->ja_ref);
3605 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
3607 panic("handle_written_jaddref: Lost inodedep.");
3608 if (jaddref->ja_diradd == NULL)
3609 panic("handle_written_jaddref: No dependency");
3610 if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) {
3611 diradd = jaddref->ja_diradd;
3612 WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list);
3613 } else if (jaddref->ja_state & MKDIR_PARENT) {
3614 mkdir = jaddref->ja_mkdir;
3615 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list);
3616 } else if (jaddref->ja_state & MKDIR_BODY)
3617 mkdir = jaddref->ja_mkdir;
3619 panic("handle_written_jaddref: Unknown dependency %p",
3620 jaddref->ja_diradd);
3621 jaddref->ja_diradd = NULL; /* also clears ja_mkdir */
3623 * Remove us from the inode list.
3625 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps);
3627 * The mkdir may be waiting on the jaddref to clear before freeing.
3630 KASSERT(mkdir->md_list.wk_type == D_MKDIR,
3631 ("handle_written_jaddref: Incorrect type for mkdir %s",
3632 TYPENAME(mkdir->md_list.wk_type)));
3633 mkdir->md_jaddref = NULL;
3634 diradd = mkdir->md_diradd;
3635 mkdir->md_state |= DEPCOMPLETE;
3636 complete_mkdir(mkdir);
3638 jwork_insert(&diradd->da_jwork, jsegdep);
3639 if (jaddref->ja_state & NEWBLOCK) {
3640 inodedep->id_state |= ONDEPLIST;
3641 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd,
3644 free_jaddref(jaddref);
3648 * Called once a jnewblk journal is written. The allocdirect or allocindir
3649 * is placed in the bmsafemap to await notification of a written bitmap. If
3650 * the operation was canceled we add the segdep to the appropriate
3651 * dependency to free the journal space once the canceling operation has completed.
3655 handle_written_jnewblk(jnewblk)
3656 struct jnewblk *jnewblk;
3658 struct bmsafemap *bmsafemap;
3659 struct freefrag *freefrag;
3660 struct freework *freework;
3661 struct jsegdep *jsegdep;
3662 struct newblk *newblk;
3664 /* Grab the jsegdep. */
3665 jsegdep = jnewblk->jn_jsegdep;
3666 jnewblk->jn_jsegdep = NULL;
3667 if (jnewblk->jn_dep == NULL)
3668 panic("handle_written_jnewblk: No dependency for the segdep.");
3669 switch (jnewblk->jn_dep->wk_type) {
3674 * Add the written block to the bmsafemap so it can
3675 * be notified when the bitmap is on disk.
3677 newblk = WK_NEWBLK(jnewblk->jn_dep);
3678 newblk->nb_jnewblk = NULL;
3679 if ((newblk->nb_state & GOINGAWAY) == 0) {
3680 bmsafemap = newblk->nb_bmsafemap;
3681 newblk->nb_state |= ONDEPLIST;
3682 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk,
3685 jwork_insert(&newblk->nb_jwork, jsegdep);
3689 * A newblock being removed by a freefrag when replaced by allocation or frag extension.
3692 freefrag = WK_FREEFRAG(jnewblk->jn_dep);
3693 freefrag->ff_jdep = NULL;
3694 jwork_insert(&freefrag->ff_jwork, jsegdep);
3698 * A direct block was removed by truncate.
3700 freework = WK_FREEWORK(jnewblk->jn_dep);
3701 freework->fw_jnewblk = NULL;
3702 jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep);
3705 panic("handle_written_jnewblk: Unknown type %d.",
3706 jnewblk->jn_dep->wk_type);
3708 jnewblk->jn_dep = NULL;
3709 free_jnewblk(jnewblk);
3713 * Cancel a jfreefrag that won't be needed, probably due to colliding with
3714 * an in-flight allocation that has not yet been committed. Divorce us
3715 * from the freefrag and mark it DEPCOMPLETE so that it may be added to the worklist.
3719 cancel_jfreefrag(jfreefrag)
3720 struct jfreefrag *jfreefrag;
3722 struct freefrag *freefrag;
3724 if (jfreefrag->fr_jsegdep) {
3725 free_jsegdep(jfreefrag->fr_jsegdep);
3726 jfreefrag->fr_jsegdep = NULL;
3728 freefrag = jfreefrag->fr_freefrag;
3729 jfreefrag->fr_freefrag = NULL;
3730 free_jfreefrag(jfreefrag);
3731 freefrag->ff_state |= DEPCOMPLETE;
3732 CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno);
3736 * Free a jfreefrag when the parent freefrag is rendered obsolete.
3739 free_jfreefrag(jfreefrag)
3740 struct jfreefrag *jfreefrag;
3743 if (jfreefrag->fr_state & INPROGRESS)
3744 WORKLIST_REMOVE(&jfreefrag->fr_list);
3745 else if (jfreefrag->fr_state & ONWORKLIST)
3746 remove_from_journal(&jfreefrag->fr_list);
3747 if (jfreefrag->fr_freefrag != NULL)
3748 panic("free_jfreefrag: Still attached to a freefrag.");
3749 WORKITEM_FREE(jfreefrag, D_JFREEFRAG);
3753 * Called when the journal write for a jfreefrag completes. The parent
3754 * freefrag is added to the worklist if this completes its dependencies.
3757 handle_written_jfreefrag(jfreefrag)
3758 struct jfreefrag *jfreefrag;
3760 struct jsegdep *jsegdep;
3761 struct freefrag *freefrag;
3763 /* Grab the jsegdep. */
3764 jsegdep = jfreefrag->fr_jsegdep;
3765 jfreefrag->fr_jsegdep = NULL;
3766 freefrag = jfreefrag->fr_freefrag;
3767 if (freefrag == NULL)
3768 panic("handle_written_jfreefrag: No freefrag.");
3769 freefrag->ff_state |= DEPCOMPLETE;
3770 freefrag->ff_jdep = NULL;
3771 jwork_insert(&freefrag->ff_jwork, jsegdep);
3772 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
3773 add_to_worklist(&freefrag->ff_list, 0);
3774 jfreefrag->fr_freefrag = NULL;
3775 free_jfreefrag(jfreefrag);
3779 * Called when the journal write for a jfreeblk completes. The jfreeblk
3780 * is removed from the freeblks list of pending journal writes and the
3781 * jsegdep is moved to the freeblks jwork to be completed when all blocks
3782 * have been reclaimed.
3785 handle_written_jblkdep(jblkdep)
3786 struct jblkdep *jblkdep;
3788 struct freeblks *freeblks;
3789 struct jsegdep *jsegdep;
3791 /* Grab the jsegdep. */
3792 jsegdep = jblkdep->jb_jsegdep;
3793 jblkdep->jb_jsegdep = NULL;
3794 freeblks = jblkdep->jb_freeblks;
3795 LIST_REMOVE(jblkdep, jb_deps);
3796 jwork_insert(&freeblks->fb_jwork, jsegdep);
3798 * If the freeblks is all journaled, we can add it to the worklist.
3800 if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
3801 (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
3802 add_to_worklist(&freeblks->fb_list, WK_NODELAY);
3804 free_jblkdep(jblkdep);
3807 static struct jsegdep *
3808 newjsegdep(struct worklist *wk)
3810 struct jsegdep *jsegdep;
3812 jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
3813 workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
3814 jsegdep->jd_seg = NULL;
3819 static struct jmvref *
3820 newjmvref(dp, ino, oldoff, newoff)
3826 struct jmvref *jmvref;
3828 jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
3829 workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump));
3830 jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
3831 jmvref->jm_parent = dp->i_number;
3832 jmvref->jm_ino = ino;
3833 jmvref->jm_oldoff = oldoff;
3834 jmvref->jm_newoff = newoff;
3840 * Allocate a new jremref that tracks the removal of ip from dp with the
3841 * directory entry offset of diroff. Mark the entry as ATTACHED and
3842 * DEPCOMPLETE as we have all the information required for the journal write
3843 * and the directory has already been removed from the buffer. The caller
3844 * is responsible for linking the jremref into the pagedep and adding it
3845 * to the journal to write. The MKDIR_PARENT flag is set if we're doing
3846 * a DOTDOT addition so handle_workitem_remove() can properly assign
3847 * the jsegdep when we're done.
3849 static struct jremref *
3850 newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip,
3851 off_t diroff, nlink_t nlink)
3853 struct jremref *jremref;
3855 jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
3856 workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump));
3857 jremref->jr_state = ATTACHED;
3858 newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
3860 jremref->jr_dirrem = dirrem;
3866 newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff,
3867 nlink_t nlink, uint16_t mode)
3870 inoref->if_jsegdep = newjsegdep(&inoref->if_list);
3871 inoref->if_diroff = diroff;
3872 inoref->if_ino = ino;
3873 inoref->if_parent = parent;
3874 inoref->if_nlink = nlink;
3875 inoref->if_mode = mode;
3879 * Allocate a new jaddref to track the addition of ino to dp at diroff. The
3880 * directory offset may not be known until later. The caller is responsible
3881 * for adding the entry to the journal when this information is available. nlink
3882 * should be the link count prior to the addition and mode is only required
3883 * to have the correct FMT.
3885 static struct jaddref *
3886 newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink,
3889 struct jaddref *jaddref;
3891 jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
3892 workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump));
3893 jaddref->ja_state = ATTACHED;
3894 jaddref->ja_mkdir = NULL;
3895 newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
3901 * Create a new free dependency for a freework. The caller is responsible
3902 * for adjusting the reference count when it has the lock held. The freedep
3903 * will track an outstanding bitmap write that will ultimately clear the
3904 * freework to continue.
3906 static struct freedep *
3907 newfreedep(struct freework *freework)
3909 struct freedep *freedep;
3911 freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
3912 workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
3913 freedep->fd_freework = freework;
3919 * Free a freedep structure once the buffer it is linked to is written. If
3920 * this is the last reference to the freework schedule it for completion.
3923 free_freedep(freedep)
3924 struct freedep *freedep;
3926 struct freework *freework;
3928 freework = freedep->fd_freework;
3929 freework->fw_freeblks->fb_cgwait--;
3930 if (--freework->fw_ref == 0)
3931 freework_enqueue(freework);
3932 WORKITEM_FREE(freedep, D_FREEDEP);
3936 * Allocate a new freework structure that may be a level in an indirect
3937 * when parent is not NULL or a top level block when it is. The top level
3938 * freework structures are allocated without lk held and before the freeblks
3939 * is visible outside of softdep_setup_freeblocks().
3941 static struct freework *
3942 newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal)
3943 struct ufsmount *ump;
3944 struct freeblks *freeblks;
3945 struct freework *parent;
3952 struct freework *freework;
3954 freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS);
3955 workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp);
3956 freework->fw_state = ATTACHED;
3957 freework->fw_jnewblk = NULL;
3958 freework->fw_freeblks = freeblks;
3959 freework->fw_parent = parent;
3960 freework->fw_lbn = lbn;
3961 freework->fw_blkno = nb;
3962 freework->fw_frags = frags;
3963 freework->fw_indir = NULL;
3964 freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR)
3965 ? 0 : NINDIR(ump->um_fs) + 1;
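/*
 * Illustrative count: on a UFS2 filesystem with 16K blocks and 8-byte
 * block pointers, a journaled indirect level takes NINDIR(fs) + 1 =
 * 16384 / 8 + 1 = 2049 references: one per child pointer plus the
 * freework itself.
 */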
3966 freework->fw_start = freework->fw_off = off;
3968 newjfreeblk(freeblks, lbn, nb, frags);
3969 if (parent == NULL) {
3971 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
3980 * Eliminate a jfreeblk for a block that does not need journaling.
3983 cancel_jfreeblk(freeblks, blkno)
3984 struct freeblks *freeblks;
3987 struct jfreeblk *jfreeblk;
3988 struct jblkdep *jblkdep;
3990 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) {
3991 if (jblkdep->jb_list.wk_type != D_JFREEBLK)
3993 jfreeblk = WK_JFREEBLK(&jblkdep->jb_list);
3994 if (jfreeblk->jf_blkno == blkno)
3997 if (jblkdep == NULL)
3999 CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno);
4000 free_jsegdep(jblkdep->jb_jsegdep);
4001 LIST_REMOVE(jblkdep, jb_deps);
4002 WORKITEM_FREE(jfreeblk, D_JFREEBLK);
4006 * Allocate a new jfreeblk to journal a top-level block pointer when truncating
4007 * a file. The caller must add this to the worklist when lk is held.
4009 static struct jfreeblk *
4010 newjfreeblk(freeblks, lbn, blkno, frags)
4011 struct freeblks *freeblks;
4016 struct jfreeblk *jfreeblk;
4018 jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS);
4019 workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK,
4020 freeblks->fb_list.wk_mp);
4021 jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list);
4022 jfreeblk->jf_dep.jb_freeblks = freeblks;
4023 jfreeblk->jf_ino = freeblks->fb_inum;
4024 jfreeblk->jf_lbn = lbn;
4025 jfreeblk->jf_blkno = blkno;
4026 jfreeblk->jf_frags = frags;
4027 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps);
4033 * Allocate a new jtrunc to track a partial truncation.
4035 static struct jtrunc *
4036 newjtrunc(freeblks, size, extsize)
4037 struct freeblks *freeblks;
4041 struct jtrunc *jtrunc;
4043 jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS);
4044 workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC,
4045 freeblks->fb_list.wk_mp);
4046 jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list);
4047 jtrunc->jt_dep.jb_freeblks = freeblks;
4048 jtrunc->jt_ino = freeblks->fb_inum;
4049 jtrunc->jt_size = size;
4050 jtrunc->jt_extsize = extsize;
4051 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps);
4057 * If we're canceling a new bitmap we have to search for another ref
4058 * to move into the bmsafemap dep. This might be better expressed
4059 * with another structure.
4062 move_newblock_dep(jaddref, inodedep)
4063 struct jaddref *jaddref;
4064 struct inodedep *inodedep;
4066 struct inoref *inoref;
4067 struct jaddref *jaddrefn;
4070 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4071 inoref = TAILQ_NEXT(inoref, if_deps)) {
4072 if ((jaddref->ja_state & NEWBLOCK) &&
4073 inoref->if_list.wk_type == D_JADDREF) {
4074 jaddrefn = (struct jaddref *)inoref;
4078 if (jaddrefn == NULL)
4080 jaddrefn->ja_state &= ~(ATTACHED | UNDONE);
4081 jaddrefn->ja_state |= jaddref->ja_state &
4082 (ATTACHED | UNDONE | NEWBLOCK);
4083 jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK);
4084 jaddref->ja_state |= ATTACHED;
4085 LIST_REMOVE(jaddref, ja_bmdeps);
4086 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn,
4091 * Cancel a jaddref either before it has been written or while it is being
4092 * written. This happens when a link is removed before the add reaches
4093 * the disk. The jaddref dependency is kept linked into the bmsafemap
4094 * and inode to prevent the link count or bitmap from reaching the disk
4095 * until handle_workitem_remove() re-adjusts the counts and bitmaps as required.
4098 * Returns 1 if the canceled addref requires journaling of the remove and 0 otherwise.
4102 cancel_jaddref(jaddref, inodedep, wkhd)
4103 struct jaddref *jaddref;
4104 struct inodedep *inodedep;
4105 struct workhead *wkhd;
4107 struct inoref *inoref;
4108 struct jsegdep *jsegdep;
4111 KASSERT((jaddref->ja_state & COMPLETE) == 0,
4112 ("cancel_jaddref: Canceling complete jaddref"));
4113 if (jaddref->ja_state & (INPROGRESS | COMPLETE))
4117 if (inodedep == NULL)
4118 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4120 panic("cancel_jaddref: Lost inodedep");
4122 * We must adjust the nlink of any reference operation that follows
4123 * us so that it is consistent with the in-memory reference. This
4124 * ensures that inode nlink rollbacks always have the correct link.
4127 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4128 inoref = TAILQ_NEXT(inoref, if_deps)) {
4129 if (inoref->if_state & GOINGAWAY)
4134 jsegdep = inoref_jseg(&jaddref->ja_ref);
4135 if (jaddref->ja_state & NEWBLOCK)
4136 move_newblock_dep(jaddref, inodedep);
4137 wake_worklist(&jaddref->ja_list);
4138 jaddref->ja_mkdir = NULL;
4139 if (jaddref->ja_state & INPROGRESS) {
4140 jaddref->ja_state &= ~INPROGRESS;
4141 WORKLIST_REMOVE(&jaddref->ja_list);
4142 jwork_insert(wkhd, jsegdep);
4144 free_jsegdep(jsegdep);
4145 if (jaddref->ja_state & DEPCOMPLETE)
4146 remove_from_journal(&jaddref->ja_list);
4148 jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE);
4150 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove
4151 * can arrange for them to be freed with the bitmap. Otherwise we
4152 * no longer need this addref attached to the inoreflst and it
4153 * will incorrectly adjust nlink if we leave it.
4155 if ((jaddref->ja_state & NEWBLOCK) == 0) {
4156 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
4158 jaddref->ja_state |= COMPLETE;
4159 free_jaddref(jaddref);
4163 * Leave the head of the list for jsegdeps for fast merging.
4165 if (LIST_FIRST(wkhd) != NULL) {
4166 jaddref->ja_state |= ONWORKLIST;
4167 LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list);
4169 WORKLIST_INSERT(wkhd, &jaddref->ja_list);
4175 * Attempt to free a jaddref structure when some work completes. This
4176 * should only succeed once the entry is written and all dependencies have been freed.
4180 free_jaddref(jaddref)
4181 struct jaddref *jaddref;
4184 if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE)
4186 if (jaddref->ja_ref.if_jsegdep)
4187 panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n",
4188 jaddref, jaddref->ja_state);
4189 if (jaddref->ja_state & NEWBLOCK)
4190 LIST_REMOVE(jaddref, ja_bmdeps);
4191 if (jaddref->ja_state & (INPROGRESS | ONWORKLIST))
4192 panic("free_jaddref: Bad state %p(0x%X)",
4193 jaddref, jaddref->ja_state);
4194 if (jaddref->ja_mkdir != NULL)
4195 panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state);
4196 WORKITEM_FREE(jaddref, D_JADDREF);
4200 * Free a jremref structure once it has been written or discarded.
4203 free_jremref(jremref)
4204 struct jremref *jremref;
4207 if (jremref->jr_ref.if_jsegdep)
4208 free_jsegdep(jremref->jr_ref.if_jsegdep);
4209 if (jremref->jr_state & INPROGRESS)
4210 panic("free_jremref: IO still pending");
4211 WORKITEM_FREE(jremref, D_JREMREF);
4215 * Free a jnewblk structure.
4218 free_jnewblk(jnewblk)
4219 struct jnewblk *jnewblk;
4222 if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4224 LIST_REMOVE(jnewblk, jn_deps);
4225 if (jnewblk->jn_dep != NULL)
4226 panic("free_jnewblk: Dependency still attached.");
4227 WORKITEM_FREE(jnewblk, D_JNEWBLK);
4231 * Cancel a jnewblk which has been made redundant by frag extension.
4234 cancel_jnewblk(jnewblk, wkhd)
4235 struct jnewblk *jnewblk;
4236 struct workhead *wkhd;
4238 struct jsegdep *jsegdep;
4240 CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4241 jsegdep = jnewblk->jn_jsegdep;
4242 if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4243 panic("cancel_jnewblk: Invalid state");
4244 jnewblk->jn_jsegdep = NULL;
4245 jnewblk->jn_dep = NULL;
4246 jnewblk->jn_state |= GOINGAWAY;
4247 if (jnewblk->jn_state & INPROGRESS) {
4248 jnewblk->jn_state &= ~INPROGRESS;
4249 WORKLIST_REMOVE(&jnewblk->jn_list);
4250 jwork_insert(wkhd, jsegdep);
4252 free_jsegdep(jsegdep);
4253 remove_from_journal(&jnewblk->jn_list);
4255 wake_worklist(&jnewblk->jn_list);
4256 WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4260 free_jblkdep(jblkdep)
4261 struct jblkdep *jblkdep;
4264 if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4265 WORKITEM_FREE(jblkdep, D_JFREEBLK);
4266 else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4267 WORKITEM_FREE(jblkdep, D_JTRUNC);
4269 panic("free_jblkdep: Unexpected type %s",
4270 TYPENAME(jblkdep->jb_list.wk_type));
4274 * Free a single jseg once it is no longer referenced in memory or on
4275 * disk. Reclaim journal blocks and dependencies waiting for the segment to be written.
4279 free_jseg(jseg, jblocks)
4281 struct jblocks *jblocks;
4283 struct freework *freework;
4286 * Free freework structures that were lingering to indicate freed
4287 * indirect blocks that forced journal write ordering on reallocate.
4289 while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4290 indirblk_remove(freework);
4291 if (jblocks->jb_oldestseg == jseg)
4292 jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4293 TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4294 jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4295 KASSERT(LIST_EMPTY(&jseg->js_entries),
4296 ("free_jseg: Freed jseg has valid entries."));
4297 WORKITEM_FREE(jseg, D_JSEG);
4301 * Free all jsegs that meet the criteria for being reclaimed and update oldestseg.
4306 struct jblocks *jblocks;
4311 * Free only those jsegs which have none allocated before them to
4312 * preserve the journal space ordering.
4314 while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4316 * Only reclaim space when nothing depends on this journal
4317 * set and another set has written that it is no longer in use.
4320 if (jseg->js_refs != 0) {
4321 jblocks->jb_oldestseg = jseg;
4324 if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
4326 if (jseg->js_seq > jblocks->jb_oldestwrseq)
4329 * We can free jsegs that didn't write entries when
4330 * oldestwrseq == js_seq.
4332 if (jseg->js_seq == jblocks->jb_oldestwrseq &&
4335 free_jseg(jseg, jblocks);
4338 * If we exited the loop above we still must discover the
4339 * oldest valid segment.
4342 for (jseg = jblocks->jb_oldestseg; jseg != NULL;
4343 jseg = TAILQ_NEXT(jseg, js_next))
4344 if (jseg->js_refs != 0)
4346 jblocks->jb_oldestseg = jseg;
4348 * The journal has no valid records but some jsegs may still be
4349 * waiting on oldestwrseq to advance. We force a small record
4350 * out to permit these lingering records to be reclaimed.
4352 if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
4353 jblocks->jb_needseg = 1;
4357 * Release one reference to a jseg and free it if the count reaches 0. This
4358 * should eventually reclaim journal space as well.
4365 KASSERT(jseg->js_refs > 0,
4366 ("free_jseg: Invalid refcnt %d", jseg->js_refs));
4367 if (--jseg->js_refs != 0)
4369 free_jsegs(jseg->js_jblocks);
4373 * Release a jsegdep and decrement the jseg count.
4376 free_jsegdep(jsegdep)
4377 struct jsegdep *jsegdep;
4380 if (jsegdep->jd_seg)
4381 rele_jseg(jsegdep->jd_seg);
4382 WORKITEM_FREE(jsegdep, D_JSEGDEP);
4386 * Wait for a journal item to make it to disk. Initiate journal processing if required.
4391 struct worklist *wk;
4396 * Blocking journal waits cause slow synchronous behavior. Record
4397 * stats on the frequency of these blocking operations.
4399 if (waitfor == MNT_WAIT) {
4400 stat_journal_wait++;
4401 switch (wk->wk_type) {
4404 stat_jwait_filepage++;
4408 stat_jwait_freeblks++;
4411 stat_jwait_newblk++;
4421 * If IO has not started we process the journal. We can't mark the
4422 * worklist item as IOWAITING because we drop the lock while
4423 * processing the journal and the worklist entry may be freed after
4424 * this point. The caller may call back in and re-issue the request.
4426 if ((wk->wk_state & INPROGRESS) == 0) {
4427 softdep_process_journal(wk->wk_mp, wk, waitfor);
4428 if (waitfor != MNT_WAIT)
4432 if (waitfor != MNT_WAIT)
4434 wait_worklist(wk, "jwait");
4439 * Look up an inodedep based on an inode pointer and set the nlinkdelta as
4440 * appropriate. This is a convenience function to reduce duplicate code
4441 * for the setup and revert functions below.
4443 static struct inodedep *
4444 inodedep_lookup_ip(ip)
4447 struct inodedep *inodedep;
4450 KASSERT(ip->i_nlink >= ip->i_effnlink,
4451 ("inodedep_lookup_ip: bad delta"));
4453 if (IS_SNAPSHOT(ip))
4455 (void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags,
4457 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
4458 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
4464 * Called prior to creating a new inode and linking it to a directory. The
4465 * jaddref structure must already be allocated by softdep_setup_inomapdep
4466 * and it is discovered here so we can initialize the mode and update nlinkdelta.
4470 softdep_setup_create(dp, ip)
4474 struct inodedep *inodedep;
4475 struct jaddref *jaddref;
4478 KASSERT(ip->i_nlink == 1,
4479 ("softdep_setup_create: Invalid link count."));
4482 inodedep = inodedep_lookup_ip(ip);
4483 if (DOINGSUJ(dvp)) {
4484 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4486 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
4487 ("softdep_setup_create: No addref structure present."));
4489 softdep_prelink(dvp, NULL);
4494 * Create a jaddref structure to track the addition of a DOTDOT link when
4495 * we are reparenting an inode as part of a rename. This jaddref will be
4496 * found by softdep_setup_directory_change. Adjusts nlinkdelta for
4497 * non-journaling softdep.
4500 softdep_setup_dotdot_link(dp, ip)
4504 struct inodedep *inodedep;
4505 struct jaddref *jaddref;
4513 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
4514 * is used as a normal link would be.
4517 jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4518 dp->i_effnlink - 1, dp->i_mode);
4520 inodedep = inodedep_lookup_ip(dp);
4522 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4524 softdep_prelink(dvp, ITOV(ip));
4529 * Create a jaddref structure to track a new link to an inode. The directory
4530 * offset is not known until softdep_setup_directory_add or
4531 * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling softdep.
4535 softdep_setup_link(dp, ip)
4539 struct inodedep *inodedep;
4540 struct jaddref *jaddref;
4546 jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1,
4549 inodedep = inodedep_lookup_ip(ip);
4551 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4553 softdep_prelink(dvp, ITOV(ip));
4558 * Called to create the jaddref structures to track . and .. references as
4559 * well as lookup and further initialize the incomplete jaddref created
4560 * by softdep_setup_inomapdep when the inode was allocated. Adjusts
4561 * nlinkdelta for non-journaling softdep.
4564 softdep_setup_mkdir(dp, ip)
4568 struct inodedep *inodedep;
4569 struct jaddref *dotdotaddref;
4570 struct jaddref *dotaddref;
4571 struct jaddref *jaddref;
4575 dotaddref = dotdotaddref = NULL;
4576 if (DOINGSUJ(dvp)) {
4577 dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1,
4579 dotaddref->ja_state |= MKDIR_BODY;
4580 dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4581 dp->i_effnlink - 1, dp->i_mode);
4582 dotdotaddref->ja_state |= MKDIR_PARENT;
4585 inodedep = inodedep_lookup_ip(ip);
4586 if (DOINGSUJ(dvp)) {
4587 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4589 KASSERT(jaddref != NULL,
4590 ("softdep_setup_mkdir: No addref structure present."));
4591 KASSERT(jaddref->ja_parent == dp->i_number,
4592 ("softdep_setup_mkdir: bad parent %d",
4593 jaddref->ja_parent));
4594 TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref,
4597 inodedep = inodedep_lookup_ip(dp);
4599 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst,
4600 &dotdotaddref->ja_ref, if_deps);
4601 softdep_prelink(ITOV(dp), NULL);
4606 * Called to track nlinkdelta of the inode and parent directories prior to
4607 * unlinking a directory.
4610 softdep_setup_rmdir(dp, ip)
4618 (void) inodedep_lookup_ip(ip);
4619 (void) inodedep_lookup_ip(dp);
4620 softdep_prelink(dvp, ITOV(ip));
4625 * Called to track nlinkdelta of the inode and parent directories prior to unlinking a file.
4629 softdep_setup_unlink(dp, ip)
4637 (void) inodedep_lookup_ip(ip);
4638 (void) inodedep_lookup_ip(dp);
4639 softdep_prelink(dvp, ITOV(ip));
4644 * Called to release the journal structures created by a failed non-directory
4645 * creation. Adjusts nlinkdelta for non-journaling softdep.
4648 softdep_revert_create(dp, ip)
4652 struct inodedep *inodedep;
4653 struct jaddref *jaddref;
4658 inodedep = inodedep_lookup_ip(ip);
4659 if (DOINGSUJ(dvp)) {
4660 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4662 KASSERT(jaddref->ja_parent == dp->i_number,
4663 ("softdep_revert_create: addref parent mismatch"));
4664 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4670 * Called to release the journal structures created by a failed dotdot link
4671 * creation. Adjusts nlinkdelta for non-journaling softdep.
4674 softdep_revert_dotdot_link(dp, ip)
4678 struct inodedep *inodedep;
4679 struct jaddref *jaddref;
4684 inodedep = inodedep_lookup_ip(dp);
4685 if (DOINGSUJ(dvp)) {
4686 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4688 KASSERT(jaddref->ja_parent == ip->i_number,
4689 ("softdep_revert_dotdot_link: addref parent mismatch"));
4690 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4696 * Called to release the journal structures created by a failed link
4697 * addition. Adjusts nlinkdelta for non-journaling softdep.
4700 softdep_revert_link(dp, ip)
4704 struct inodedep *inodedep;
4705 struct jaddref *jaddref;
4710 inodedep = inodedep_lookup_ip(ip);
4711 if (DOINGSUJ(dvp)) {
4712 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4714 KASSERT(jaddref->ja_parent == dp->i_number,
4715 ("softdep_revert_link: addref parent mismatch"));
4716 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4722 * Called to release the journal structures created by a failed mkdir
4723 * attempt. Adjusts nlinkdelta for non-journaling softdep.
4726 softdep_revert_mkdir(dp, ip)
4730 struct inodedep *inodedep;
4731 struct jaddref *jaddref;
4732 struct jaddref *dotaddref;
4738 inodedep = inodedep_lookup_ip(dp);
4739 if (DOINGSUJ(dvp)) {
4740 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4742 KASSERT(jaddref->ja_parent == ip->i_number,
4743 ("softdep_revert_mkdir: dotdot addref parent mismatch"));
4744 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4746 inodedep = inodedep_lookup_ip(ip);
4747 if (DOINGSUJ(dvp)) {
4748 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4750 KASSERT(jaddref->ja_parent == dp->i_number,
4751 ("softdep_revert_mkdir: addref parent mismatch"));
4752 dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
4753 inoreflst, if_deps);
4754 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4755 KASSERT(dotaddref->ja_parent == ip->i_number,
4756 ("softdep_revert_mkdir: dot addref parent mismatch"));
4757 cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait);
4763 * Called to correct nlinkdelta after a failed rmdir.
4766 softdep_revert_rmdir(dp, ip)
4772 (void) inodedep_lookup_ip(ip);
4773 (void) inodedep_lookup_ip(dp);
4778 * Protecting the freemaps (or bitmaps).
4780 * To eliminate the need to execute fsck before mounting a filesystem
4781 * after a power failure, one must (conservatively) guarantee that the
4782 * on-disk copy of the bitmaps never indicates that a live inode or block is
4783 * free. So, when a block or inode is allocated, the bitmap should be
4784 * updated (on disk) before any new pointers. When a block or inode is
4785 * freed, the bitmap should not be updated until all pointers have been
4786 * reset. The latter dependency is handled by the delayed de-allocation
4787 * approach described below for block and inode de-allocation. The former
4788 * dependency is handled by calling the following procedure when a block or
4789 * inode is allocated. When an inode is allocated an "inodedep" is created
4790 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
4791 * Each "inodedep" is also inserted into the hash indexing structure so
4792 * that any additional link additions can be made dependent on the inode
4795 * The ufs filesystem maintains a number of free block counts (e.g., per
4796 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
4797 * in addition to the bitmaps. These counts are used to improve efficiency
4798 * during allocation and therefore must be consistent with the bitmaps.
4799 * There is no convenient way to guarantee post-crash consistency of these
4800 * counts with simple update ordering, for two main reasons: (1) The counts
4801 * and bitmaps for a single cylinder group block are not in the same disk
4802 * sector. If a disk write is interrupted (e.g., by power failure), one may
4803 * be written and the other not. (2) Some of the counts are located in the
4804 * superblock rather than the cylinder group block. So, we focus our soft
4805 * updates implementation on protecting the bitmaps. When mounting a
4806 * filesystem, we recompute the auxiliary counts from the bitmaps.
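/*
 * A minimal user-space sketch of the ordering rule above (hypothetical
 * names; a plain int stands in for a synchronous disk write).  On
 * allocation the bitmap reaches "disk" first; on free the pointer is
 * cleared first.  A crash between the two writes can leak a block but
 * can never leave a live block marked free.
 */
#if 0
#include <stdio.h>

static int disk_bitmap;		/* 1 = block allocated on "disk" */
static int disk_pointer;	/* 1 = inode references the block */

static void
toy_write(const char *what, int *rec, int val)
{
	*rec = val;		/* model of a completed disk write */
	printf("wrote %s=%d\n", what, val);
}

static void
toy_alloc(void)
{
	toy_write("bitmap", &disk_bitmap, 1);	/* bitmap first... */
	toy_write("pointer", &disk_pointer, 1);	/* ...then pointer */
}

static void
toy_free(void)
{
	toy_write("pointer", &disk_pointer, 0);	/* pointer first... */
	toy_write("bitmap", &disk_bitmap, 0);	/* ...then bitmap */
}

int
main(void)
{
	toy_alloc();
	toy_free();
	return (0);
}
#endif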
4810 * Called just after updating the cylinder group block to allocate an inode.
4813 softdep_setup_inomapdep(bp, ip, newinum, mode)
4814 struct buf *bp; /* buffer for cylgroup block with inode map */
4815 struct inode *ip; /* inode related to allocation */
4816 ino_t newinum; /* new inode number being allocated */
4819 struct inodedep *inodedep;
4820 struct bmsafemap *bmsafemap;
4821 struct jaddref *jaddref;
4825 mp = UFSTOVFS(ip->i_ump);
4826 fs = ip->i_ump->um_fs;
4830 * Allocate the journal reference add structure so that the bitmap
4831 * can be dependent on it.
4833 if (MOUNTEDSUJ(mp)) {
4834 jaddref = newjaddref(ip, newinum, 0, 0, mode);
4835 jaddref->ja_state |= NEWBLOCK;
4839 * Create a dependency for the newly allocated inode.
4840 * Panic if it already exists as something is seriously wrong.
4841 * Otherwise add it to the dependency list for the buffer holding
4842 * the cylinder group map from which it was allocated.
4844 * We have to preallocate a bmsafemap entry in case it is needed
4845 * in bmsafemap_lookup since once we allocate the inodedep, we
4846 * have to finish initializing it before we can FREE_LOCK().
4847 * By preallocating, we avoid FREE_LOCK() while doing a malloc
4848 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before
4849 * creating the inodedep as it can be freed during the time
4850 * that we FREE_LOCK() while allocating the inodedep. We must
4851 * call workitem_alloc() before entering the locked section as
4852 * it also acquires the lock and we must avoid trying to do so
4855 bmsafemap = malloc(sizeof(struct bmsafemap),
4856 M_BMSAFEMAP, M_SOFTDEP_FLAGS);
4857 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
4859 if ((inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep)))
4860 panic("softdep_setup_inomapdep: dependency %p for new"
4861 "inode already exists", inodedep);
4862 bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
4864 LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
4865 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4868 inodedep->id_state |= ONDEPLIST;
4869 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
4871 inodedep->id_bmsafemap = bmsafemap;
4872 inodedep->id_state &= ~DEPCOMPLETE;
4877 * Called just after updating the cylinder group block to
4878 * allocate a block or fragment.
4881 softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
4882 struct buf *bp; /* buffer for cylgroup block with block map */
4883 struct mount *mp; /* filesystem doing allocation */
4884 ufs2_daddr_t newblkno; /* number of newly allocated block */
4885 int frags; /* Number of fragments. */
4886 int oldfrags; /* Previous number of fragments for extend. */
4888 struct newblk *newblk;
4889 struct bmsafemap *bmsafemap;
4890 struct jnewblk *jnewblk;
4893 fs = VFSTOUFS(mp)->um_fs;
4896 * Create a dependency for the newly allocated block.
4897 * Add it to the dependency list for the buffer holding
4898 * the cylinder group map from which it was allocated.
4900 if (MOUNTEDSUJ(mp)) {
4901 jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS);
4902 workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp);
4903 jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list);
4904 jnewblk->jn_state = ATTACHED;
4905 jnewblk->jn_blkno = newblkno;
4906 jnewblk->jn_frags = frags;
4907 jnewblk->jn_oldfrags = oldfrags;
4915 cgp = (struct cg *)bp->b_data;
4916 blksfree = cg_blksfree(cgp);
4917 bno = dtogd(fs, jnewblk->jn_blkno);
4918 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags;
4920 if (isset(blksfree, bno + i))
4921 panic("softdep_setup_blkmapdep: "
4922 "free fragment %d from %d-%d "
4923 "state 0x%X dep %p", i,
4924 jnewblk->jn_oldfrags,
4934 "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d",
4935 newblkno, frags, oldfrags);
4937 if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0)
4938 panic("softdep_setup_blkmapdep: found block");
4939 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp,
4940 dtog(fs, newblkno), NULL);
4942 jnewblk->jn_dep = (struct worklist *)newblk;
4943 LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps);
4945 newblk->nb_state |= ONDEPLIST;
4946 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
4948 newblk->nb_bmsafemap = bmsafemap;
4949 newblk->nb_jnewblk = jnewblk;
4953 #define BMSAFEMAP_HASH(fs, cg) \
4954 (&bmsafemap_hashtbl[((((register_t)(fs)) >> 13) + (cg)) & bmsafemap_hash])
4957 bmsafemap_find(bmsafemaphd, mp, cg, bmsafemapp)
4958 struct bmsafemap_hashhead *bmsafemaphd;
4961 struct bmsafemap **bmsafemapp;
4963 struct bmsafemap *bmsafemap;
4965 LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash)
4966 if (bmsafemap->sm_list.wk_mp == mp && bmsafemap->sm_cg == cg)
4969 *bmsafemapp = bmsafemap;
4978 * Find the bmsafemap associated with a cylinder group buffer.
4979 * If none exists, create one. The buffer must be locked when
4980 * this routine is called and this routine must be called with
4981 * the softdep lock held. To avoid giving up the lock while
4982 * allocating a new bmsafemap, a preallocated bmsafemap may be
4983 * provided. If it is provided but not needed, it is freed.
4985 static struct bmsafemap *
4986 bmsafemap_lookup(mp, bp, cg, newbmsafemap)
4990 struct bmsafemap *newbmsafemap;
4992 struct bmsafemap_hashhead *bmsafemaphd;
4993 struct bmsafemap *bmsafemap, *collision;
4994 struct worklist *wk;
4997 mtx_assert(&lk, MA_OWNED);
4998 KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer"));
4999 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5000 if (wk->wk_type == D_BMSAFEMAP) {
5002 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5003 return (WK_BMSAFEMAP(wk));
5006 fs = VFSTOUFS(mp)->um_fs;
5007 bmsafemaphd = BMSAFEMAP_HASH(fs, cg);
5008 if (bmsafemap_find(bmsafemaphd, mp, cg, &bmsafemap) == 1) {
5010 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5014 bmsafemap = newbmsafemap;
5017 bmsafemap = malloc(sizeof(struct bmsafemap),
5018 M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5019 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5022 bmsafemap->sm_buf = bp;
5023 LIST_INIT(&bmsafemap->sm_inodedephd);
5024 LIST_INIT(&bmsafemap->sm_inodedepwr);
5025 LIST_INIT(&bmsafemap->sm_newblkhd);
5026 LIST_INIT(&bmsafemap->sm_newblkwr);
5027 LIST_INIT(&bmsafemap->sm_jaddrefhd);
5028 LIST_INIT(&bmsafemap->sm_jnewblkhd);
5029 LIST_INIT(&bmsafemap->sm_freehd);
5030 LIST_INIT(&bmsafemap->sm_freewr);
5031 if (bmsafemap_find(bmsafemaphd, mp, cg, &collision) == 1) {
5032 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
5035 bmsafemap->sm_cg = cg;
5036 LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash);
5037 LIST_INSERT_HEAD(&VFSTOUFS(mp)->softdep_dirtycg, bmsafemap, sm_next);
5038 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
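/*
 * The "preallocate, insert on miss, free on hit" pattern used by
 * bmsafemap_lookup generalizes to any hashed lookup done under a lock
 * that cannot be dropped for a malloc.  A minimal user-space sketch of
 * the pattern (hypothetical list and names; a pthread mutex standing
 * in for the softdep lock):
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

static struct node *table_head;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *
lookup_or_insert(int key)
{
	struct node *n, *newn;

	/* Allocate the candidate before taking the lock. */
	if ((newn = malloc(sizeof(*newn))) == NULL)
		return (NULL);
	pthread_mutex_lock(&table_lock);
	for (n = table_head; n != NULL; n = n->next)
		if (n->key == key)
			break;
	if (n == NULL) {
		/* Miss: consume the preallocated node. */
		newn->key = key;
		newn->next = table_head;
		table_head = newn;
		n = newn;
		newn = NULL;
	}
	pthread_mutex_unlock(&table_lock);
	/* Hit: the preallocation was not needed, release it. */
	free(newn);
	return (n);
}
#endif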
5043 * Direct block allocation dependencies.
5045 * When a new block is allocated, the corresponding disk locations must be
5046 * initialized (with zeros or new data) before the on-disk inode points to
5047 * them. Also, the freemap from which the block was allocated must be
5048 * updated (on disk) before the inode's pointer. These two dependencies are
5049 * independent of each other and are needed for all file blocks and indirect
5050 * blocks that are pointed to directly by the inode. Just before the
5051 * "in-core" version of the inode is updated with a newly allocated block
5052 * number, a procedure (below) is called to setup allocation dependency
5053 * structures. These structures are removed when the corresponding
5054 * dependencies are satisfied or when the block allocation becomes obsolete
5055 * (i.e., the file is deleted, the block is de-allocated, or the block is a
5056 * fragment that gets upgraded). All of these cases are handled in
5057 * procedures described later.
5059 * When a file extension causes a fragment to be upgraded, either to a larger
5060 * fragment or to a full block, the on-disk location may change (if the
5061 * previous fragment could not simply be extended). In this case, the old
5062 * fragment must be de-allocated, but not until after the inode's pointer has
5063 * been updated. In most cases, this is handled by later procedures, which
5064 * will construct a "freefrag" structure to be added to the workitem queue
5065 * when the inode update is complete (or obsolete). The main exception to
5066 * this is when an allocation occurs while a pending allocation dependency
5067 * (for the same block pointer) remains. This case is handled in the main
5068 * allocation dependency setup procedure by immediately freeing the
5069 * unreferenced fragments.
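/*
 * The allocdirect lists maintained below are kept sorted by offset,
 * with a fast path for appending at the tail since files usually grow.
 * A self-contained sketch of that insert discipline (hypothetical toy
 * types; the real lists hang off the inodedep, and equal offsets are
 * additionally merged):
 */
#if 0
#include <sys/queue.h>

struct toy_ad {
	int ta_offset;
	TAILQ_ENTRY(toy_ad) ta_next;
};
TAILQ_HEAD(toy_adlist, toy_ad);

static void
toy_ad_insert(struct toy_adlist *head, struct toy_ad *adp)
{
	struct toy_ad *oldadp;

	/* Fast path: the common case is appending at the tail. */
	oldadp = TAILQ_LAST(head, toy_adlist);
	if (oldadp == NULL || oldadp->ta_offset <= adp->ta_offset) {
		TAILQ_INSERT_TAIL(head, adp, ta_next);
		return;
	}
	/* Slow path: find the first entry at or beyond the new offset. */
	TAILQ_FOREACH(oldadp, head, ta_next)
		if (oldadp->ta_offset >= adp->ta_offset)
			break;
	TAILQ_INSERT_BEFORE(oldadp, adp, ta_next);
}
#endif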
5072 softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5073 struct inode *ip; /* inode to which block is being added */
5074 ufs_lbn_t off; /* block pointer within inode */
5075 ufs2_daddr_t newblkno; /* disk block number being added */
5076 ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */
5077 long newsize; /* size of new block */
5078 long oldsize; /* size of old block */
5079 struct buf *bp; /* bp for allocated block */
5081 struct allocdirect *adp, *oldadp;
5082 struct allocdirectlst *adphead;
5083 struct freefrag *freefrag;
5084 struct inodedep *inodedep;
5085 struct pagedep *pagedep;
5086 struct jnewblk *jnewblk;
5087 struct newblk *newblk;
5092 mp = UFSTOVFS(ip->i_ump);
5093 if (oldblkno && oldblkno != newblkno)
5094 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5099 "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
5100 "off %jd newsize %ld oldsize %d",
5101 ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5103 if (off >= NDADDR) {
5105 panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5107 /* allocating an indirect block */
5109 panic("softdep_setup_allocdirect: non-zero indir");
5112 panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5115 * Allocating a direct block.
5117 * If we are allocating a directory block, then we must
5118 * allocate an associated pagedep to track additions and deletions.
5121 if ((ip->i_mode & IFMT) == IFDIR)
5122 pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5125 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5126 panic("softdep_setup_allocdirect: lost block");
5127 KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5128 ("softdep_setup_allocdirect: newblk already initialized"));
5130 * Convert the newblk to an allocdirect.
5132 newblk->nb_list.wk_type = D_ALLOCDIRECT;
5133 adp = (struct allocdirect *)newblk;
5134 newblk->nb_freefrag = freefrag;
5135 adp->ad_offset = off;
5136 adp->ad_oldblkno = oldblkno;
5137 adp->ad_newsize = newsize;
5138 adp->ad_oldsize = oldsize;
5141 * Finish initializing the journal.
5143 if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5144 jnewblk->jn_ino = ip->i_number;
5145 jnewblk->jn_lbn = lbn;
5146 add_to_journal(&jnewblk->jn_list);
5148 if (freefrag && freefrag->ff_jdep != NULL &&
5149 freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5150 add_to_journal(freefrag->ff_jdep);
5151 inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
5152 adp->ad_inodedep = inodedep;
5154 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5156 * The list of allocdirects must be kept in sorted and ascending
5157 * order so that the rollback routines can quickly determine the
5158 * first uncommitted block (the size of the file stored on disk
5159 * ends at the end of the lowest committed fragment, or if there
5160 * are no fragments, at the end of the highest committed block).
5161 * Since files generally grow, the typical case is that the new
5162 * block is to be added at the end of the list. We speed this
5163 * special case by checking against the last allocdirect in the
5164 * list before laboriously traversing the list looking for the insertion point.
5167 adphead = &inodedep->id_newinoupdt;
5168 oldadp = TAILQ_LAST(adphead, allocdirectlst);
5169 if (oldadp == NULL || oldadp->ad_offset <= off) {
5170 /* insert at end of list */
5171 TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5172 if (oldadp != NULL && oldadp->ad_offset == off)
5173 allocdirect_merge(adphead, adp, oldadp);
5177 TAILQ_FOREACH(oldadp, adphead, ad_next) {
5178 if (oldadp->ad_offset >= off)
5182 panic("softdep_setup_allocdirect: lost entry");
5183 /* insert in middle of list */
5184 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5185 if (oldadp->ad_offset == off)
5186 allocdirect_merge(adphead, adp, oldadp);
5192 * Merge a newer and older journal record to be stored either in a
5193 * newblock or freefrag. This handles aggregating journal records for
5194 * fragment allocation into a second record as well as replacing a
5195 * journal free with an aborted journal allocation. A segment for the
5196 * oldest record will be placed on wkhd if it has been written. If not,
5197 * the segment for the newer record will suffice.
5199 static struct worklist *
5200 jnewblk_merge(new, old, wkhd)
5201 struct worklist *new;
5202 struct worklist *old;
5203 struct workhead *wkhd;
5205 struct jnewblk *njnewblk;
5206 struct jnewblk *jnewblk;
5208 /* Handle NULLs to simplify callers. */
5213 /* Replace a jfreefrag with a jnewblk. */
5214 if (new->wk_type == D_JFREEFRAG) {
5215 if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
5216 panic("jnewblk_merge: blkno mismatch: %p, %p",
5218 cancel_jfreefrag(WK_JFREEFRAG(new));
5221 if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
5222 panic("jnewblk_merge: Bad type: old %d new %d\n",
5223 old->wk_type, new->wk_type);
5225 * Handle merging of two jnewblk records that describe
5226 * different sets of fragments in the same block.
5228 jnewblk = WK_JNEWBLK(old);
5229 njnewblk = WK_JNEWBLK(new);
5230 if (jnewblk->jn_blkno != njnewblk->jn_blkno)
5231 panic("jnewblk_merge: Merging disparate blocks.");
5233 * The record may be rolled back in the cg.
5235 if (jnewblk->jn_state & UNDONE) {
5236 jnewblk->jn_state &= ~UNDONE;
5237 njnewblk->jn_state |= UNDONE;
5238 njnewblk->jn_state &= ~ATTACHED;
5241 * We modify the newer addref and free the older so that if neither
5242 * has been written the most up-to-date copy will be on disk. If
5243 * both have been written but rolled back we only temporarily need
5244 * one of them to fix the bits when the cg write completes.
5246 jnewblk->jn_state |= ATTACHED | COMPLETE;
5247 njnewblk->jn_oldfrags = jnewblk->jn_oldfrags;
5248 cancel_jnewblk(jnewblk, wkhd);
5249 WORKLIST_REMOVE(&jnewblk->jn_list);
5250 free_jnewblk(jnewblk);
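/*
 * Worked example (hypothetical numbers): a first write allocates frags
 * 0-3 of a block (record: jn_oldfrags 0, jn_frags 4) and a later
 * extension allocates frags 4-7 (record: jn_oldfrags 4, jn_frags 8).
 * After the merge above, the surviving newer record carries
 * jn_oldfrags 0 and jn_frags 8, describing the whole range with a
 * single journal entry.
 */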
5255 * Replace an old allocdirect dependency with a newer one.
5256 * This routine must be called with splbio interrupts blocked.
5259 allocdirect_merge(adphead, newadp, oldadp)
5260 struct allocdirectlst *adphead; /* head of list holding allocdirects */
5261 struct allocdirect *newadp; /* allocdirect being added */
5262 struct allocdirect *oldadp; /* existing allocdirect being checked */
5264 struct worklist *wk;
5265 struct freefrag *freefrag;
5268 mtx_assert(&lk, MA_OWNED);
5269 if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
5270 newadp->ad_oldsize != oldadp->ad_newsize ||
5271 newadp->ad_offset >= NDADDR)
5272 panic("%s %jd != new %jd || old size %ld != new %ld",
5273 "allocdirect_merge: old blkno",
5274 (intmax_t)newadp->ad_oldblkno,
5275 (intmax_t)oldadp->ad_newblkno,
5276 newadp->ad_oldsize, oldadp->ad_newsize);
5277 newadp->ad_oldblkno = oldadp->ad_oldblkno;
5278 newadp->ad_oldsize = oldadp->ad_oldsize;
5280 * If the old dependency had a fragment to free or had never
5281 * previously had a block allocated, then the new dependency
5282 * can immediately post its freefrag and adopt the old freefrag.
5283 * This action is done by swapping the freefrag dependencies.
5284 * The new dependency gains the old one's freefrag, and the
5285 * old one gets the new one and then immediately puts it on
5286 * the worklist when it is freed by free_newblk. It is
5287 * not possible to do this swap when the old dependency had a
5288 * non-zero size but no previous fragment to free. This condition
5289 * arises when the new block is an extension of the old block.
5290 * Here, the first part of the fragment allocated to the new
5291 * dependency is part of the block currently claimed on disk by
5292 * the old dependency, so cannot legitimately be freed until the
5293 * conditions for the new dependency are fulfilled.
5295 freefrag = newadp->ad_freefrag;
5296 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
5297 newadp->ad_freefrag = oldadp->ad_freefrag;
5298 oldadp->ad_freefrag = freefrag;
5301 * If we are tracking a new directory-block allocation,
5302 * move it from the old allocdirect to the new allocdirect.
5304 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
5305 WORKLIST_REMOVE(wk);
5306 if (!LIST_EMPTY(&oldadp->ad_newdirblk))
5307 panic("allocdirect_merge: extra newdirblk");
5308 WORKLIST_INSERT(&newadp->ad_newdirblk, wk);
5310 TAILQ_REMOVE(adphead, oldadp, ad_next);
5312 * We need to move any journal dependencies over to the freefrag
5313 * that releases this block if it exists. Otherwise we are
5314 * extending an existing block and we'll wait until that is
5315 * complete to release the journal space and extend the
5316 * new journal to cover this old space as well.
5318 if (freefrag == NULL) {
5319 if (oldadp->ad_newblkno != newadp->ad_newblkno)
5320 panic("allocdirect_merge: %jd != %jd",
5321 oldadp->ad_newblkno, newadp->ad_newblkno);
5322 newadp->ad_block.nb_jnewblk = (struct jnewblk *)
5323 jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list,
5324 &oldadp->ad_block.nb_jnewblk->jn_list,
5325 &newadp->ad_block.nb_jwork);
5326 oldadp->ad_block.nb_jnewblk = NULL;
5327 cancel_newblk(&oldadp->ad_block, NULL,
5328 &newadp->ad_block.nb_jwork);
5330 wk = (struct worklist *) cancel_newblk(&oldadp->ad_block,
5331 &freefrag->ff_list, &freefrag->ff_jwork);
5332 freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk,
5333 &freefrag->ff_jwork);
5335 free_newblk(&oldadp->ad_block);
5339 * Allocate a jfreefrag structure to journal a single block free.
5341 static struct jfreefrag *
5342 newjfreefrag(freefrag, ip, blkno, size, lbn)
5343 struct freefrag *freefrag;
5349 struct jfreefrag *jfreefrag;
5353 jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG,
5355 workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump));
5356 jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list);
5357 jfreefrag->fr_state = ATTACHED | DEPCOMPLETE;
5358 jfreefrag->fr_ino = ip->i_number;
5359 jfreefrag->fr_lbn = lbn;
5360 jfreefrag->fr_blkno = blkno;
5361 jfreefrag->fr_frags = numfrags(fs, size);
5362 jfreefrag->fr_freefrag = freefrag;
5368 * Allocate a new freefrag structure.
5370 static struct freefrag *
5371 newfreefrag(ip, blkno, size, lbn)
5377 struct freefrag *freefrag;
5380 CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd",
5381 ip->i_number, blkno, size, lbn);
5383 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
5384 panic("newfreefrag: frag size");
5385 freefrag = malloc(sizeof(struct freefrag),
5386 M_FREEFRAG, M_SOFTDEP_FLAGS);
5387 workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump));
5388 freefrag->ff_state = ATTACHED;
5389 LIST_INIT(&freefrag->ff_jwork);
5390 freefrag->ff_inum = ip->i_number;
5391 freefrag->ff_vtype = ITOV(ip)->v_type;
5392 freefrag->ff_blkno = blkno;
5393 freefrag->ff_fragsize = size;
5395 if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) {
5396 freefrag->ff_jdep = (struct worklist *)
5397 newjfreefrag(freefrag, ip, blkno, size, lbn);
5399 freefrag->ff_state |= DEPCOMPLETE;
5400 freefrag->ff_jdep = NULL;
5407 * This workitem de-allocates fragments that were replaced during
5408 * file block allocation.
5411 handle_workitem_freefrag(freefrag)
5412 struct freefrag *freefrag;
5414 struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5415 struct workhead wkhd;
5418 "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5419 freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5421 * It would be illegal to add new completion items to the
5422 * freefrag after it was scheduled to be done, so it must be
5423 * safe to modify the list head here.
5427 LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5429 * If the journal has not been written we must cancel it here.
5431 if (freefrag->ff_jdep) {
5432 if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
5433 panic("handle_workitem_freefrag: Unexpected type %d\n",
5434 freefrag->ff_jdep->wk_type);
5435 cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5438 ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5439 freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd);
5441 WORKITEM_FREE(freefrag, D_FREEFRAG);
5446 * Set up a dependency structure for an external attributes data block.
5447 * This routine follows much of the structure of softdep_setup_allocdirect.
5448 * See the description of softdep_setup_allocdirect above for details.
5451 softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5454 ufs2_daddr_t newblkno;
5455 ufs2_daddr_t oldblkno;
5460 struct allocdirect *adp, *oldadp;
5461 struct allocdirectlst *adphead;
5462 struct freefrag *freefrag;
5463 struct inodedep *inodedep;
5464 struct jnewblk *jnewblk;
5465 struct newblk *newblk;
5470 panic("softdep_setup_allocext: lbn %lld > NXADDR",
5474 mp = UFSTOVFS(ip->i_ump);
5475 if (oldblkno && oldblkno != newblkno)
5476 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5481 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5482 panic("softdep_setup_allocext: lost block");
5483 KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5484 ("softdep_setup_allocext: newblk already initialized"));
5486 * Convert the newblk to an allocdirect.
5488 newblk->nb_list.wk_type = D_ALLOCDIRECT;
5489 adp = (struct allocdirect *)newblk;
5490 newblk->nb_freefrag = freefrag;
5491 adp->ad_offset = off;
5492 adp->ad_oldblkno = oldblkno;
5493 adp->ad_newsize = newsize;
5494 adp->ad_oldsize = oldsize;
5495 adp->ad_state |= EXTDATA;
5498 * Finish initializing the journal.
5500 if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5501 jnewblk->jn_ino = ip->i_number;
5502 jnewblk->jn_lbn = lbn;
5503 add_to_journal(&jnewblk->jn_list);
5505 if (freefrag && freefrag->ff_jdep != NULL &&
5506 freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5507 add_to_journal(freefrag->ff_jdep);
5508 inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
5509 adp->ad_inodedep = inodedep;
5511 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5513 * The list of allocdirects must be kept in sorted and ascending
5514 * order so that the rollback routines can quickly determine the
5515 * first uncommitted block (the size of the file stored on disk
5516 * ends at the end of the lowest committed fragment, or if there
5517 * are no fragments, at the end of the highest committed block).
5518 * Since files generally grow, the typical case is that the new
5519 * block is to be added at the end of the list. We speed this
5520 * special case by checking against the last allocdirect in the
5521 * list before laboriously traversing the list looking for the insertion point.
5524 adphead = &inodedep->id_newextupdt;
5525 oldadp = TAILQ_LAST(adphead, allocdirectlst);
5526 if (oldadp == NULL || oldadp->ad_offset <= off) {
5527 /* insert at end of list */
5528 TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5529 if (oldadp != NULL && oldadp->ad_offset == off)
5530 allocdirect_merge(adphead, adp, oldadp);
5534 TAILQ_FOREACH(oldadp, adphead, ad_next) {
5535 if (oldadp->ad_offset >= off)
5539 panic("softdep_setup_allocext: lost entry");
5540 /* insert in middle of list */
5541 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5542 if (oldadp->ad_offset == off)
5543 allocdirect_merge(adphead, adp, oldadp);
5548 * Indirect block allocation dependencies.
5550 * The same dependencies that exist for a direct block also exist when
5551 * a new block is allocated and pointed to by an entry in a block of
5552 * indirect pointers. The undo/redo states described above are also
5553 * used here. Because an indirect block contains many pointers that
5554 * may have dependencies, a second copy of the entire in-memory indirect
5555 * block is kept. The buffer cache copy is always completely up-to-date.
5556 * The second copy, which is used only as a source for disk writes,
5557 * contains only the safe pointers (i.e., those that have no remaining
5558 * update dependencies). The second copy is freed when all pointers
5559 * are safe. The cache is not allowed to replace indirect blocks with
5560 * pending update dependencies. If a buffer containing an indirect
5561 * block with dependencies is written, these routines will mark it
5562 * dirty again. It can only be successfully written once all the
5563 * dependencies are removed. The ffs_fsync routine and
5564 * softdep_sync_metadata work together to get all the dependencies
5565 * removed so that a file can be successfully written to disk. Three
5566 * procedures are used when setting up indirect block pointer
5567 * dependencies. The division is necessary because of the organization
5568 * of the "balloc" routine and because of the distinction between file
5569 * pages and file metadata blocks.
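/*
 * A minimal sketch of the "second copy" discipline described above
 * (hypothetical helper; the real code keeps the copy in the indirdep's
 * ir_savebp and rolls pointers back in place): the cache copy stays
 * fully up to date, while the image handed to the disk driver carries
 * the old, safe value for any pointer with an unsatisfied dependency.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void
build_safe_copy(const int64_t *cache, int64_t *save, int nptrs,
    const char *unsafe, const int64_t *oldval)
{
	int i;

	/* Start from the up-to-date cache copy... */
	memcpy(save, cache, nptrs * sizeof(save[0]));
	/* ...and roll back every pointer that is not yet safe. */
	for (i = 0; i < nptrs; i++)
		if (unsafe[i])
			save[i] = oldval[i];
}
#endif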
5573 * Allocate a new allocindir structure.
5575 static struct allocindir *
5576 newallocindir(ip, ptrno, newblkno, oldblkno, lbn)
5577 struct inode *ip; /* inode for file being extended */
5578 int ptrno; /* offset of pointer in indirect block */
5579 ufs2_daddr_t newblkno; /* disk block number being added */
5580 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */
5583 struct newblk *newblk;
5584 struct allocindir *aip;
5585 struct freefrag *freefrag;
5586 struct jnewblk *jnewblk;
5589 freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn);
5593 if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0)
5594 panic("new_allocindir: lost block");
5595 KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5596 ("newallocindir: newblk already initialized"));
5597 newblk->nb_list.wk_type = D_ALLOCINDIR;
5598 newblk->nb_freefrag = freefrag;
5599 aip = (struct allocindir *)newblk;
5600 aip->ai_offset = ptrno;
5601 aip->ai_oldblkno = oldblkno;
5603 if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5604 jnewblk->jn_ino = ip->i_number;
5605 jnewblk->jn_lbn = lbn;
5606 add_to_journal(&jnewblk->jn_list);
5608 if (freefrag && freefrag->ff_jdep != NULL &&
5609 freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5610 add_to_journal(freefrag->ff_jdep);
5615 * Called just before setting an indirect block pointer
5616 * to a newly allocated file page.
5619 softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
5620 struct inode *ip; /* inode for file being extended */
5621 ufs_lbn_t lbn; /* allocated block number within file */
5622 struct buf *bp; /* buffer with indirect blk referencing page */
5623 int ptrno; /* offset of pointer in indirect block */
5624 ufs2_daddr_t newblkno; /* disk block number being added */
5625 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */
5626 struct buf *nbp; /* buffer holding allocated page */
5628 struct inodedep *inodedep;
5629 struct freefrag *freefrag;
5630 struct allocindir *aip;
5631 struct pagedep *pagedep;
5635 if (lbn != nbp->b_lblkno)
5636 panic("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
5639 "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
5640 "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
5641 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
5642 mp = UFSTOVFS(ip->i_ump);
5643 aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
5645 if (IS_SNAPSHOT(ip))
5647 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
5649 * If we are allocating a directory page, then we must
5650 * allocate an associated pagedep to track additions and deletions.
5653 if ((ip->i_mode & IFMT) == IFDIR)
5654 pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep);
5655 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5656 freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn);
5659 handle_workitem_freefrag(freefrag);
5663 * Called just before setting an indirect block pointer to a
5664 * newly allocated indirect block.
5667 softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
5668 struct buf *nbp; /* newly allocated indirect block */
5669 struct inode *ip; /* inode for file being extended */
5670 struct buf *bp; /* indirect block referencing allocated block */
5671 int ptrno; /* offset of pointer in indirect block */
5672 ufs2_daddr_t newblkno; /* disk block number being added */
5674 struct inodedep *inodedep;
5675 struct allocindir *aip;
5680 "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d",
5681 ip->i_number, newblkno, ptrno);
5682 lbn = nbp->b_lblkno;
5683 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
5684 aip = newallocindir(ip, ptrno, newblkno, 0, lbn);
5686 if (IS_SNAPSHOT(ip))
5688 inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
5689 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5690 if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn))
5691 panic("softdep_setup_allocindir_meta: Block already existed");
5696 indirdep_complete(indirdep)
5697 struct indirdep *indirdep;
5699 struct allocindir *aip;
5701 LIST_REMOVE(indirdep, ir_next);
5702 indirdep->ir_state |= DEPCOMPLETE;
5704 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) {
5705 LIST_REMOVE(aip, ai_next);
5706 free_newblk(&aip->ai_block);
5709 * If this indirdep is not attached to a buf it was simply waiting
5710 * on completion to clear completehd. free_indirdep() asserts
5711 * that nothing is dangling.
5713 if ((indirdep->ir_state & ONWORKLIST) == 0)
5714 free_indirdep(indirdep);
5717 static struct indirdep *
5718 indirdep_lookup(mp, ip, bp)
5723 struct indirdep *indirdep, *newindirdep;
5724 struct newblk *newblk;
5725 struct worklist *wk;
5729 mtx_assert(&lk, MA_OWNED);
5734 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5735 if (wk->wk_type != D_INDIRDEP)
5737 indirdep = WK_INDIRDEP(wk);
5740 /* Found on the buffer worklist, no new structure to free. */
5741 if (indirdep != NULL && newindirdep == NULL)
5743 if (indirdep != NULL && newindirdep != NULL)
5744 panic("indirdep_lookup: simultaneous create");
5745 /* None found on the buffer and a new structure is ready. */
5746 if (indirdep == NULL && newindirdep != NULL)
5748 /* None found and no new structure available. */
5750 newindirdep = malloc(sizeof(struct indirdep),
5751 M_INDIRDEP, M_SOFTDEP_FLAGS);
5752 workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp);
5753 newindirdep->ir_state = ATTACHED;
5754 if (ip->i_ump->um_fstype == UFS1)
5755 newindirdep->ir_state |= UFS1FMT;
5756 TAILQ_INIT(&newindirdep->ir_trunc);
5757 newindirdep->ir_saveddata = NULL;
5758 LIST_INIT(&newindirdep->ir_deplisthd);
5759 LIST_INIT(&newindirdep->ir_donehd);
5760 LIST_INIT(&newindirdep->ir_writehd);
5761 LIST_INIT(&newindirdep->ir_completehd);
5762 if (bp->b_blkno == bp->b_lblkno) {
5763 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
5765 bp->b_blkno = blkno;
5767 newindirdep->ir_freeblks = NULL;
5768 newindirdep->ir_savebp =
5769 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
5770 newindirdep->ir_bp = bp;
5771 BUF_KERNPROC(newindirdep->ir_savebp);
5772 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
5775 indirdep = newindirdep;
5776 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
5778 * If the block is not yet allocated we don't set DEPCOMPLETE so
5779 * that we don't free dependencies until the pointers are valid.
5780 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather
5781 * than using the hash.
5783 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk))
5784 LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next);
5786 indirdep->ir_state |= DEPCOMPLETE;
5791 * Called to finish the allocation of the "aip" allocated
5792 * by one of the two routines above.
5794 static struct freefrag *
5795 setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)
5796 struct buf *bp; /* in-memory copy of the indirect block */
5797 struct inode *ip; /* inode for file being extended */
5798 struct inodedep *inodedep; /* Inodedep for ip */
5799 struct allocindir *aip; /* allocindir allocated by the above routines */
5800 ufs_lbn_t lbn; /* Logical block number for this block. */
5803 struct indirdep *indirdep;
5804 struct allocindir *oldaip;
5805 struct freefrag *freefrag;
5808 mtx_assert(&lk, MA_OWNED);
5809 mp = UFSTOVFS(ip->i_ump);
5811 if (bp->b_lblkno >= 0)
5812 panic("setup_allocindir_phase2: not indir blk");
5813 KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs),
5814 ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset));
5815 indirdep = indirdep_lookup(mp, ip, bp);
5816 KASSERT(indirdep->ir_savebp != NULL,
5817 ("setup_allocindir_phase2 NULL ir_savebp"));
5818 aip->ai_indirdep = indirdep;
5820 * Check for an unwritten dependency for this indirect offset. If
5821 * there is, merge the old dependency into the new one. This happens
5822 * as a result of reallocblk only.
5825 if (aip->ai_oldblkno != 0) {
5826 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) {
5827 if (oldaip->ai_offset == aip->ai_offset) {
5828 freefrag = allocindir_merge(aip, oldaip);
5832 LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) {
5833 if (oldaip->ai_offset == aip->ai_offset) {
5834 freefrag = allocindir_merge(aip, oldaip);
5840 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
5845 * Merge two allocindirs which refer to the same block. Move newblock
5846 * dependencies and setup the freefrags appropriately.
5848 static struct freefrag *
5849 allocindir_merge(aip, oldaip)
5850 struct allocindir *aip;
5851 struct allocindir *oldaip;
5853 struct freefrag *freefrag;
5854 struct worklist *wk;
5856 if (oldaip->ai_newblkno != aip->ai_oldblkno)
5857 panic("allocindir_merge: blkno");
5858 aip->ai_oldblkno = oldaip->ai_oldblkno;
5859 freefrag = aip->ai_freefrag;
5860 aip->ai_freefrag = oldaip->ai_freefrag;
5861 oldaip->ai_freefrag = NULL;
5862 KASSERT(freefrag != NULL, ("allocindir_merge: No freefrag"));
5864 * If we are tracking a new directory-block allocation,
5865 * move it from the old allocindir to the new allocindir.
5867 if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
5868 WORKLIST_REMOVE(wk);
5869 if (!LIST_EMPTY(&oldaip->ai_newdirblk))
5870 panic("allocindir_merge: extra newdirblk");
5871 WORKLIST_INSERT(&aip->ai_newdirblk, wk);
5874 * We can skip journaling for this freefrag and just complete
5875 * any pending journal work for the allocindir that is being
5876 * removed after the freefrag completes.
5878 if (freefrag->ff_jdep)
5879 cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
5880 LIST_REMOVE(oldaip, ai_next);
5881 freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
5882 &freefrag->ff_list, &freefrag->ff_jwork);
5883 free_newblk(&oldaip->ai_block);
5889 setup_freedirect(freeblks, ip, i, needj)
5890 struct freeblks *freeblks;
5898 blkno = DIP(ip, i_db[i]);
5901 DIP_SET(ip, i_db[i], 0);
5902 frags = sblksize(ip->i_fs, ip->i_size, i);
5903 frags = numfrags(ip->i_fs, frags);
5904 newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj);
5908 setup_freeext(freeblks, ip, i, needj)
5909 struct freeblks *freeblks;
5917 blkno = ip->i_din2->di_extb[i];
5920 ip->i_din2->di_extb[i] = 0;
5921 frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i);
5922 frags = numfrags(ip->i_fs, frags);
5923 newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
5927 setup_freeindir(freeblks, ip, i, lbn, needj)
5928 struct freeblks *freeblks;
5936 blkno = DIP(ip, i_ib[i]);
5939 DIP_SET(ip, i_ib[i], 0);
5940 newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag,
5944 static inline struct freeblks *
5949 struct freeblks *freeblks;
5951 freeblks = malloc(sizeof(struct freeblks),
5952 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
5953 workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
5954 LIST_INIT(&freeblks->fb_jblkdephd);
5955 LIST_INIT(&freeblks->fb_jwork);
5956 freeblks->fb_ref = 0;
5957 freeblks->fb_cgwait = 0;
5958 freeblks->fb_state = ATTACHED;
5959 freeblks->fb_uid = ip->i_uid;
5960 freeblks->fb_inum = ip->i_number;
5961 freeblks->fb_vtype = ITOV(ip)->v_type;
5962 freeblks->fb_modrev = DIP(ip, i_modrev);
5963 freeblks->fb_devvp = ip->i_devvp;
5964 freeblks->fb_chkcnt = 0;
5965 freeblks->fb_len = 0;
5971 trunc_indirdep(indirdep, freeblks, bp, off)
5972 struct indirdep *indirdep;
5973 struct freeblks *freeblks;
5977 struct allocindir *aip, *aipn;
5980 * The first set of allocindirs won't be in savedbp.
5982 LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
5983 if (aip->ai_offset > off)
5984 cancel_allocindir(aip, bp, freeblks, 1);
5985 LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
5986 if (aip->ai_offset > off)
5987 cancel_allocindir(aip, bp, freeblks, 1);
5989 * These will exist in savedbp.
5991 LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
5992 if (aip->ai_offset > off)
5993 cancel_allocindir(aip, NULL, freeblks, 0);
5994 LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
5995 if (aip->ai_offset > off)
5996 cancel_allocindir(aip, NULL, freeblks, 0);
6000 * Follow the chain of indirects down to lastlbn creating a freework
6001 * structure for each. This will be used to start indir_trunc() at
6002 * the right offset and create the journal records for the partial
6003 * truncation. A second step will handle the truncated dependencies.
6006 setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno)
6007 struct freeblks *freeblks;
6013 struct indirdep *indirdep;
6014 struct indirdep *indirn;
6015 struct freework *freework;
6016 struct newblk *newblk;
6030 mp = freeblks->fb_list.wk_mp;
6031 bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
6032 if ((bp->b_flags & B_CACHE) == 0) {
6033 bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno);
6034 bp->b_iocmd = BIO_READ;
6035 bp->b_flags &= ~B_INVAL;
6036 bp->b_ioflags &= ~BIO_ERROR;
6037 vfs_busy_pages(bp, 0);
6038 bp->b_iooffset = dbtob(bp->b_blkno);
6040 curthread->td_ru.ru_inblock++;
6041 error = bufwait(bp);
6047 level = lbn_level(lbn);
6048 lbnadd = lbn_offset(ip->i_fs, level);
6050 * Compute the offset of the last block we want to keep. Store
6051 * in the freework the first block we want to completely free.
6053 off = (lastlbn - -(lbn + level)) / lbnadd;
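	/*
	 * Worked example (assuming NDADDR 12 and the first indirect at
	 * lbn -12 with level 0 and lbnadd 1): truncating to lastlbn 100
	 * gives off = (100 - 12) / 1 = 88, so pointer 88 is the last
	 * one kept and wholesale freeing starts at pointer 89.
	 */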
6054 if (off + 1 == NINDIR(ip->i_fs))
6056 freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1,
6059 * Link the freework into the indirdep. This will prevent any new
6060 * allocations from proceeding until we are finished with the
6061 * truncate and the block is written.
6064 indirdep = indirdep_lookup(mp, ip, bp);
6065 if (indirdep->ir_freeblks)
6066 panic("setup_trunc_indir: indirdep already truncated.");
6067 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
6068 freework->fw_indir = indirdep;
6070 * Cancel any allocindirs that will not make it to disk.
6071 * We have to do this for all copies of the indirdep that
6072 * live on this newblk.
6074 if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
6075 newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk);
6076 LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
6077 trunc_indirdep(indirn, freeblks, bp, off);
6079 trunc_indirdep(indirdep, freeblks, bp, off);
6082 * Creation is protected by the buf lock. The saveddata is only
6083 * needed if a full truncation follows a partial truncation but it
6084 * is difficult to allocate in that case so we fetch it anyway.
6086 if (indirdep->ir_saveddata == NULL)
6087 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
6090 /* Fetch the blkno of the child and the zero start offset. */
6091 if (ip->i_ump->um_fstype == UFS1) {
6092 blkno = ((ufs1_daddr_t *)bp->b_data)[off];
6093 start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
6095 blkno = ((ufs2_daddr_t *)bp->b_data)[off];
6096 start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
6099 /* Zero the truncated pointers. */
6100 end = bp->b_data + bp->b_bcount;
6101 bzero(start, end - start);
6107 lbn++; /* adjust level */
6108 lbn -= (off * lbnadd);
6109 return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno);
6113 * Complete the partial truncation of an indirect block set up by
6114 * setup_trunc_indir(). This zeros the truncated pointers in the saved
6115 * copy and writes them to disk before the freeblks is allowed to complete.
6118 complete_trunc_indir(freework)
6119 struct freework *freework;
6121 struct freework *fwn;
6122 struct indirdep *indirdep;
6127 indirdep = freework->fw_indir;
6129 bp = indirdep->ir_bp;
6130 /* See if the block was discarded. */
6133 /* Inline part of getdirtybuf(). We don't want bremfree. */
6134 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
6137 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, &lk) == 0)
6141 mtx_assert(&lk, MA_OWNED);
6142 freework->fw_state |= DEPCOMPLETE;
6143 TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
6145 * Zero the pointers in the saved copy.
6147 if (indirdep->ir_state & UFS1FMT)
6148 start = sizeof(ufs1_daddr_t);
6150 start = sizeof(ufs2_daddr_t);
6151 start *= freework->fw_start;
6152 count = indirdep->ir_savebp->b_bcount - start;
6153 start += (uintptr_t)indirdep->ir_savebp->b_data;
6154 bzero((char *)start, count);
6156 * We need to start the next truncation in the list if it has not been started yet.
6159 fwn = TAILQ_FIRST(&indirdep->ir_trunc);
6161 if (fwn->fw_freeblks == indirdep->ir_freeblks)
6162 TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
6163 if ((fwn->fw_state & ONWORKLIST) == 0)
6164 freework_enqueue(fwn);
6167 * If bp is NULL the block was fully truncated, restore
6168 * the saved block list; otherwise free it if it is no longer needed.
6171 if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
6173 bcopy(indirdep->ir_saveddata,
6174 indirdep->ir_savebp->b_data,
6175 indirdep->ir_savebp->b_bcount);
6176 free(indirdep->ir_saveddata, M_INDIRDEP);
6177 indirdep->ir_saveddata = NULL;
6180 * When bp is NULL there is a full truncation pending. We
6181 * must wait for this full truncation to be journaled before
6182 * we can release this freework because the disk pointers will
6183 * never be written as zero.
6186 if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
6187 handle_written_freework(freework);
6189 WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
6190 &freework->fw_list);
6192 /* Complete when the real copy is written. */
6193 WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
6199 * Calculate the number of blocks we are going to release where datablocks
6200 * is the current total and length is the new file size.
6203 blkcount(fs, datablocks, length)
6205 ufs2_daddr_t datablocks;
6208 off_t totblks, numblks;
6211 numblks = howmany(length, fs->fs_bsize);
6212 if (numblks <= NDADDR) {
6213 totblks = howmany(length, fs->fs_fsize);
6216 totblks = blkstofrags(fs, numblks);
6219 * Count all single, then double, then triple indirects required.
6220 * Subtracting one indirect's worth of blocks for each pass
6221 * acknowledges one of each pointed to by the inode.
6224 totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
6225 numblks -= NINDIR(fs);
6228 numblks = howmany(numblks, NINDIR(fs));
6231 totblks = fsbtodb(fs, totblks);
6233 * Handle sparse files. We can't reclaim more blocks than the inode
6234 * references. We will correct it later in handle_complete_freeblks()
6235 * when we know the real count.
6237 if (totblks > datablocks)
6239 return (datablocks - totblks);
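/*
 * Worked example for blkcount() (hypothetical geometry: fs_bsize 32768,
 * fs_fsize 4096, NDADDR 12, NINDIR 4096): truncating to 1MB gives
 * numblks 32, so totblks starts at blkstofrags(fs, 32) = 256 frags;
 * after the 12 direct blocks are deducted, one single-indirect pass
 * adds blkstofrags(fs, howmany(20, 4096)) = 8 frags; fsbtodb() then
 * converts 264 frags to 2112 DEV_BSIZE blocks, and the caller releases
 * datablocks - 2112.
 */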
6243 * Handle freeblocks for journaled softupdate filesystems.
6245 * Contrary to normal softupdates, we must preserve the block pointers in
6246 * indirects until their subordinates are free. This is to avoid journaling
6247 * every block that is freed which may consume more space than the journal
6248 * itself. The recovery program will see the free block journals at the
6249 * base of the truncated area and traverse them to reclaim space. The
6250 * pointers in the inode may be cleared immediately after the journal
6251 * records are written because each direct and indirect pointer in the
6252 * inode is recorded in a journal. This permits full truncation to proceed
6253 * asynchronously. The write order is journal -> inode -> cgs -> indirects.
6255 * The algorithm is as follows:
6256 * 1) Traverse the in-memory state and create journal entries to release
6257 * the relevant blocks and full indirect trees.
6258 * 2) Traverse the indirect block chain adding partial truncation freework
6259 * records to indirects in the path to lastlbn. The freework will
6260 * prevent new allocation dependencies from being satisfied in this
6261 * indirect until the truncation completes.
6262 * 3) Read and lock the inode block, performing an update with the new size
6263 * and pointers. This prevents truncated data from becoming valid on
6264 * disk through step 4.
6265 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
6266 * eliminate journal work for those records that do not require it.
6267 * 5) Schedule the journal records to be written followed by the inode block.
6268 * 6) Allocate any necessary frags for the end of file.
6269 * 7) Zero any partially truncated blocks.
6271 * From this point truncation proceeds asynchronously using the freework and
6272 * indir_trunc machinery. The file will not be extended again into a
6273 * partially truncated indirect block until all work is completed but
6274 * the normal dependency mechanism ensures that it is rolled back/forward
6275 * as appropriate. Further truncation may occur without delay and is
6276 * serialized in indir_trunc().
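/*
 * Illustrative crash scenarios for the ordering above (a hedged
 * sketch, not an exhaustive argument): a crash before the journal
 * write leaves the old, full-length file intact, while a crash after
 * it leaves free-block records that the recovery program traverses to
 * reclaim the space, regardless of how far the inode, cg, and
 * indirect writes progressed.
 */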
6279 softdep_journal_freeblocks(ip, cred, length, flags)
6280 struct inode *ip; /* The inode whose length is to be reduced */
6282 off_t length; /* The new length for the file */
6283 int flags; /* IO_EXT and/or IO_NORMAL */
6285 struct freeblks *freeblks, *fbn;
6286 struct worklist *wk, *wkn;
6287 struct inodedep *inodedep;
6288 struct jblkdep *jblkdep;
6289 struct allocdirect *adp, *adpn;
6294 ufs2_daddr_t extblocks, datablocks;
6295 ufs_lbn_t tmpval, lbn, lastlbn;
6296 int frags, lastoff, iboff, allocblock, needj, dflags, error, i;
6299 mp = UFSTOVFS(ip->i_ump);
6307 freeblks = newfreeblks(mp, ip);
6310 * If we're truncating a removed file that will never be written
6311 * we don't need to journal the block frees. The canceled journals
6312 * for the allocations will suffice.
6315 if (IS_SNAPSHOT(ip))
6317 inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6318 if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
6321 CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
6322 ip->i_number, length, needj);
6325 * Calculate the lbn that we are truncating to. This results in -1
6326 * if we're truncating to 0 bytes. So it is the last lbn we want
6327 * to keep, not the first lbn we want to truncate.
6329 lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
6330 lastoff = blkoff(fs, length);
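	/*
	 * Example (assuming fs_bsize 32768): length 0 yields
	 * lblkno(fs, 32767) - 1 = -1 (keep nothing); length 32768
	 * yields lblkno(fs, 65535) - 1 = 0 (keep exactly lbn 0); and
	 * length 5000 also keeps lbn 0, but with lastoff 5000 so only
	 * the leading fragments of that block survive.
	 */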
6332 * Compute frags we are keeping in lastlbn. 0 means all.
6334 if (lastlbn >= 0 && lastlbn < NDADDR) {
6335 frags = fragroundup(fs, lastoff);
6336 /* adp offset of last valid allocdirect. */
6338 } else if (lastlbn > 0)
6340 if (fs->fs_magic == FS_UFS2_MAGIC)
6341 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6343 * Handle normal data blocks and indirects. This section saves
6344 * values used after the inode update to complete frag and indirect truncation.
6347 if ((flags & IO_NORMAL) != 0) {
6349 * Handle truncation of whole direct and indirect blocks.
6351 for (i = iboff + 1; i < NDADDR; i++)
6352 setup_freedirect(freeblks, ip, i, needj);
6353 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6354 i++, lbn += tmpval, tmpval *= NINDIR(fs)) {
6355 /* Release a whole indirect tree. */
6356 if (lbn > lastlbn) {
6357 setup_freeindir(freeblks, ip, i, -lbn -i,
6363 * Traverse partially truncated indirect tree.
6365 if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn)
6366 setup_trunc_indir(freeblks, ip, -lbn - i,
6367 lastlbn, DIP(ip, i_ib[i]));
6370 * Handle partial truncation to a frag boundary.
6376 oldfrags = blksize(fs, ip, lastlbn);
6377 blkno = DIP(ip, i_db[lastlbn]);
6378 if (blkno && oldfrags != frags) {
6380 oldfrags = numfrags(ip->i_fs, oldfrags);
6381 blkno += numfrags(ip->i_fs, frags);
6382 newfreework(ip->i_ump, freeblks, NULL, lastlbn,
6383 blkno, oldfrags, 0, needj);
6384 } else if (blkno == 0)
6388 * Add a journal record for partial truncate if we are
6389 * handling indirect blocks. Non-indirects need no extra journaling.
6392 if (length != 0 && lastlbn >= NDADDR) {
6393 ip->i_flag |= IN_TRUNCATED;
6394 newjtrunc(freeblks, length, 0);
6396 ip->i_size = length;
6397 DIP_SET(ip, i_size, ip->i_size);
6398 datablocks = DIP(ip, i_blocks) - extblocks;
6400 datablocks = blkcount(ip->i_fs, datablocks, length);
6401 freeblks->fb_len = length;
6403 if ((flags & IO_EXT) != 0) {
6404 for (i = 0; i < NXADDR; i++)
6405 setup_freeext(freeblks, ip, i, needj);
6406 ip->i_din2->di_extsize = 0;
6407 datablocks += extblocks;
6410 /* Reference the quotas in case the block count is wrong in the end. */
6411 quotaref(vp, freeblks->fb_quota);
6412 (void) chkdq(ip, -datablocks, NOCRED, 0);
6414 freeblks->fb_chkcnt = -datablocks;
6415 UFS_LOCK(ip->i_ump);
6416 fs->fs_pendingblocks += datablocks;
6417 UFS_UNLOCK(ip->i_ump);
6418 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6420 * Handle truncation of incomplete alloc direct dependencies. We
6421 * hold the inode block locked to prevent incomplete dependencies
6422 * from reaching the disk while we are eliminating those that
6423 * have been truncated. This is a partially inlined ffs_update().
6426 ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
6427 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6428 (int)fs->fs_bsize, cred, &bp);
6431 softdep_error("softdep_journal_freeblocks", error);
6434 if (bp->b_bufsize == fs->fs_bsize)
6435 bp->b_flags |= B_CLUSTEROK;
6436 softdep_update_inodeblock(ip, bp, 0);
6437 if (ip->i_ump->um_fstype == UFS1)
6438 *((struct ufs1_dinode *)bp->b_data +
6439 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
6441 *((struct ufs2_dinode *)bp->b_data +
6442 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
6444 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6445 if ((inodedep->id_state & IOSTARTED) != 0)
6446 panic("softdep_setup_freeblocks: inode busy");
6448 * Add the freeblks structure to the list of operations that
6449 * must await the zero'ed inode being written to disk. If we
6450 * still have a bitmap dependency (needj), then the inode
6451 * has never been written to disk, so we can process the
6452 * freeblks below once we have deleted the dependencies.
6455 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6457 freeblks->fb_state |= COMPLETE;
6458 if ((flags & IO_NORMAL) != 0) {
6459 TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
6460 if (adp->ad_offset > iboff)
6461 cancel_allocdirect(&inodedep->id_inoupdt, adp,
6464 * Truncate the allocdirect. We could eliminate
6465 * or modify journal records as well.
6467 else if (adp->ad_offset == iboff && frags)
6468 adp->ad_newsize = frags;
6471 if ((flags & IO_EXT) != 0)
6472 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6473 cancel_allocdirect(&inodedep->id_extupdt, adp,
6476 * Scan the bufwait list for newblock dependencies that will never make it to disk.
6479 LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
6480 if (wk->wk_type != D_ALLOCDIRECT)
6482 adp = WK_ALLOCDIRECT(wk);
6483 if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
6484 ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
6485 cancel_jfreeblk(freeblks, adp->ad_newblkno);
6486 cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
6487 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
6493 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
6494 add_to_journal(&jblkdep->jb_list);
6498 * Truncate dependency structures beyond length.
6500 trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
6502 * This is only set when we need to allocate a fragment because
6503 * none existed at the end of a frag-sized file. It handles only
6504 * allocating a new, zero filled block.
6507 ip->i_size = length - lastoff;
6508 DIP_SET(ip, i_size, ip->i_size);
6509 error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp);
6511 softdep_error("softdep_journal_freeblks", error);
6514 ip->i_size = length;
6515 DIP_SET(ip, i_size, length);
6516 ip->i_flag |= IN_CHANGE | IN_UPDATE;
6517 allocbuf(bp, frags);
6520 } else if (lastoff != 0 && vp->v_type != VDIR) {
6524 * Zero the end of a truncated frag or block.
6526 size = sblksize(fs, length, lastlbn);
6527 error = bread(vp, lastlbn, size, cred, &bp);
6529 softdep_error("softdep_journal_freeblks", error);
6532 bzero((char *)bp->b_data + lastoff, size - lastoff);
6537 inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6538 TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next);
6539 freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST;
6541 * We zero earlier truncations so they don't erroneously update i_blocks.
6544 if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0)
6545 TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next)
6547 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE &&
6548 LIST_EMPTY(&freeblks->fb_jblkdephd))
6549 freeblks->fb_state |= INPROGRESS;
6554 handle_workitem_freeblocks(freeblks, 0);
6555 trunc_pages(ip, length, extblocks, flags);
6560 * Flush a JOP_SYNC to the journal.
6563 softdep_journal_fsync(ip)
6566 struct jfsync *jfsync;
6568 if ((ip->i_flag & IN_TRUNCATED) == 0)
6570 ip->i_flag &= ~IN_TRUNCATED;
6571 jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO);
6572 workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump));
6573 jfsync->jfs_size = ip->i_size;
6574 jfsync->jfs_ino = ip->i_number;
6576 add_to_journal(&jfsync->jfs_list);
6577 jwait(&jfsync->jfs_list, MNT_WAIT);
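/*
 * Illustrative sketch (not part of the original file): a minimal user-space
 * model of the work-item pattern used by softdep_journal_fsync() above, in
 * which a record is appended to an in-memory journal and the caller waits
 * until it has been flushed.  All names (journal_rec, journal_append,
 * journal_wait) are hypothetical; compile this standalone, not in-kernel.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct journal_rec {
	struct journal_rec *next;
	long ino;		/* inode the record describes */
	long size;		/* file size recorded at sync time */
	int written;		/* set once the record is on stable storage */
};

static struct journal_rec *jhead;

static struct journal_rec *
journal_append(long ino, long size)
{
	struct journal_rec *rec = calloc(1, sizeof(*rec));

	rec->ino = ino;
	rec->size = size;
	rec->next = jhead;
	jhead = rec;
	return (rec);
}

/* Model of jwait(): flush everything, then return with rec stable. */
static void
journal_wait(struct journal_rec *rec)
{
	for (struct journal_rec *jr = jhead; jr != NULL; jr = jr->next)
		jr->written = 1;	/* pretend the disk write finished */
	printf("ino %ld size %ld written=%d\n", rec->ino, rec->size,
	    rec->written);
}

int
main(void)
{
	journal_wait(journal_append(42, 8192));
	return (0);
}
#endif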
/*
 * Block de-allocation dependencies.
 *
6584 * When blocks are de-allocated, the on-disk pointers must be nullified before
6585 * the blocks are made available for use by other files. (The true
6586 * requirement is that old pointers must be nullified before new on-disk
6587 * pointers are set. We chose this slightly more stringent requirement to
6588 * reduce complexity.) Our implementation handles this dependency by updating
6589 * the inode (or indirect block) appropriately but delaying the actual block
6590 * de-allocation (i.e., freemap and free space count manipulation) until
6591 * after the updated versions reach stable storage. After the disk is
6592 * updated, the blocks can be safely de-allocated whenever it is convenient.
6593 * This implementation handles only the common case of reducing a file's
6594 * length to zero. Other cases are handled by the conventional synchronous
6597 * The ffs implementation with which we worked double-checks
6598 * the state of the block pointers and file size as it reduces
6599 * a file's length. Some of this code is replicated here in our
6600 * soft updates implementation. The freeblks->fb_chkcnt field is
6601 * used to transfer a part of this information to the procedure
6602 * that eventually de-allocates the blocks.
6604 * This routine should be called from the routine that shortens
6605 * a file's length, before the inode's size or block pointers
6606 * are modified. It will save the block pointer information for
 * later release and zero the inode so that the calling routine
 * can release it.
 */
6611 softdep_setup_freeblocks(ip, length, flags)
6612 struct inode *ip; /* The inode whose length is to be reduced */
6613 off_t length; /* The new length for the file */
6614 int flags; /* IO_EXT and/or IO_NORMAL */
6616 struct ufs1_dinode *dp1;
6617 struct ufs2_dinode *dp2;
6618 struct freeblks *freeblks;
6619 struct inodedep *inodedep;
6620 struct allocdirect *adp;
6623 ufs2_daddr_t extblocks, datablocks;
6625 int i, delay, error, dflags;
6629 CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld",
6630 ip->i_number, length);
6632 mp = UFSTOVFS(ip->i_ump);
6634 panic("softdep_setup_freeblocks: non-zero length");
6635 freeblks = newfreeblks(mp, ip);
6638 if (fs->fs_magic == FS_UFS2_MAGIC)
6639 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6640 if ((flags & IO_NORMAL) != 0) {
6641 for (i = 0; i < NDADDR; i++)
6642 setup_freedirect(freeblks, ip, i, 0);
6643 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6644 i++, lbn += tmpval, tmpval *= NINDIR(fs))
			setup_freeindir(freeblks, ip, i, -lbn - i, 0);
6647 DIP_SET(ip, i_size, 0);
6648 datablocks = DIP(ip, i_blocks) - extblocks;
6650 if ((flags & IO_EXT) != 0) {
6651 for (i = 0; i < NXADDR; i++)
6652 setup_freeext(freeblks, ip, i, 0);
6653 ip->i_din2->di_extsize = 0;
6654 datablocks += extblocks;
6657 /* Reference the quotas in case the block count is wrong in the end. */
6658 quotaref(ITOV(ip), freeblks->fb_quota);
6659 (void) chkdq(ip, -datablocks, NOCRED, 0);
6661 freeblks->fb_chkcnt = -datablocks;
6662 UFS_LOCK(ip->i_ump);
6663 fs->fs_pendingblocks += datablocks;
6664 UFS_UNLOCK(ip->i_ump);
6665 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
	/*
	 * Push the zero'ed inode to its disk buffer so that we are free
	 * to delete its dependencies below.  Once the dependencies are gone
	 * the buffer can be safely released.
	 */
6671 if ((error = bread(ip->i_devvp,
6672 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6673 (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
6675 softdep_error("softdep_setup_freeblocks", error);
6677 if (ip->i_ump->um_fstype == UFS1) {
6678 dp1 = ((struct ufs1_dinode *)bp->b_data +
6679 ino_to_fsbo(fs, ip->i_number));
6680 ip->i_din1->di_freelink = dp1->di_freelink;
6683 dp2 = ((struct ufs2_dinode *)bp->b_data +
6684 ino_to_fsbo(fs, ip->i_number));
6685 ip->i_din2->di_freelink = dp2->di_freelink;
6689 * Find and eliminate any inode dependencies.
6693 if (IS_SNAPSHOT(ip))
6695 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6696 if ((inodedep->id_state & IOSTARTED) != 0)
6697 panic("softdep_setup_freeblocks: inode busy");
	/*
	 * Add the freeblks structure to the list of operations that
	 * must await the zero'ed inode being written to disk.  If we
	 * still have a bitmap dependency (delay == 0), then the inode
	 * has never been written to disk, so we can process the
	 * freeblks below once we have deleted the dependencies.
	 */
6705 delay = (inodedep->id_state & DEPCOMPLETE);
6707 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6709 freeblks->fb_state |= COMPLETE;
6711 * Because the file length has been truncated to zero, any
6712 * pending block allocation dependency structures associated
6713 * with this inode are obsolete and can simply be de-allocated.
6714 * We must first merge the two dependency lists to get rid of
6715 * any duplicate freefrag structures, then purge the merged list.
6716 * If we still have a bitmap dependency, then the inode has never
6717 * been written to disk, so we can free any fragments without delay.
6719 if (flags & IO_NORMAL) {
6720 merge_inode_lists(&inodedep->id_newinoupdt,
6721 &inodedep->id_inoupdt);
6722 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
6723 cancel_allocdirect(&inodedep->id_inoupdt, adp,
6726 if (flags & IO_EXT) {
6727 merge_inode_lists(&inodedep->id_newextupdt,
6728 &inodedep->id_extupdt);
6729 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6730 cancel_allocdirect(&inodedep->id_extupdt, adp,
6735 trunc_dependencies(ip, freeblks, -1, 0, flags);
6737 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
6738 (void) free_inodedep(inodedep);
6739 freeblks->fb_state |= DEPCOMPLETE;
6741 * If the inode with zeroed block pointers is now on disk
6742 * we can start freeing blocks.
6744 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
6745 freeblks->fb_state |= INPROGRESS;
6750 handle_workitem_freeblocks(freeblks, 0);
6751 trunc_pages(ip, length, extblocks, flags);
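/*
 * Illustrative sketch (not part of the original file): the ordering rule
 * softdep_setup_freeblocks() implements, modeled in user space.  Block
 * pointers are saved and nullified first; the saved blocks are released to
 * a free map only after the inode write is known to have completed.  All
 * names here are hypothetical.
 */
#if 0
#include <stdio.h>

#define	NPTRS	12

struct toy_inode { long ptrs[NPTRS]; };

static long pending[NPTRS];	/* blocks awaiting the inode write */
static int npending;

static void
toy_setup_freeblocks(struct toy_inode *ip)
{
	for (int i = 0; i < NPTRS; i++) {
		if (ip->ptrs[i] != 0)
			pending[npending++] = ip->ptrs[i];
		ip->ptrs[i] = 0;	/* nullify before any free */
	}
}

/* Called only once the zero'ed inode has reached stable storage. */
static void
toy_inode_write_done(void)
{
	for (int i = 0; i < npending; i++)
		printf("block %ld may now be freed\n", pending[i]);
	npending = 0;
}

int
main(void)
{
	struct toy_inode ino = { .ptrs = { 100, 101, 0, 102 } };

	toy_setup_freeblocks(&ino);	/* save and nullify the pointers */
	toy_inode_write_done();		/* safe to touch the free maps */
	return (0);
}
#endif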
6755 * Eliminate pages from the page cache that back parts of this inode and
6756 * adjust the vnode pager's idea of our size. This prevents stale data
6757 * from hanging around in the page cache.
6760 trunc_pages(ip, length, extblocks, flags)
6763 ufs2_daddr_t extblocks;
6773 extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
6774 if ((flags & IO_EXT) != 0)
6775 vn_pages_remove(vp, extend, 0);
6776 if ((flags & IO_NORMAL) == 0)
6778 BO_LOCK(&vp->v_bufobj);
6780 BO_UNLOCK(&vp->v_bufobj);
	/*
	 * The vnode pager eliminates file pages; we eliminate the pages
	 * backing indirect blocks below.
	 */
6785 vnode_pager_setsize(vp, length);
	/*
	 * Calculate the end based on the last indirect we want to keep.  If
	 * the block extends into indirects we can just use the negative of
	 * its lbn.  Doubles and triples exist at lower numbers, so we must
	 * be careful not to remove those, if they exist.  Double and triple
	 * indirect lbns do not overlap with others, so it is not important
	 * to verify how many levels are required.
	 */
6794 lbn = lblkno(fs, length);
6795 if (lbn >= NDADDR) {
6796 /* Calculate the virtual lbn of the triple indirect. */
6797 lbn = -lbn - (NIADDR - 1);
6798 end = OFF_TO_IDX(lblktosize(fs, lbn));
6801 vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
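/*
 * Illustrative sketch (not part of the original file): how the negative
 * "virtual" lbns used above for cached indirect blocks line up with the
 * data lbns they cover.  Level i's root covers data lbns starting at
 * NDADDR + NINDIR + NINDIR^2 + ... and is cached at -lbn - i, as in the
 * setup_freeindir() loop in softdep_setup_freeblocks().  The geometry
 * below (12/3/2048) is hypothetical; compile standalone.
 */
#if 0
#include <stdio.h>

#define	TOY_NDADDR	12	/* direct pointers per inode */
#define	TOY_NIADDR	3	/* indirect pointers per inode */
#define	TOY_NINDIR	2048	/* pointers per indirect block */

int
main(void)
{
	long lbn, tmpval;
	int i;

	for (i = 0, tmpval = TOY_NINDIR, lbn = TOY_NDADDR; i < TOY_NIADDR;
	    i++, lbn += tmpval, tmpval *= TOY_NINDIR)
		printf("level %d: first data lbn %ld, virtual lbn %ld\n",
		    i, lbn, -lbn - i);
	return (0);
}
#endif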
6805 * See if the buf bp is in the range eliminated by truncation.
6808 trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags)
6818 /* Only match ext/normal blocks as appropriate. */
6819 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
6820 ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
6822 /* ALTDATA is always a full truncation. */
6823 if ((bp->b_xflags & BX_ALTDATA) != 0)
6825 /* -1 is full truncation. */
6829 * If this is a partial truncate we only want those
6830 * blocks and indirect blocks that cover the range
6835 lbn = -(lbn + lbn_level(lbn));
6838 /* Here we only truncate lblkno if it's partial. */
6839 if (lbn == lastlbn) {
6848 * Eliminate any dependencies that exist in memory beyond lblkno:off
6851 trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
6853 struct freeblks *freeblks;
	/*
	 * We must wait for any I/O in progress to finish so that
	 * all potential buffers on the dirty list will be visible.
	 * Once they are all there, walk the list and get rid of
	 * our dependencies.
	 */
6875 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
6876 bp->b_vflags &= ~BV_SCANNED;
6878 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
6879 if (bp->b_vflags & BV_SCANNED)
6881 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
6882 bp->b_vflags |= BV_SCANNED;
6885 if ((bp = getdirtybuf(bp, BO_MTX(bo), MNT_WAIT)) == NULL)
6888 if (deallocate_dependencies(bp, freeblks, blkoff))
6896 * Now do the work of vtruncbuf while also matching indirect blocks.
6898 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
6899 bp->b_vflags &= ~BV_SCANNED;
6901 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
6902 if (bp->b_vflags & BV_SCANNED)
6904 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
6905 bp->b_vflags |= BV_SCANNED;
6909 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
6910 BO_MTX(bo)) == ENOLCK) {
6914 bp->b_vflags |= BV_SCANNED;
6919 allocbuf(bp, blkoff);
6922 bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
6933 cancel_pagedep(pagedep, freeblks, blkoff)
6934 struct pagedep *pagedep;
6935 struct freeblks *freeblks;
6938 struct jremref *jremref;
6939 struct jmvref *jmvref;
6940 struct dirrem *dirrem, *tmp;
	/*
	 * Copy any directory remove dependencies to the list
	 * to be processed after the freeblks proceeds.  If the
	 * directory entries never made it to disk, they
	 * can be dumped directly onto the work list.
	 */
6949 LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
6950 /* Skip this directory removal if it is intended to remain. */
6951 if (dirrem->dm_offset < blkoff)
6954 * If there are any dirrems we wait for the journal write
6955 * to complete and then restart the buf scan as the lock
6958 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
6959 jwait(&jremref->jr_list, MNT_WAIT);
6962 LIST_REMOVE(dirrem, dm_next);
6963 dirrem->dm_dirinum = pagedep->pd_ino;
6964 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
6966 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
6967 jwait(&jmvref->jm_list, MNT_WAIT);
	/*
	 * When we're partially truncating a pagedep we just want to flush
	 * journal entries and return.  There cannot be any adds in the
	 * truncated portion of the directory and newblk must remain if
	 * part of the block remains.
	 */
6979 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
6980 if (dap->da_offset > blkoff)
6981 panic("cancel_pagedep: diradd %p off %d > %d",
6982 dap, dap->da_offset, blkoff);
6983 for (i = 0; i < DAHASHSZ; i++)
6984 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
6985 if (dap->da_offset > blkoff)
6986 panic("cancel_pagedep: diradd %p off %d > %d",
6987 dap, dap->da_offset, blkoff);
6991 * There should be no directory add dependencies present
6992 * as the directory could not be truncated until all
6993 * children were removed.
6995 KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL,
6996 ("deallocate_dependencies: pendinghd != NULL"));
6997 for (i = 0; i < DAHASHSZ; i++)
6998 KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL,
6999 ("deallocate_dependencies: diraddhd != NULL"));
7000 if ((pagedep->pd_state & NEWBLOCK) != 0)
7001 free_newdirblk(pagedep->pd_newdirblk);
7002 if (free_pagedep(pagedep) == 0)
7003 panic("Failed to free pagedep %p", pagedep);
7008 * Reclaim any dependency structures from a buffer that is about to
7009 * be reallocated to a new vnode. The buffer must be locked, thus,
7010 * no I/O completion operations can occur while we are manipulating
7011 * its associated dependencies. The mutex is held so that other I/O's
7012 * associated with related dependencies do not occur.
7015 deallocate_dependencies(bp, freeblks, off)
7017 struct freeblks *freeblks;
7020 struct indirdep *indirdep;
7021 struct pagedep *pagedep;
7022 struct allocdirect *adp;
7023 struct worklist *wk, *wkn;
7026 LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) {
7027 switch (wk->wk_type) {
7029 indirdep = WK_INDIRDEP(wk);
7030 if (bp->b_lblkno >= 0 ||
7031 bp->b_blkno != indirdep->ir_savebp->b_lblkno)
7032 panic("deallocate_dependencies: not indir");
7033 cancel_indirdep(indirdep, bp, freeblks);
7037 pagedep = WK_PAGEDEP(wk);
7038 if (cancel_pagedep(pagedep, freeblks, off)) {
7046 * Simply remove the allocindir, we'll find it via
7047 * the indirdep where we can clear pointers if
7050 WORKLIST_REMOVE(wk);
7055 * A truncation is waiting for the zero'd pointers
7056 * to be written. It can be freed when the freeblks
7059 WORKLIST_REMOVE(wk);
7060 wk->wk_state |= ONDEPLIST;
7061 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
7065 adp = WK_ALLOCDIRECT(wk);
7070 panic("deallocate_dependencies: Unexpected type %s",
7071 TYPENAME(wk->wk_type));
7077 * Don't throw away this buf, we were partially truncating and
7078 * some deps may always remain.
7082 bp->b_vflags |= BV_SCANNED;
7085 bp->b_flags |= B_INVAL | B_NOCACHE;
7091 * An allocdirect is being canceled due to a truncate. We must make sure
7092 * the journal entry is released in concert with the blkfree that releases
7093 * the storage. Completed journal entries must not be released until the
7094 * space is no longer pointed to by the inode or in the bitmap.
7097 cancel_allocdirect(adphead, adp, freeblks)
7098 struct allocdirectlst *adphead;
7099 struct allocdirect *adp;
7100 struct freeblks *freeblks;
7102 struct freework *freework;
7103 struct newblk *newblk;
7104 struct worklist *wk;
7106 TAILQ_REMOVE(adphead, adp, ad_next);
7107 newblk = (struct newblk *)adp;
7110 * Find the correct freework structure.
7112 LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) {
7113 if (wk->wk_type != D_FREEWORK)
7115 freework = WK_FREEWORK(wk);
7116 if (freework->fw_blkno == newblk->nb_newblkno)
7119 if (freework == NULL)
7120 panic("cancel_allocdirect: Freework not found");
7122 * If a newblk exists at all we still have the journal entry that
7123 * initiated the allocation so we do not need to journal the free.
7125 cancel_jfreeblk(freeblks, freework->fw_blkno);
7127 * If the journal hasn't been written the jnewblk must be passed
7128 * to the call to ffs_blkfree that reclaims the space. We accomplish
7129 * this by linking the journal dependency into the freework to be
7130 * freed when freework_freeblock() is called. If the journal has
7131 * been written we can simply reclaim the journal space when the
7132 * freeblks work is complete.
7134 freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list,
7135 &freeblks->fb_jwork);
7136 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
7141 * Cancel a new block allocation. May be an indirect or direct block. We
7142 * remove it from various lists and return any journal record that needs to
7143 * be resolved by the caller.
7145 * A special consideration is made for indirects which were never pointed
7146 * at on disk and will never be found once this block is released.
7148 static struct jnewblk *
7149 cancel_newblk(newblk, wk, wkhd)
7150 struct newblk *newblk;
7151 struct worklist *wk;
7152 struct workhead *wkhd;
7154 struct jnewblk *jnewblk;
7156 CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno);
7158 newblk->nb_state |= GOINGAWAY;
7160 * Previously we traversed the completedhd on each indirdep
7161 * attached to this newblk to cancel them and gather journal
7162 * work. Since we need only the oldest journal segment and
7163 * the lowest point on the tree will always have the oldest
7164 * journal segment we are free to release the segments
7165 * of any subordinates and may leave the indirdep list to
7166 * indirdep_complete() when this newblk is freed.
7168 if (newblk->nb_state & ONDEPLIST) {
7169 newblk->nb_state &= ~ONDEPLIST;
7170 LIST_REMOVE(newblk, nb_deps);
7172 if (newblk->nb_state & ONWORKLIST)
7173 WORKLIST_REMOVE(&newblk->nb_list);
7175 * If the journal entry hasn't been written we save a pointer to
7176 * the dependency that frees it until it is written or the
7177 * superseding operation completes.
7179 jnewblk = newblk->nb_jnewblk;
7180 if (jnewblk != NULL && wk != NULL) {
7181 newblk->nb_jnewblk = NULL;
7182 jnewblk->jn_dep = wk;
7184 if (!LIST_EMPTY(&newblk->nb_jwork))
7185 jwork_move(wkhd, &newblk->nb_jwork);
7187 * When truncating we must free the newdirblk early to remove
7188 * the pagedep from the hash before returning.
7190 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7191 free_newdirblk(WK_NEWDIRBLK(wk));
7192 if (!LIST_EMPTY(&newblk->nb_newdirblk))
7193 panic("cancel_newblk: extra newdirblk");
7199 * Schedule the freefrag associated with a newblk to be released once
7200 * the pointers are written and the previous block is no longer needed.
7203 newblk_freefrag(newblk)
7204 struct newblk *newblk;
7206 struct freefrag *freefrag;
7208 if (newblk->nb_freefrag == NULL)
7210 freefrag = newblk->nb_freefrag;
7211 newblk->nb_freefrag = NULL;
7212 freefrag->ff_state |= COMPLETE;
7213 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
7214 add_to_worklist(&freefrag->ff_list, 0);
7218 * Free a newblk. Generate a new freefrag work request if appropriate.
7219 * This must be called after the inode pointer and any direct block pointers
7220 * are valid or fully removed via truncate or frag extension.
7224 struct newblk *newblk;
7226 struct indirdep *indirdep;
7227 struct worklist *wk;
7229 KASSERT(newblk->nb_jnewblk == NULL,
7230 ("free_newblk; jnewblk %p still attached", newblk->nb_jnewblk));
7231 mtx_assert(&lk, MA_OWNED);
7232 newblk_freefrag(newblk);
7233 if (newblk->nb_state & ONDEPLIST)
7234 LIST_REMOVE(newblk, nb_deps);
7235 if (newblk->nb_state & ONWORKLIST)
7236 WORKLIST_REMOVE(&newblk->nb_list);
7237 LIST_REMOVE(newblk, nb_hash);
7238 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7239 free_newdirblk(WK_NEWDIRBLK(wk));
7240 if (!LIST_EMPTY(&newblk->nb_newdirblk))
7241 panic("free_newblk: extra newdirblk");
7242 while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL)
7243 indirdep_complete(indirdep);
7244 handle_jwork(&newblk->nb_jwork);
7245 newblk->nb_list.wk_type = D_NEWBLK;
7246 WORKITEM_FREE(newblk, D_NEWBLK);
7250 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
7251 * This routine must be called with splbio interrupts blocked.
7254 free_newdirblk(newdirblk)
7255 struct newdirblk *newdirblk;
7257 struct pagedep *pagedep;
7259 struct worklist *wk;
7261 mtx_assert(&lk, MA_OWNED);
7262 WORKLIST_REMOVE(&newdirblk->db_list);
7264 * If the pagedep is still linked onto the directory buffer
7265 * dependency chain, then some of the entries on the
7266 * pd_pendinghd list may not be committed to disk yet. In
7267 * this case, we will simply clear the NEWBLOCK flag and
7268 * let the pd_pendinghd list be processed when the pagedep
7269 * is next written. If the pagedep is no longer on the buffer
7270 * dependency chain, then all the entries on the pd_pending
7271 * list are committed to disk and we can free them here.
7273 pagedep = newdirblk->db_pagedep;
7274 pagedep->pd_state &= ~NEWBLOCK;
7275 if ((pagedep->pd_state & ONWORKLIST) == 0) {
7276 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
7277 free_diradd(dap, NULL);
7279 * If no dependencies remain, the pagedep will be freed.
7281 free_pagedep(pagedep);
7283 /* Should only ever be one item in the list. */
7284 while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7285 WORKLIST_REMOVE(wk);
7286 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7288 WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7292 * Prepare an inode to be freed. The actual free operation is not
7293 * done until the zero'ed inode has been written to disk.
7296 softdep_freefile(pvp, ino, mode)
7301 struct inode *ip = VTOI(pvp);
7302 struct inodedep *inodedep;
7303 struct freefile *freefile;
7304 struct freeblks *freeblks;
7307 * This sets up the inode de-allocation dependency.
7309 freefile = malloc(sizeof(struct freefile),
7310 M_FREEFILE, M_SOFTDEP_FLAGS);
7311 workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7312 freefile->fx_mode = mode;
7313 freefile->fx_oldinum = ino;
7314 freefile->fx_devvp = ip->i_devvp;
7315 LIST_INIT(&freefile->fx_jwork);
7316 UFS_LOCK(ip->i_ump);
7317 ip->i_fs->fs_pendinginodes += 1;
7318 UFS_UNLOCK(ip->i_ump);
7321 * If the inodedep does not exist, then the zero'ed inode has
7322 * been written to disk. If the allocated inode has never been
7323 * written to disk, then the on-disk inode is zero'ed. In either
7324 * case we can free the file immediately. If the journal was
7325 * canceled before being written the inode will never make it to
	 * disk and we must send the canceled journal entries to
7327 * ffs_freefile() to be cleared in conjunction with the bitmap.
7328 * Any blocks waiting on the inode to write can be safely freed
	 * here as it will never be written.
7332 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7335 * Clear out freeblks that no longer need to reference
7339 TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7340 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7342 freeblks->fb_state &= ~ONDEPLIST;
7345 * Remove this inode from the unlinked list.
7347 if (inodedep->id_state & UNLINKED) {
7349 * Save the journal work to be freed with the bitmap
7350 * before we clear UNLINKED. Otherwise it can be lost
7351 * if the inode block is written.
7353 handle_bufwait(inodedep, &freefile->fx_jwork);
7354 clear_unlinked_inodedep(inodedep);
7355 /* Re-acquire inodedep as we've dropped lk. */
7356 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7359 if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7361 handle_workitem_freefile(freefile);
7364 if ((inodedep->id_state & DEPCOMPLETE) == 0)
7365 inodedep->id_state |= GOINGAWAY;
7366 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7368 if (ip->i_number == ino)
7369 ip->i_flag |= IN_MODIFIED;
7373 * Check to see if an inode has never been written to disk. If
7374 * so free the inodedep and return success, otherwise return failure.
7375 * This routine must be called with splbio interrupts blocked.
7377 * If we still have a bitmap dependency, then the inode has never
7378 * been written to disk. Drop the dependency as it is no longer
7379 * necessary since the inode is being deallocated. We set the
7380 * ALLCOMPLETE flags since the bitmap now properly shows that the
7381 * inode is not allocated. Even if the inode is actively being
7382 * written, it has been rolled back to its zero'ed state, so we
7383 * are ensured that a zero inode is what is on the disk. For short
7384 * lived files, this change will usually result in removing all the
7385 * dependencies from the inode so that it can be freed immediately.
7388 check_inode_unwritten(inodedep)
7389 struct inodedep *inodedep;
7392 mtx_assert(&lk, MA_OWNED);
7394 if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 ||
7395 !LIST_EMPTY(&inodedep->id_dirremhd) ||
7396 !LIST_EMPTY(&inodedep->id_pendinghd) ||
7397 !LIST_EMPTY(&inodedep->id_bufwait) ||
7398 !LIST_EMPTY(&inodedep->id_inowait) ||
7399 !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7400 !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7401 !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7402 !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7403 !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7404 !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7405 inodedep->id_mkdiradd != NULL ||
7406 inodedep->id_nlinkdelta != 0)
7409 * Another process might be in initiate_write_inodeblock_ufs[12]
7410 * trying to allocate memory without holding "Softdep Lock".
7412 if ((inodedep->id_state & IOSTARTED) != 0 &&
7413 inodedep->id_savedino1 == NULL)
7416 if (inodedep->id_state & ONDEPLIST)
7417 LIST_REMOVE(inodedep, id_deps);
7418 inodedep->id_state &= ~ONDEPLIST;
7419 inodedep->id_state |= ALLCOMPLETE;
7420 inodedep->id_bmsafemap = NULL;
7421 if (inodedep->id_state & ONWORKLIST)
7422 WORKLIST_REMOVE(&inodedep->id_list);
7423 if (inodedep->id_savedino1 != NULL) {
7424 free(inodedep->id_savedino1, M_SAVEDINO);
7425 inodedep->id_savedino1 = NULL;
7427 if (free_inodedep(inodedep) == 0)
7428 panic("check_inode_unwritten: busy inode");
7433 * Try to free an inodedep structure. Return 1 if it could be freed.
7436 free_inodedep(inodedep)
7437 struct inodedep *inodedep;
7440 mtx_assert(&lk, MA_OWNED);
7441 if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 ||
7442 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
7443 !LIST_EMPTY(&inodedep->id_dirremhd) ||
7444 !LIST_EMPTY(&inodedep->id_pendinghd) ||
7445 !LIST_EMPTY(&inodedep->id_bufwait) ||
7446 !LIST_EMPTY(&inodedep->id_inowait) ||
7447 !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7448 !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7449 !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7450 !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7451 !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7452 !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7453 inodedep->id_mkdiradd != NULL ||
7454 inodedep->id_nlinkdelta != 0 ||
7455 inodedep->id_savedino1 != NULL)
7457 if (inodedep->id_state & ONDEPLIST)
7458 LIST_REMOVE(inodedep, id_deps);
7459 LIST_REMOVE(inodedep, id_hash);
7460 WORKITEM_FREE(inodedep, D_INODEDEP);
7465 * Free the block referenced by a freework structure. The parent freeblks
7466 * structure is released and completed when the final cg bitmap reaches
7467 * the disk. This routine may be freeing a jnewblk which never made it to
 * disk, in which case we do not have to wait as the operation is undone
7469 * in memory immediately.
7472 freework_freeblock(freework)
7473 struct freework *freework;
7475 struct freeblks *freeblks;
7476 struct jnewblk *jnewblk;
7477 struct ufsmount *ump;
7478 struct workhead wkhd;
7483 mtx_assert(&lk, MA_OWNED);
7485 * Handle partial truncate separately.
7487 if (freework->fw_indir) {
7488 complete_trunc_indir(freework);
7491 freeblks = freework->fw_freeblks;
7492 ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7494 needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0;
7495 bsize = lfragtosize(fs, freework->fw_frags);
7498 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives
7499 * on the indirblk hashtable and prevents premature freeing.
7501 freework->fw_state |= DEPCOMPLETE;
7503 * SUJ needs to wait for the segment referencing freed indirect
7504 * blocks to expire so that we know the checker will not confuse
7505 * a re-allocated indirect block with its old contents.
7507 if (needj && freework->fw_lbn <= -NDADDR)
7508 indirblk_insert(freework);
7510 * If we are canceling an existing jnewblk pass it to the free
7511 * routine, otherwise pass the freeblk which will ultimately
7512 * release the freeblks. If we're not journaling, we can just
7513 * free the freeblks immediately.
7515 jnewblk = freework->fw_jnewblk;
7516 if (jnewblk != NULL) {
7517 cancel_jnewblk(jnewblk, &wkhd);
7520 freework->fw_state |= DELAYEDFREE;
7521 freeblks->fb_cgwait++;
7522 WORKLIST_INSERT(&wkhd, &freework->fw_list);
7525 freeblks_free(ump, freeblks, btodb(bsize));
7527 "freework_freeblock: ino %d blkno %jd lbn %jd size %ld",
7528 freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize);
7529 ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize,
7530 freeblks->fb_inum, freeblks->fb_vtype, &wkhd);
7533 * The jnewblk will be discarded and the bits in the map never
7534 * made it to disk. We can immediately free the freeblk.
7537 handle_written_freework(freework);
7541 * We enqueue freework items that need processing back on the freeblks and
7542 * add the freeblks to the worklist. This makes it easier to find all work
7543 * required to flush a truncation in process_truncates().
7546 freework_enqueue(freework)
7547 struct freework *freework;
7549 struct freeblks *freeblks;
7551 freeblks = freework->fw_freeblks;
7552 if ((freework->fw_state & INPROGRESS) == 0)
7553 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
7554 if ((freeblks->fb_state &
7555 (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE &&
7556 LIST_EMPTY(&freeblks->fb_jblkdephd))
7557 add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7561 * Start, continue, or finish the process of freeing an indirect block tree.
7562 * The free operation may be paused at any point with fw_off containing the
7563 * offset to restart from. This enables us to implement some flow control
7564 * for large truncates which may fan out and generate a huge number of
7568 handle_workitem_indirblk(freework)
7569 struct freework *freework;
7571 struct freeblks *freeblks;
7572 struct ufsmount *ump;
7575 freeblks = freework->fw_freeblks;
7576 ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7578 if (freework->fw_state & DEPCOMPLETE) {
7579 handle_written_freework(freework);
7582 if (freework->fw_off == NINDIR(fs)) {
7583 freework_freeblock(freework);
7586 freework->fw_state |= INPROGRESS;
7588 indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
7594 * Called when a freework structure attached to a cg buf is written. The
7595 * ref on either the parent or the freeblks structure is released and
7596 * the freeblks is added back to the worklist if there is more work to do.
7599 handle_written_freework(freework)
7600 struct freework *freework;
7602 struct freeblks *freeblks;
7603 struct freework *parent;
7605 freeblks = freework->fw_freeblks;
7606 parent = freework->fw_parent;
7607 if (freework->fw_state & DELAYEDFREE)
7608 freeblks->fb_cgwait--;
7609 freework->fw_state |= COMPLETE;
7610 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
7611 WORKITEM_FREE(freework, D_FREEWORK);
7613 if (--parent->fw_ref == 0)
7614 freework_enqueue(parent);
7617 if (--freeblks->fb_ref != 0)
7619 if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) ==
7620 ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd))
7621 add_to_worklist(&freeblks->fb_list, WK_NODELAY);
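/*
 * Illustrative sketch (not part of the original file): the reference-count
 * propagation handle_written_freework() performs, where a parent work item
 * completes only when its last child does.  Names are hypothetical; compile
 * standalone.
 */
#if 0
#include <stdio.h>

struct toy_work {
	struct toy_work *parent;
	int ref;		/* outstanding children */
	const char *name;
};

static void
toy_complete(struct toy_work *w)
{
	printf("%s complete\n", w->name);
	if (w->parent != NULL && --w->parent->ref == 0)
		toy_complete(w->parent);	/* last child finished */
}

int
main(void)
{
	struct toy_work root = { NULL, 2, "indirect block" };
	struct toy_work a = { &root, 0, "data block A" };
	struct toy_work b = { &root, 0, "data block B" };

	toy_complete(&a);
	toy_complete(&b);	/* drops the last ref on "indirect block" */
	return (0);
}
#endif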
7625 * This workitem routine performs the block de-allocation.
7626 * The workitem is added to the pending list after the updated
7627 * inode block has been written to disk. As mentioned above,
7628 * checks regarding the number of blocks de-allocated (compared
7629 * to the number of blocks allocated for the file) are also
7630 * performed in this function.
7633 handle_workitem_freeblocks(freeblks, flags)
7634 struct freeblks *freeblks;
7637 struct freework *freework;
7638 struct newblk *newblk;
7639 struct allocindir *aip;
7640 struct ufsmount *ump;
7641 struct worklist *wk;
7643 KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd),
7644 ("handle_workitem_freeblocks: Journal entries not written."));
7645 ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7647 while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
7648 WORKLIST_REMOVE(wk);
7649 switch (wk->wk_type) {
7651 wk->wk_state |= COMPLETE;
7652 add_to_worklist(wk, 0);
7656 free_newblk(WK_NEWBLK(wk));
7660 aip = WK_ALLOCINDIR(wk);
7662 if (aip->ai_state & DELAYEDFREE) {
7664 freework = newfreework(ump, freeblks, NULL,
7665 aip->ai_lbn, aip->ai_newblkno,
7666 ump->um_fs->fs_frag, 0, 0);
7669 newblk = WK_NEWBLK(wk);
7670 if (newblk->nb_jnewblk) {
7671 freework->fw_jnewblk = newblk->nb_jnewblk;
7672 newblk->nb_jnewblk->jn_dep = &freework->fw_list;
7673 newblk->nb_jnewblk = NULL;
7675 free_newblk(newblk);
7679 freework = WK_FREEWORK(wk);
7680 if (freework->fw_lbn <= -NDADDR)
7681 handle_workitem_indirblk(freework);
7683 freework_freeblock(freework);
7686 panic("handle_workitem_freeblocks: Unknown type %s",
7687 TYPENAME(wk->wk_type));
7690 if (freeblks->fb_ref != 0) {
7691 freeblks->fb_state &= ~INPROGRESS;
7692 wake_worklist(&freeblks->fb_list);
7697 return handle_complete_freeblocks(freeblks, flags);
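/*
 * Illustrative sketch (not part of the original file): the typed work-list
 * dispatch pattern handle_workitem_freeblocks() uses, where each queued
 * item carries a type tag and the handler switches on it.  Names are
 * hypothetical; compile standalone.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

enum toy_type { T_FREEWORK, T_NEWBLK };

struct toy_item {
	struct toy_item *next;
	enum toy_type type;
};

static void
toy_drain(struct toy_item *head)
{
	struct toy_item *wk;

	while ((wk = head) != NULL) {
		head = wk->next;
		switch (wk->type) {
		case T_FREEWORK:
			printf("free a block\n");
			break;
		case T_NEWBLK:
			printf("discard a canceled allocation\n");
			break;
		}
		free(wk);
	}
}

int
main(void)
{
	struct toy_item *a = calloc(1, sizeof(*a));
	struct toy_item *b = calloc(1, sizeof(*b));

	a->type = T_FREEWORK;
	a->next = b;
	b->type = T_NEWBLK;
	toy_drain(a);
	return (0);
}
#endif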
7702 * Handle completion of block free via truncate. This allows fs_pending
7703 * to track the actual free block count more closely than if we only updated
7704 * it at the end. We must be careful to handle cases where the block count
7705 * on free was incorrect.
7708 freeblks_free(ump, freeblks, blocks)
7709 struct ufsmount *ump;
7710 struct freeblks *freeblks;
7714 ufs2_daddr_t remain;
7717 remain = -freeblks->fb_chkcnt;
7718 freeblks->fb_chkcnt += blocks;
7720 if (remain < blocks)
7723 fs->fs_pendingblocks -= blocks;
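/*
 * Illustrative sketch (not part of the original file): clamping the pending
 * block count as blocks are actually freed, so an over-estimate recorded at
 * truncate time cannot drive the pending counter negative.  The clamp in
 * toy_freeblks_free() is an assumption about the elided branch above; all
 * names are hypothetical.
 */
#if 0
#include <stdio.h>

static long pendingblocks;	/* models fs_pendingblocks */
static long chkcnt;		/* models freeblks->fb_chkcnt (negative) */

static void
toy_freeblks_free(long blocks)
{
	long remain = -chkcnt;	/* blocks still expected to be freed */

	chkcnt += blocks;
	if (remain < blocks)
		blocks = remain > 0 ? remain : 0;	/* clamp */
	pendingblocks -= blocks;
}

int
main(void)
{
	chkcnt = -8;		/* truncate expected to free 8 blocks */
	pendingblocks = 8;
	toy_freeblks_free(5);
	toy_freeblks_free(5);	/* more than expected; clamped to 3 */
	printf("pendingblocks = %ld\n", pendingblocks);	/* prints 0 */
	return (0);
}
#endif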
7729 * Once all of the freework workitems are complete we can retire the
7730 * freeblocks dependency and any journal work awaiting completion. This
7731 * can not be called until all other dependencies are stable on disk.
7734 handle_complete_freeblocks(freeblks, flags)
7735 struct freeblks *freeblks;
7738 struct inodedep *inodedep;
7742 struct ufsmount *ump;
7745 ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7747 flags = LK_EXCLUSIVE | flags;
7748 spare = freeblks->fb_chkcnt;
7751 * If we did not release the expected number of blocks we may have
7752 * to adjust the inode block count here. Only do so if it wasn't
7753 * a truncation to zero and the modrev still matches.
7755 if (spare && freeblks->fb_len != 0) {
7756 if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7757 flags, &vp, FFSV_FORCEINSMQ) != 0)
7760 if (DIP(ip, i_modrev) == freeblks->fb_modrev) {
7761 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare);
7762 ip->i_flag |= IN_CHANGE;
7764 * We must wait so this happens before the
7765 * journal is reclaimed.
7773 fs->fs_pendingblocks += spare;
7779 quotaadj(freeblks->fb_quota, ump, -spare);
7780 quotarele(freeblks->fb_quota);
7783 if (freeblks->fb_state & ONDEPLIST) {
7784 inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7786 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next);
7787 freeblks->fb_state &= ~ONDEPLIST;
7788 if (TAILQ_EMPTY(&inodedep->id_freeblklst))
7789 free_inodedep(inodedep);
7792 * All of the freeblock deps must be complete prior to this call
7793 * so it's now safe to complete earlier outstanding journal entries.
7795 handle_jwork(&freeblks->fb_jwork);
7796 WORKITEM_FREE(freeblks, D_FREEBLKS);
7802 * Release blocks associated with the freeblks and stored in the indirect
7803 * block dbn. If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indir_trunc must be used to cleanse other indirect
 * blocks.
7807 * This handles partial and complete truncation of blocks. Partial is noted
7808 * with goingaway == 0. In this case the freework is completed after the
7809 * zero'd indirects are written to disk. For full truncation the freework
7810 * is completed after the block is freed.
7813 indir_trunc(freework, dbn, lbn)
7814 struct freework *freework;
7818 struct freework *nfreework;
7819 struct workhead wkhd;
7820 struct freeblks *freeblks;
7823 struct indirdep *indirdep;
7824 struct ufsmount *ump;
7825 ufs1_daddr_t *bap1 = 0;
7826 ufs2_daddr_t nb, nnb, *bap2 = 0;
7827 ufs_lbn_t lbnadd, nlbn;
7828 int i, nblocks, ufs1fmt;
7836 freeblks = freework->fw_freeblks;
7837 ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7840 * Get buffer of block pointers to be freed. There are three cases:
7842 * 1) Partial truncate caches the indirdep pointer in the freework
7843 * which provides us a back copy to the save bp which holds the
7844 * pointers we want to clear. When this completes the zero
7845 * pointers are written to the real copy.
7846 * 2) The indirect is being completely truncated, cancel_indirdep()
7847 * eliminated the real copy and placed the indirdep on the saved
7848 * copy. The indirdep and buf are discarded when this completes.
7849 * 3) The indirect was not in memory, we read a copy off of the disk
7850 * using the devvp and drop and invalidate the buffer when we're
7855 if (freework->fw_indir != NULL) {
7857 indirdep = freework->fw_indir;
7858 bp = indirdep->ir_savebp;
7859 if (bp == NULL || bp->b_blkno != dbn)
7860 panic("indir_trunc: Bad saved buf %p blkno %jd",
7862 } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
7864 * The lock prevents the buf dep list from changing and
7865 * indirects on devvp should only ever have one dependency.
7867 indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));
7868 if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0)
7869 panic("indir_trunc: Bad indirdep %p from buf %p",
7871 } else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
7872 NOCRED, &bp) != 0) {
7877 /* Protects against a race with complete_trunc_indir(). */
7878 freework->fw_state &= ~INPROGRESS;
7880 * If we have an indirdep we need to enforce the truncation order
7881 * and discard it when it is complete.
7884 if (freework != TAILQ_FIRST(&indirdep->ir_trunc) &&
7885 !TAILQ_EMPTY(&indirdep->ir_trunc)) {
7887 * Add the complete truncate to the list on the
7888 * indirdep to enforce in-order processing.
7890 if (freework->fw_indir == NULL)
7891 TAILQ_INSERT_TAIL(&indirdep->ir_trunc,
7897 * If we're goingaway, free the indirdep. Otherwise it will
7898 * linger until the write completes.
7901 free_indirdep(indirdep);
7902 ump->um_numindirdeps -= 1;
7906 /* Initialize pointers depending on block size. */
7907 if (ump->um_fstype == UFS1) {
7908 bap1 = (ufs1_daddr_t *)bp->b_data;
7909 nb = bap1[freework->fw_off];
7912 bap2 = (ufs2_daddr_t *)bp->b_data;
7913 nb = bap2[freework->fw_off];
7916 level = lbn_level(lbn);
7917 needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0;
7918 lbnadd = lbn_offset(fs, level);
7919 nblocks = btodb(fs->fs_bsize);
7920 nfreework = freework;
7924 * Reclaim blocks. Traverses into nested indirect levels and
7925 * arranges for the current level to be freed when subordinates
7926 * are free when journaling.
7928 for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) {
7929 if (i != NINDIR(fs) - 1) {
7940 nlbn = (lbn + 1) - (i * lbnadd);
7942 nfreework = newfreework(ump, freeblks, freework,
7943 nlbn, nb, fs->fs_frag, 0, 0);
7946 indir_trunc(nfreework, fsbtodb(fs, nb), nlbn);
7948 struct freedep *freedep;
7951 * Attempt to aggregate freedep dependencies for
7952 * all blocks being released to the same CG.
7956 (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) {
7957 freedep = newfreedep(freework);
7958 WORKLIST_INSERT_UNLOCKED(&wkhd,
7963 "indir_trunc: ino %d blkno %jd size %ld",
7964 freeblks->fb_inum, nb, fs->fs_bsize);
7965 ffs_blkfree(ump, fs, freeblks->fb_devvp, nb,
7966 fs->fs_bsize, freeblks->fb_inum,
7967 freeblks->fb_vtype, &wkhd);
7971 bp->b_flags |= B_INVAL | B_NOCACHE;
7976 freedblocks = (nblocks * cnt);
7978 freedblocks += nblocks;
7979 freeblks_free(ump, freeblks, freedblocks);
7981 * If we are journaling set up the ref counts and offset so this
7982 * indirect can be completed when its children are free.
7986 freework->fw_off = i;
7987 freework->fw_ref += freedeps;
7988 freework->fw_ref -= NINDIR(fs) + 1;
7990 freeblks->fb_cgwait += freedeps;
7991 if (freework->fw_ref == 0)
7992 freework_freeblock(freework);
7997 * If we're not journaling we can free the indirect now.
7999 dbn = dbtofsb(fs, dbn);
8001 "indir_trunc 2: ino %d blkno %jd size %ld",
8002 freeblks->fb_inum, dbn, fs->fs_bsize);
8003 ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize,
8004 freeblks->fb_inum, freeblks->fb_vtype, NULL);
	/* Non-SUJ softdep does single-threaded truncations. */
8006 if (freework->fw_blkno == dbn) {
8007 freework->fw_state |= ALLCOMPLETE;
8009 handle_written_freework(freework);
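/*
 * Illustrative sketch (not part of the original file): a user-space model
 * of walking an indirect-pointer tree depth first, freeing each indirect
 * only after its subordinates, with a per-node offset so the walk could be
 * paused and resumed the way indir_trunc() uses fw_off.  The tiny geometry
 * and all names are hypothetical.
 */
#if 0
#include <stdio.h>

#define	TOY_NINDIR	4	/* pointers per indirect block */

struct toy_indir {
	long blkno;
	int off;				/* resume offset */
	struct toy_indir *child[TOY_NINDIR];	/* NULL => hole or data */
};

static void
toy_indir_trunc(struct toy_indir *ip, int level)
{
	for (; ip->off < TOY_NINDIR; ip->off++)
		if (ip->child[ip->off] != NULL)
			toy_indir_trunc(ip->child[ip->off], level - 1);
	printf("free level-%d indirect at blkno %ld\n", level, ip->blkno);
}

int
main(void)
{
	struct toy_indir leaf1 = { 201, 0, { NULL } };
	struct toy_indir leaf2 = { 202, 0, { NULL } };
	struct toy_indir root = { 200, 0, { &leaf1, NULL, &leaf2, NULL } };

	toy_indir_trunc(&root, 2);	/* children print before the root */
	return (0);
}
#endif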
8016 * Cancel an allocindir when it is removed via truncation. When bp is not
8017 * NULL the indirect never appeared on disk and is scheduled to be freed
8018 * independently of the indir so we can more easily track journal work.
8021 cancel_allocindir(aip, bp, freeblks, trunc)
8022 struct allocindir *aip;
8024 struct freeblks *freeblks;
8027 struct indirdep *indirdep;
8028 struct freefrag *freefrag;
8029 struct newblk *newblk;
8031 newblk = (struct newblk *)aip;
8032 LIST_REMOVE(aip, ai_next);
8034 * We must eliminate the pointer in bp if it must be freed on its
8035 * own due to partial truncate or pending journal work.
8037 if (bp && (trunc || newblk->nb_jnewblk)) {
8039 * Clear the pointer and mark the aip to be freed
8040 * directly if it never existed on disk.
8042 aip->ai_state |= DELAYEDFREE;
8043 indirdep = aip->ai_indirdep;
8044 if (indirdep->ir_state & UFS1FMT)
8045 ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8047 ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8050 * When truncating the previous pointer will be freed via
8051 * savedbp. Eliminate the freefrag which would dup free.
8053 if (trunc && (freefrag = newblk->nb_freefrag) != NULL) {
8054 newblk->nb_freefrag = NULL;
8055 if (freefrag->ff_jdep)
8057 WK_JFREEFRAG(freefrag->ff_jdep));
8058 jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork);
8059 WORKITEM_FREE(freefrag, D_FREEFRAG);
8062 * If the journal hasn't been written the jnewblk must be passed
8063 * to the call to ffs_blkfree that reclaims the space. We accomplish
8064 * this by leaving the journal dependency on the newblk to be freed
8065 * when a freework is created in handle_workitem_freeblocks().
8067 cancel_newblk(newblk, NULL, &freeblks->fb_jwork);
8068 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
8072 * Create the mkdir dependencies for . and .. in a new directory. Link them
8073 * in to a newdirblk so any subsequent additions are tracked properly. The
8074 * caller is responsible for adding the mkdir1 dependency to the journal
8075 * and updating id_mkdiradd. This function returns with lk held.
8077 static struct mkdir *
8078 setup_newdir(dap, newinum, dinum, newdirbp, mkdirp)
8082 struct buf *newdirbp;
8083 struct mkdir **mkdirp;
8085 struct newblk *newblk;
8086 struct pagedep *pagedep;
8087 struct inodedep *inodedep;
8088 struct newdirblk *newdirblk = 0;
8089 struct mkdir *mkdir1, *mkdir2;
8090 struct worklist *wk;
8091 struct jaddref *jaddref;
8094 mp = dap->da_list.wk_mp;
8095 newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK,
8097 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8098 LIST_INIT(&newdirblk->db_mkdir);
8099 mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8100 workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
8101 mkdir1->md_state = ATTACHED | MKDIR_BODY;
8102 mkdir1->md_diradd = dap;
8103 mkdir1->md_jaddref = NULL;
8104 mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8105 workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
8106 mkdir2->md_state = ATTACHED | MKDIR_PARENT;
8107 mkdir2->md_diradd = dap;
8108 mkdir2->md_jaddref = NULL;
8109 if (MOUNTEDSUJ(mp) == 0) {
8110 mkdir1->md_state |= DEPCOMPLETE;
8111 mkdir2->md_state |= DEPCOMPLETE;
8114 * Dependency on "." and ".." being written to disk.
8116 mkdir1->md_buf = newdirbp;
8118 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs);
8120 * We must link the pagedep, allocdirect, and newdirblk for
8121 * the initial file page so the pointer to the new directory
8122 * is not written until the directory contents are live and
8123 * any subsequent additions are not marked live until the
8124 * block is reachable via the inode.
8126 if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0)
8127 panic("setup_newdir: lost pagedep");
8128 LIST_FOREACH(wk, &newdirbp->b_dep, wk_list)
8129 if (wk->wk_type == D_ALLOCDIRECT)
8132 panic("setup_newdir: lost allocdirect");
8133 if (pagedep->pd_state & NEWBLOCK)
8134 panic("setup_newdir: NEWBLOCK already set");
8135 newblk = WK_NEWBLK(wk);
8136 pagedep->pd_state |= NEWBLOCK;
8137 pagedep->pd_newdirblk = newdirblk;
8138 newdirblk->db_pagedep = pagedep;
8139 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8140 WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list);
8142 * Look up the inodedep for the parent directory so that we
8143 * can link mkdir2 into the pending dotdot jaddref or
8144 * the inode write if there is none. If the inode is
8145 * ALLCOMPLETE and no jaddref is present all dependencies have
8146 * been satisfied and mkdir2 can be freed.
8148 inodedep_lookup(mp, dinum, 0, &inodedep);
8149 if (MOUNTEDSUJ(mp)) {
8150 if (inodedep == NULL)
8151 panic("setup_newdir: Lost parent.");
8152 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8154 KASSERT(jaddref != NULL && jaddref->ja_parent == newinum &&
8155 (jaddref->ja_state & MKDIR_PARENT),
8156 ("setup_newdir: bad dotdot jaddref %p", jaddref));
8157 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
8158 mkdir2->md_jaddref = jaddref;
8159 jaddref->ja_mkdir = mkdir2;
8160 } else if (inodedep == NULL ||
8161 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
8162 dap->da_state &= ~MKDIR_PARENT;
8163 WORKITEM_FREE(mkdir2, D_MKDIR);
8166 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
8167 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
8175 * Directory entry addition dependencies.
8177 * When adding a new directory entry, the inode (with its incremented link
8178 * count) must be written to disk before the directory entry's pointer to it.
8179 * Also, if the inode is newly allocated, the corresponding freemap must be
8180 * updated (on disk) before the directory entry's pointer. These requirements
8181 * are met via undo/redo on the directory entry's pointer, which consists
8182 * simply of the inode number.
8184 * As directory entries are added and deleted, the free space within a
8185 * directory block can become fragmented. The ufs filesystem will compact
8186 * a fragmented directory block to make space for a new entry. When this
8187 * occurs, the offsets of previously added entries change. Any "diradd"
 * dependency structures corresponding to these entries must be updated with
 * the new offsets.
8193 * This routine is called after the in-memory inode's link
8194 * count has been incremented, but before the directory entry's
8195 * pointer to the inode has been set.
8198 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
8199 struct buf *bp; /* buffer containing directory block */
8200 struct inode *dp; /* inode for directory */
8201 off_t diroffset; /* offset of new entry in directory */
8202 ino_t newinum; /* inode referenced by new directory entry */
8203 struct buf *newdirbp; /* non-NULL => contents of new mkdir */
8204 int isnewblk; /* entry is in a newly allocated block */
8206 int offset; /* offset of new entry within directory block */
8207 ufs_lbn_t lbn; /* block in directory containing new entry */
8210 struct newblk *newblk;
8211 struct pagedep *pagedep;
8212 struct inodedep *inodedep;
8213 struct newdirblk *newdirblk = 0;
8214 struct mkdir *mkdir1, *mkdir2;
8215 struct jaddref *jaddref;
8220 * Whiteouts have no dependencies.
8222 if (newinum == WINO) {
8223 if (newdirbp != NULL)
8228 mkdir1 = mkdir2 = NULL;
8229 mp = UFSTOVFS(dp->i_ump);
8231 lbn = lblkno(fs, diroffset);
8232 offset = blkoff(fs, diroffset);
8233 dap = malloc(sizeof(struct diradd), M_DIRADD,
8234 M_SOFTDEP_FLAGS|M_ZERO);
8235 workitem_alloc(&dap->da_list, D_DIRADD, mp);
8236 dap->da_offset = offset;
8237 dap->da_newinum = newinum;
8238 dap->da_state = ATTACHED;
8239 LIST_INIT(&dap->da_jwork);
8240 isindir = bp->b_lblkno >= NDADDR;
8242 (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) {
8243 newdirblk = malloc(sizeof(struct newdirblk),
8244 M_NEWDIRBLK, M_SOFTDEP_FLAGS);
8245 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8246 LIST_INIT(&newdirblk->db_mkdir);
	 * If we're creating a new directory, set up the dependencies and set
8250 * the dap state to wait for them. Otherwise it's COMPLETE and
8253 if (newdirbp == NULL) {
8254 dap->da_state |= DEPCOMPLETE;
8257 dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
8258 mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp,
8262 * Link into parent directory pagedep to await its being written.
8264 pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep);
8266 if (diradd_lookup(pagedep, offset) != NULL)
8267 panic("softdep_setup_directory_add: %p already at off %d\n",
8268 diradd_lookup(pagedep, offset), offset);
8270 dap->da_pagedep = pagedep;
8271 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
8273 inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
8275 * If we're journaling, link the diradd into the jaddref so it
8276 * may be completed after the journal entry is written. Otherwise,
8277 * link the diradd into its inodedep. If the inode is not yet
8278 * written place it on the bufwait list, otherwise do the post-inode
8279 * write processing to put it on the id_pendinghd list.
8281 if (MOUNTEDSUJ(mp)) {
8282 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8284 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
8285 ("softdep_setup_directory_add: bad jaddref %p", jaddref));
8286 jaddref->ja_diroff = diroffset;
8287 jaddref->ja_diradd = dap;
8288 add_to_journal(&jaddref->ja_list);
8289 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
8290 diradd_inode_written(dap, inodedep);
8292 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
8294 * Add the journal entries for . and .. links now that the primary
8297 if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
8298 jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
8299 inoreflst, if_deps);
8300 KASSERT(jaddref != NULL &&
8301 jaddref->ja_ino == jaddref->ja_parent &&
8302 (jaddref->ja_state & MKDIR_BODY),
8303 ("softdep_setup_directory_add: bad dot jaddref %p",
8305 mkdir1->md_jaddref = jaddref;
8306 jaddref->ja_mkdir = mkdir1;
8308 * It is important that the dotdot journal entry
8309 * is added prior to the dot entry since dot writes
8310 * both the dot and dotdot links. These both must
8311 * be added after the primary link for the journal
8312 * to remain consistent.
8314 add_to_journal(&mkdir2->md_jaddref->ja_list);
8315 add_to_journal(&jaddref->ja_list);
8318 * If we are adding a new directory remember this diradd so that if
8319 * we rename it we can keep the dot and dotdot dependencies. If
8320 * we are adding a new name for an inode that has a mkdiradd we
8321 * must be in rename and we have to move the dot and dotdot
8322 * dependencies to this new name. The old name is being orphaned
8325 if (mkdir1 != NULL) {
8326 if (inodedep->id_mkdiradd != NULL)
8327 panic("softdep_setup_directory_add: Existing mkdir");
8328 inodedep->id_mkdiradd = dap;
8329 } else if (inodedep->id_mkdiradd)
8330 merge_diradd(inodedep, dap);
8333 * There is nothing to do if we are already tracking
8336 if ((pagedep->pd_state & NEWBLOCK) != 0) {
8337 WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
8341 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
8343 panic("softdep_setup_directory_add: lost entry");
8344 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8345 pagedep->pd_state |= NEWBLOCK;
8346 pagedep->pd_newdirblk = newdirblk;
8347 newdirblk->db_pagedep = pagedep;
	/*
	 * If we extended into an indirect, signal direnter to sync.
	 */
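/*
 * Illustrative sketch (not part of the original file): the undo/redo trick
 * described at the head of this section, modeled in user space.  The
 * in-memory entry holds the new inode number, but the image handed to the
 * "disk" is rolled back to zero until the inode itself is stable.  Names
 * are hypothetical; compile standalone.
 */
#if 0
#include <stdio.h>

struct toy_dirent {
	long d_ino;
	char d_name[16];
};

static int inode_on_disk;	/* models the diradd dependency state */

/* Prepare the buffer image that may be written right now. */
static void
toy_start_write(const struct toy_dirent *in, struct toy_dirent *out)
{
	*out = *in;
	if (!inode_on_disk)
		out->d_ino = 0;	/* undo: entry invisible after a crash */
}

int
main(void)
{
	struct toy_dirent mem = { 123, "newfile" }, disk;

	toy_start_write(&mem, &disk);
	printf("before inode write: on-disk ino %ld\n", disk.d_ino);
	inode_on_disk = 1;	/* inode with its link count is now stable */
	toy_start_write(&mem, &disk);
	printf("after inode write:  on-disk ino %ld\n", disk.d_ino);
	return (0);
}
#endif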
8361 * This procedure is called to change the offset of a directory
8362 * entry when compacting a directory block which must be owned
8363 * exclusively by the caller. Note that the actual entry movement
8364 * must be done in this procedure to ensure that no I/O completions
8365 * occur while the move is in progress.
8368 softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
8369 struct buf *bp; /* Buffer holding directory block. */
8370 struct inode *dp; /* inode for directory */
8371 caddr_t base; /* address of dp->i_offset */
8372 caddr_t oldloc; /* address of old directory location */
8373 caddr_t newloc; /* address of new directory location */
8374 int entrysize; /* size of directory entry */
8376 int offset, oldoffset, newoffset;
8377 struct pagedep *pagedep;
8378 struct jmvref *jmvref;
8385 mp = UFSTOVFS(dp->i_ump);
8386 de = (struct direct *)oldloc;
8390 * Moves are always journaled as it would be too complex to
	 * determine if any affected adds or removes are present in the
	 * journal.
8394 if (MOUNTEDSUJ(mp)) {
8396 jmvref = newjmvref(dp, de->d_ino,
8397 dp->i_offset + (oldloc - base),
8398 dp->i_offset + (newloc - base));
8400 lbn = lblkno(dp->i_fs, dp->i_offset);
8401 offset = blkoff(dp->i_fs, dp->i_offset);
8402 oldoffset = offset + (oldloc - base);
8403 newoffset = offset + (newloc - base);
8405 if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
8407 dap = diradd_lookup(pagedep, oldoffset);
8409 dap->da_offset = newoffset;
8410 newoffset = DIRADDHASH(newoffset);
8411 oldoffset = DIRADDHASH(oldoffset);
8412 if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
8413 newoffset != oldoffset) {
8414 LIST_REMOVE(dap, da_pdlist);
8415 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
8421 jmvref->jm_pagedep = pagedep;
8422 LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
8423 add_to_journal(&jmvref->jm_list);
8425 bcopy(oldloc, newloc, entrysize);
8430 * Move the mkdir dependencies and journal work from one diradd to another
8431 * when renaming a directory. The new name must depend on the mkdir deps
8432 * completing as the old name did. Directories can only have one valid link
8433 * at a time so one must be canonical.
8436 merge_diradd(inodedep, newdap)
8437 struct inodedep *inodedep;
8438 struct diradd *newdap;
8440 struct diradd *olddap;
8441 struct mkdir *mkdir, *nextmd;
8444 olddap = inodedep->id_mkdiradd;
8445 inodedep->id_mkdiradd = newdap;
8446 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8447 newdap->da_state &= ~DEPCOMPLETE;
8448 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
8449 nextmd = LIST_NEXT(mkdir, md_mkdirs);
8450 if (mkdir->md_diradd != olddap)
8452 mkdir->md_diradd = newdap;
8453 state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8454 newdap->da_state |= state;
8455 olddap->da_state &= ~state;
8456 if ((olddap->da_state &
8457 (MKDIR_PARENT | MKDIR_BODY)) == 0)
8460 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8461 panic("merge_diradd: unfound ref");
8464 * Any mkdir related journal items are not safe to be freed until
8465 * the new name is stable.
8467 jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8468 olddap->da_state |= DEPCOMPLETE;
8469 complete_diradd(olddap);
8473 * Move the diradd to the pending list when all diradd dependencies are
8477 complete_diradd(dap)
8480 struct pagedep *pagedep;
8482 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8483 if (dap->da_state & DIRCHG)
8484 pagedep = dap->da_previous->dm_pagedep;
8486 pagedep = dap->da_pagedep;
8487 LIST_REMOVE(dap, da_pdlist);
8488 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
/*
 * Cancel a diradd when a dirrem overlaps with it.  We must cancel the journal
 * add entries and conditionally journal the remove.
 */
8497 cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
8499 struct dirrem *dirrem;
8500 struct jremref *jremref;
8501 struct jremref *dotremref;
8502 struct jremref *dotdotremref;
8504 struct inodedep *inodedep;
8505 struct jaddref *jaddref;
8506 struct inoref *inoref;
8507 struct mkdir *mkdir;
8510 * If no remove references were allocated we're on a non-journaled
8511 * filesystem and can skip the cancel step.
8513 if (jremref == NULL) {
8514 free_diradd(dap, NULL);
	 * Cancel the primary name and free it if it does not require
8521 if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
8522 0, &inodedep) != 0) {
8523 /* Abort the addref that reference this diradd. */
8524 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
8525 if (inoref->if_list.wk_type != D_JADDREF)
8527 jaddref = (struct jaddref *)inoref;
8528 if (jaddref->ja_diradd != dap)
8530 if (cancel_jaddref(jaddref, inodedep,
8531 &dirrem->dm_jwork) == 0) {
8532 free_jremref(jremref);
8539 * Cancel subordinate names and free them if they do not require
8542 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8543 LIST_FOREACH(mkdir, &mkdirlisthd, md_mkdirs) {
8544 if (mkdir->md_diradd != dap)
8546 if ((jaddref = mkdir->md_jaddref) == NULL)
8548 mkdir->md_jaddref = NULL;
8549 if (mkdir->md_state & MKDIR_PARENT) {
8550 if (cancel_jaddref(jaddref, NULL,
8551 &dirrem->dm_jwork) == 0) {
8552 free_jremref(dotdotremref);
8553 dotdotremref = NULL;
8556 if (cancel_jaddref(jaddref, inodedep,
8557 &dirrem->dm_jwork) == 0) {
8558 free_jremref(dotremref);
8566 journal_jremref(dirrem, jremref, inodedep);
8568 journal_jremref(dirrem, dotremref, inodedep);
8570 journal_jremref(dirrem, dotdotremref, NULL);
8571 jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
8572 free_diradd(dap, &dirrem->dm_jwork);
8576 * Free a diradd dependency structure. This routine must be called
8577 * with splbio interrupts blocked.
8580 free_diradd(dap, wkhd)
8582 struct workhead *wkhd;
8584 struct dirrem *dirrem;
8585 struct pagedep *pagedep;
8586 struct inodedep *inodedep;
8587 struct mkdir *mkdir, *nextmd;
8589 mtx_assert(&lk, MA_OWNED);
8590 LIST_REMOVE(dap, da_pdlist);
8591 if (dap->da_state & ONWORKLIST)
8592 WORKLIST_REMOVE(&dap->da_list);
8593 if ((dap->da_state & DIRCHG) == 0) {
8594 pagedep = dap->da_pagedep;
8596 dirrem = dap->da_previous;
8597 pagedep = dirrem->dm_pagedep;
8598 dirrem->dm_dirinum = pagedep->pd_ino;
8599 dirrem->dm_state |= COMPLETE;
8600 if (LIST_EMPTY(&dirrem->dm_jremrefhd))
8601 add_to_worklist(&dirrem->dm_list, 0);
8603 if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
8604 0, &inodedep) != 0)
8605 if (inodedep->id_mkdiradd == dap)
8606 inodedep->id_mkdiradd = NULL;
8607 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8608 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
8609 nextmd = LIST_NEXT(mkdir, md_mkdirs);
8610 if (mkdir->md_diradd != dap)
8611 continue;
8612 dap->da_state &=
8613 ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
8614 LIST_REMOVE(mkdir, md_mkdirs);
8615 if (mkdir->md_state & ONWORKLIST)
8616 WORKLIST_REMOVE(&mkdir->md_list);
8617 if (mkdir->md_jaddref != NULL)
8618 panic("free_diradd: Unexpected jaddref");
8619 WORKITEM_FREE(mkdir, D_MKDIR);
8620 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
8621 break;
8623 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8624 panic("free_diradd: unfound ref");
8626 if (inodedep)
8627 free_inodedep(inodedep);
8629 * Free any journal segments waiting for the directory write.
8631 handle_jwork(&dap->da_jwork);
8632 WORKITEM_FREE(dap, D_DIRADD);
8636 * Directory entry removal dependencies.
8638 * When removing a directory entry, the entry's inode pointer must be
8639 * zero'ed on disk before the corresponding inode's link count is decremented
8640 * (possibly freeing the inode for re-use). This dependency is handled by
8641 * updating the directory entry but delaying the inode count reduction until
8642 * after the directory block has been written to disk. After this point, the
8643 * inode count can be decremented whenever it is convenient.
8647 * This routine should be called immediately after removing
8648 * a directory entry. The inode's link count should not be
8649 * decremented by the calling procedure -- the soft updates
8650 * code will do this task when it is safe.
8653 softdep_setup_remove(bp, dp, ip, isrmdir)
8654 struct buf *bp; /* buffer containing directory block */
8655 struct inode *dp; /* inode for the directory being modified */
8656 struct inode *ip; /* inode for directory entry being removed */
8657 int isrmdir; /* indicates if doing RMDIR */
8659 struct dirrem *dirrem, *prevdirrem;
8660 struct inodedep *inodedep;
8664 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. We want
8665 * newdirrem() to set up the full directory remove which requires
8666 * isrmdir > 1.
8668 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
8670 * Add the dirrem to the inodedep's pending remove list for quick
8671 * discovery later.
8673 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8674 &inodedep) == 0)
8675 panic("softdep_setup_remove: Lost inodedep.");
8676 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
8677 dirrem->dm_state |= ONDEPLIST;
8678 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
8681 * If the COMPLETE flag is clear, then there were no active
8682 * entries and we want to roll back to a zeroed entry until
8683 * the new inode is committed to disk. If the COMPLETE flag is
8684 * set then we have deleted an entry that never made it to
8685 * disk. If the entry we deleted resulted from a name change,
8686 * then the old name still resides on disk. We cannot delete
8687 * its inode (returned to us in prevdirrem) until the zeroed
8688 * directory entry gets to disk. The new inode has never been
8689 * referenced on the disk, so can be deleted immediately.
8691 if ((dirrem->dm_state & COMPLETE) == 0) {
8692 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
8693 dm_next);
8694 FREE_LOCK(&lk);
8695 } else {
8696 if (prevdirrem != NULL)
8697 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
8698 prevdirrem, dm_next);
8699 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
8700 direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
8701 FREE_LOCK(&lk);
8702 if (direct)
8703 handle_workitem_remove(dirrem, 0);
8708 * Check for an entry matching 'offset' on both the pd_dirraddhd list and the
8709 * pd_pendinghd list of a pagedep.
8711 static struct diradd *
8712 diradd_lookup(pagedep, offset)
8713 struct pagedep *pagedep;
8714 int offset;
8716 struct diradd *dap;
8718 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
8719 if (dap->da_offset == offset)
8720 return (dap);
8721 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
8722 if (dap->da_offset == offset)
8723 return (dap);
8724 return (NULL);
8728 * Search for a .. diradd dependency in a directory that is being removed.
8729 * If the directory was renamed to a new parent we have a diradd rather
8730 * than a mkdir for the .. entry. We need to cancel it now before
8731 * it is found in truncate().
8733 static struct jremref *
8734 cancel_diradd_dotdot(ip, dirrem, jremref)
8735 struct inode *ip;
8736 struct dirrem *dirrem;
8737 struct jremref *jremref;
8739 struct pagedep *pagedep;
8740 struct diradd *dap;
8741 struct worklist *wk;
8743 if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0,
8744 &pagedep) == 0)
8745 return (jremref);
8746 dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
8747 if (dap == NULL)
8748 return (jremref);
8749 cancel_diradd(dap, dirrem, jremref, NULL, NULL);
8751 * Mark any journal work as belonging to the parent so it is freed
8752 * with the .. reference.
8754 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
8755 wk->wk_state |= MKDIR_PARENT;
8756 return (NULL);
8760 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
8761 * replace it with a dirrem/diradd pair as a result of re-parenting a
8762 * directory. This ensures that we don't simultaneously have a mkdir and
8763 * a diradd for the same .. entry.
8765 static struct jremref *
8766 cancel_mkdir_dotdot(ip, dirrem, jremref)
8767 struct inode *ip;
8768 struct dirrem *dirrem;
8769 struct jremref *jremref;
8771 struct inodedep *inodedep;
8772 struct jaddref *jaddref;
8773 struct mkdir *mkdir;
8774 struct diradd *dap;
8776 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8777 &inodedep) == 0)
8778 return (jremref);
8779 dap = inodedep->id_mkdiradd;
8780 if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
8781 return (jremref);
8782 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir;
8783 mkdir = LIST_NEXT(mkdir, md_mkdirs))
8784 if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
8785 break;
8786 if (mkdir == NULL)
8787 panic("cancel_mkdir_dotdot: Unable to find mkdir");
8788 if ((jaddref = mkdir->md_jaddref) != NULL) {
8789 mkdir->md_jaddref = NULL;
8790 jaddref->ja_state &= ~MKDIR_PARENT;
8791 if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0,
8793 panic("cancel_mkdir_dotdot: Lost parent inodedep");
8794 if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
8795 journal_jremref(dirrem, jremref, inodedep);
8796 jremref = NULL;
8799 if (mkdir->md_state & ONWORKLIST)
8800 WORKLIST_REMOVE(&mkdir->md_list);
8801 mkdir->md_state |= ALLCOMPLETE;
8802 complete_mkdir(mkdir);
8803 return (jremref);
8807 journal_jremref(dirrem, jremref, inodedep)
8808 struct dirrem *dirrem;
8809 struct jremref *jremref;
8810 struct inodedep *inodedep;
8813 if (inodedep == NULL)
8814 if (inodedep_lookup(jremref->jr_list.wk_mp,
8815 jremref->jr_ref.if_ino, 0, &inodedep) == 0)
8816 panic("journal_jremref: Lost inodedep");
8817 LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
8818 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
8819 add_to_journal(&jremref->jr_list);
8823 dirrem_journal(dirrem, jremref, dotremref, dotdotremref)
8824 struct dirrem *dirrem;
8825 struct jremref *jremref;
8826 struct jremref *dotremref;
8827 struct jremref *dotdotremref;
8829 struct inodedep *inodedep;
8832 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
8833 &inodedep) == 0)
8834 panic("dirrem_journal: Lost inodedep");
8835 journal_jremref(dirrem, jremref, inodedep);
8836 if (dotremref != NULL)
8837 journal_jremref(dirrem, dotremref, inodedep);
8838 if (dotdotremref != NULL)
8839 journal_jremref(dirrem, dotdotremref, NULL);
8843 * Allocate a new dirrem if appropriate and return it along with
8844 * its associated pagedep. Called without a lock, returns with lock.
8846 static struct dirrem *
8847 newdirrem(bp, dp, ip, isrmdir, prevdirremp)
8848 struct buf *bp; /* buffer containing directory block */
8849 struct inode *dp; /* inode for the directory being modified */
8850 struct inode *ip; /* inode for directory entry being removed */
8851 int isrmdir; /* indicates if doing RMDIR */
8852 struct dirrem **prevdirremp; /* previously referenced inode, if any */
8854 int offset;
8855 ufs_lbn_t lbn;
8856 struct diradd *dap;
8857 struct dirrem *dirrem;
8858 struct pagedep *pagedep;
8859 struct jremref *jremref;
8860 struct jremref *dotremref;
8861 struct jremref *dotdotremref;
8862 struct vnode *dvp;
8865 * Whiteouts have no deletion dependencies.
8867 if (ip == NULL)
8868 panic("newdirrem: whiteout");
8869 dvp = ITOV(dp);
8871 * If we are over our limit, try to improve the situation.
8872 * Limiting the number of dirrem structures will also limit
8873 * the number of freefile and freeblks structures.
8875 ACQUIRE_LOCK(&lk);
8876 if (!IS_SNAPSHOT(ip) && dep_current[D_DIRREM] > max_softdeps / 2)
8877 (void) request_cleanup(ITOV(dp)->v_mount, FLUSH_BLOCKS);
8878 FREE_LOCK(&lk);
8879 dirrem = malloc(sizeof(struct dirrem),
8880 M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO);
8881 workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
8882 LIST_INIT(&dirrem->dm_jremrefhd);
8883 LIST_INIT(&dirrem->dm_jwork);
8884 dirrem->dm_state = isrmdir ? RMDIR : 0;
8885 dirrem->dm_oldinum = ip->i_number;
8886 *prevdirremp = NULL;
8888 * Allocate remove reference structures to track journal write
8889 * dependencies. We will always have one for the link and
8890 * when doing directories we will always have one more for dot.
8891 * When renaming a directory we skip the dotdot link change so
8892 * this is not needed.
8894 jremref = dotremref = dotdotremref = NULL;
8895 if (DOINGSUJ(dvp)) {
8896 if (isrmdir) {
8897 jremref = newjremref(dirrem, dp, ip, dp->i_offset,
8898 ip->i_effnlink + 2);
8899 dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
8900 ip->i_effnlink + 1);
8901 dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
8902 dp->i_effnlink + 1);
8903 dotdotremref->jr_state |= MKDIR_PARENT;
8904 } else
8905 jremref = newjremref(dirrem, dp, ip, dp->i_offset,
8906 ip->i_effnlink + 1);
8908 ACQUIRE_LOCK(&lk);
8909 lbn = lblkno(dp->i_fs, dp->i_offset);
8910 offset = blkoff(dp->i_fs, dp->i_offset);
8911 pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC,
8912 &pagedep);
8913 dirrem->dm_pagedep = pagedep;
8914 dirrem->dm_offset = offset;
8916 * If we're renaming a .. link to a new directory, cancel any
8917 * existing MKDIR_PARENT mkdir. If it has already been canceled
8918 * the jremref is preserved for any potential diradd in this
8919 * location. This can not coincide with a rmdir.
8921 if (dp->i_offset == DOTDOT_OFFSET) {
8922 if (isrmdir)
8923 panic("newdirrem: .. directory change during remove?");
8924 jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
8927 * If we're removing a directory, search for the .. dependency now and
8928 * cancel it. Any pending journal work will be added to the dirrem
8929 * to be completed when the workitem remove completes.
8931 if (isrmdir)
8932 dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
8934 * Check for a diradd dependency for the same directory entry.
8935 * If present, then both dependencies become obsolete and can
8936 * be de-allocated.
8938 dap = diradd_lookup(pagedep, offset);
8939 if (dap == NULL) {
8941 * Link the jremref structures into the dirrem so they are
8942 * written prior to the pagedep.
8944 if (jremref)
8945 dirrem_journal(dirrem, jremref, dotremref,
8946 dotdotremref);
8947 return (dirrem);
8950 * Must be ATTACHED at this point.
8952 if ((dap->da_state & ATTACHED) == 0)
8953 panic("newdirrem: not ATTACHED");
8954 if (dap->da_newinum != ip->i_number)
8955 panic("newdirrem: inum %d should be %d",
8956 ip->i_number, dap->da_newinum);
8958 * If we are deleting a changed name that never made it to disk,
8959 * then return the dirrem describing the previous inode (which
8960 * represents the inode currently referenced from this entry on disk).
8962 if ((dap->da_state & DIRCHG) != 0) {
8963 *prevdirremp = dap->da_previous;
8964 dap->da_state &= ~DIRCHG;
8965 dap->da_pagedep = pagedep;
8968 * We are deleting an entry that never made it to disk.
8969 * Mark it COMPLETE so we can delete its inode immediately.
8971 dirrem->dm_state |= COMPLETE;
8972 cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
8973 #ifdef SUJ_DEBUG
8974 if (isrmdir == 0) {
8975 struct worklist *wk;
8977 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
8978 if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
8979 panic("bad wk %p (0x%X)", wk, wk->wk_state);
8981 #endif
8983 return (dirrem);
8987 * Directory entry change dependencies.
8989 * Changing an existing directory entry requires that an add operation
8990 * be completed first followed by a deletion. The semantics for the addition
8991 * are identical to the description of adding a new entry above except
8992 * that the rollback is to the old inode number rather than zero. Once
8993 * the addition dependency is completed, the removal is done as described
8994 * in the removal routine above.
8998 * This routine should be called immediately after changing
8999 * a directory entry. The inode's link count should not be
9000 * decremented by the calling procedure -- the soft updates
9001 * code will perform this task when it is safe.
9004 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
9005 struct buf *bp; /* buffer containing directory block */
9006 struct inode *dp; /* inode for the directory being modified */
9007 struct inode *ip; /* inode for directory entry being removed */
9008 ino_t newinum; /* new inode number for changed entry */
9009 int isrmdir; /* indicates if doing RMDIR */
9011 int offset;
9012 struct diradd *dap = NULL;
9013 struct dirrem *dirrem, *prevdirrem;
9014 struct pagedep *pagedep;
9015 struct inodedep *inodedep;
9016 struct jaddref *jaddref;
9017 struct mount *mp;
9019 offset = blkoff(dp->i_fs, dp->i_offset);
9020 mp = UFSTOVFS(dp->i_ump);
9023 * Whiteouts do not need diradd dependencies.
9025 if (newinum != WINO) {
9026 dap = malloc(sizeof(struct diradd),
9027 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
9028 workitem_alloc(&dap->da_list, D_DIRADD, mp);
9029 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
9030 dap->da_offset = offset;
9031 dap->da_newinum = newinum;
9032 LIST_INIT(&dap->da_jwork);
9036 * Allocate a new dirrem and ACQUIRE_LOCK.
9038 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9039 pagedep = dirrem->dm_pagedep;
9041 * The possible values for isrmdir:
9042 * 0 - non-directory file rename
9043 * 1 - directory rename within same directory
9044 * inum - directory rename to new directory of given inode number
9045 * When renaming to a new directory, we are both deleting and
9046 * creating a new directory entry, so the link count on the new
9047 * directory should not change. Thus we do not need the followup
9048 * dirrem which is usually done in handle_workitem_remove. We set
9049 * the DIRCHG flag to tell handle_workitem_remove to skip the
9050 * followup dirrem.
9052 if (isrmdir > 1)
9053 dirrem->dm_state |= DIRCHG;
9056 * Whiteouts have no additional dependencies,
9057 * so just put the dirrem on the correct list.
9059 if (newinum == WINO) {
9060 if ((dirrem->dm_state & COMPLETE) == 0) {
9061 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
9062 dm_next);
9063 } else {
9064 dirrem->dm_dirinum = pagedep->pd_ino;
9065 if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9066 add_to_worklist(&dirrem->dm_list, 0);
9068 FREE_LOCK(&lk);
9069 return;
9072 * Add the dirrem to the inodedep's pending remove list for quick
9073 * discovery later. A valid nlinkdelta ensures that this lookup
9074 * will not fail.
9076 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9077 panic("softdep_setup_directory_change: Lost inodedep.");
9078 dirrem->dm_state |= ONDEPLIST;
9079 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9082 * If the COMPLETE flag is clear, then there were no active
9083 * entries and we want to roll back to the previous inode until
9084 * the new inode is committed to disk. If the COMPLETE flag is
9085 * set, then we have deleted an entry that never made it to disk.
9086 * If the entry we deleted resulted from a name change, then the old
9087 * inode reference still resides on disk. Any rollback that we do
9088 * needs to be to that old inode (returned to us in prevdirrem). If
9089 * the entry we deleted resulted from a create, then there is
9090 * no entry on the disk, so we want to roll back to zero rather
9091 * than the uncommitted inode. In either of the COMPLETE cases we
9092 * want to immediately free the unwritten and unreferenced inode.
9094 if ((dirrem->dm_state & COMPLETE) == 0) {
9095 dap->da_previous = dirrem;
9096 } else {
9097 if (prevdirrem != NULL) {
9098 dap->da_previous = prevdirrem;
9099 } else {
9100 dap->da_state &= ~DIRCHG;
9101 dap->da_pagedep = pagedep;
9103 dirrem->dm_dirinum = pagedep->pd_ino;
9104 if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9105 add_to_worklist(&dirrem->dm_list, 0);
9108 * Lookup the jaddref for this journal entry. We must finish
9109 * initializing it and make the diradd write dependent on it.
9110 * If we're not journaling, put it on the id_bufwait list if the
9111 * inode is not yet written. If it is written, do the post-inode
9112 * write processing to put it on the id_pendinghd list.
9114 inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
9115 if (MOUNTEDSUJ(mp)) {
9116 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
9117 inoreflst);
9118 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
9119 ("softdep_setup_directory_change: bad jaddref %p",
9121 jaddref->ja_diroff = dp->i_offset;
9122 jaddref->ja_diradd = dap;
9123 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9124 dap, da_pdlist);
9125 add_to_journal(&jaddref->ja_list);
9126 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
9127 dap->da_state |= COMPLETE;
9128 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
9129 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
9130 } else {
9131 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9132 dap, da_pdlist);
9133 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
9136 * If we're making a new name for a directory that has not been
9137 * committed we need to move the dot and dotdot references to
9138 * this new name.
9140 if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
9141 merge_diradd(inodedep, dap);
9142 FREE_LOCK(&lk);
9146 * Called whenever the link count on an inode is changed.
9147 * It creates an inode dependency so that the new reference(s)
9148 * to the inode cannot be committed to disk until the updated
9149 * inode has been written.
9152 softdep_change_linkcnt(ip)
9153 struct inode *ip; /* the inode with the increased link count */
9155 struct inodedep *inodedep;
9156 int dflags;
9158 ACQUIRE_LOCK(&lk);
9159 dflags = DEPALLOC;
9160 if (IS_SNAPSHOT(ip))
9161 dflags |= NODELAY;
9162 inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
9163 if (ip->i_nlink < ip->i_effnlink)
9164 panic("softdep_change_linkcnt: bad delta");
9165 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9166 FREE_LOCK(&lk);
9170 * Attach a sbdep dependency to the superblock buf so that we can keep
9171 * track of the head of the linked list of referenced but unlinked inodes.
9174 softdep_setup_sbupdate(ump, fs, bp)
9175 struct ufsmount *ump;
9179 struct sbdep *sbdep;
9180 struct worklist *wk;
9182 if (MOUNTEDSUJ(UFSTOVFS(ump)) == 0)
9183 return;
9184 LIST_FOREACH(wk, &bp->b_dep, wk_list)
9185 if (wk->wk_type == D_SBDEP)
9186 break;
9187 if (wk != NULL)
9188 return;
9189 sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
9190 workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
9191 sbdep->sb_fs = fs;
9192 sbdep->sb_ump = ump;
9194 WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
9199 * Return the first unlinked inodedep which is ready to be the head of the
9200 * list. The inodedep and all those after it must have valid next pointers.
9202 static struct inodedep *
9203 first_unlinked_inodedep(ump)
9204 struct ufsmount *ump;
9206 struct inodedep *inodedep;
9207 struct inodedep *idp;
9209 mtx_assert(&lk, MA_OWNED);
9210 for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
9211 inodedep; inodedep = idp) {
9212 if ((inodedep->id_state & UNLINKNEXT) == 0)
9213 break;
9214 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9215 if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
9216 break;
9217 if ((inodedep->id_state & UNLINKPREV) == 0)
9218 panic("first_unlinked_inodedep: prev != next");
9220 return (inodedep);
9224 * Set the sujfree unlinked head pointer prior to writing a superblock.
9227 initiate_write_sbdep(sbdep)
9228 struct sbdep *sbdep;
9230 struct inodedep *inodedep;
9231 struct fs *bpfs;
9232 struct fs *fs;
9234 bpfs = sbdep->sb_fs;
9235 fs = sbdep->sb_ump->um_fs;
9236 inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9237 if (inodedep) {
9238 fs->fs_sujfree = inodedep->id_ino;
9239 inodedep->id_state |= UNLINKPREV;
9240 } else
9241 fs->fs_sujfree = 0;
9242 bpfs->fs_sujfree = fs->fs_sujfree;
9246 * After a superblock is written, determine whether it must be written again
9247 * due to a changing unlinked list head.
9250 handle_written_sbdep(sbdep, bp)
9251 struct sbdep *sbdep;
9254 struct inodedep *inodedep;
9255 struct mount *mp;
9256 struct fs *fs;
9258 mtx_assert(&lk, MA_OWNED);
9259 fs = sbdep->sb_fs;
9260 mp = UFSTOVFS(sbdep->sb_ump);
9262 * If the superblock doesn't match the in-memory list, start over.
9264 inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9265 if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
9266 (inodedep == NULL && fs->fs_sujfree != 0)) {
9267 bdirty(bp);
9268 return (1);
9270 WORKITEM_FREE(sbdep, D_SBDEP);
9271 if (fs->fs_sujfree == 0)
9272 return (0);
9274 * Now that we have a record of this inode in stable store allow it
9275 * to be written to free up pending work. Inodes may see a lot of
9276 * write activity after they are unlinked which we must not hold up.
9278 for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
9279 if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
9280 panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
9281 inodedep, inodedep->id_state);
9282 if (inodedep->id_state & UNLINKONLIST)
9283 break;
9284 inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
9287 return (0);
9291 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
9294 unlinked_inodedep(mp, inodedep)
9296 struct inodedep *inodedep;
9298 struct ufsmount *ump;
9300 mtx_assert(&lk, MA_OWNED);
9301 if (MOUNTEDSUJ(mp) == 0)
9302 return;
9303 ump = VFSTOUFS(mp);
9304 ump->um_fs->fs_fmod = 1;
9305 if (inodedep->id_state & UNLINKED)
9306 panic("unlinked_inodedep: %p already unlinked\n", inodedep);
9307 inodedep->id_state |= UNLINKED;
9308 TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
9312 * Remove an inodedep from the unlinked inodedep list. This may require
9313 * disk writes if the inode has made it that far.
9316 clear_unlinked_inodedep(inodedep)
9317 struct inodedep *inodedep;
9319 struct ufsmount *ump;
9320 struct inodedep *idp;
9321 struct inodedep *idn;
9322 struct fs *fs;
9323 struct buf *bp;
9324 ino_t ino;
9325 ino_t nino;
9326 ino_t pino;
9327 int error;
9329 ump = VFSTOUFS(inodedep->id_list.wk_mp);
9330 fs = ump->um_fs;
9331 ino = inodedep->id_ino;
9332 error = 0;
9333 for (;;) {
9334 mtx_assert(&lk, MA_OWNED);
9335 KASSERT((inodedep->id_state & UNLINKED) != 0,
9336 ("clear_unlinked_inodedep: inodedep %p not unlinked",
9339 * If nothing has yet been written, simply remove us from
9340 * the in-memory list and return. This is the most common
9341 * case where handle_workitem_remove() loses the final
9342 * reference.
9344 if ((inodedep->id_state & UNLINKLINKS) == 0)
9345 break;
9347 * If we have a NEXT pointer and no PREV pointer we can simply
9348 * clear NEXT's PREV and remove ourselves from the list. Be
9349 * careful not to clear PREV if the superblock points at
9352 idn = TAILQ_NEXT(inodedep, id_unlinked);
9353 if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
9354 if (idn && fs->fs_sujfree != idn->id_ino)
9355 idn->id_state &= ~UNLINKPREV;
9356 break;
9359 * Here we have an inodedep which is actually linked into
9360 * the list. We must remove it by forcing a write to the
9361 * link before us, whether it be the superblock or an inode.
9362 * Unfortunately the list may change while we're waiting
9363 * on the buf lock for either resource so we must loop until
9364 * we lock the right one. If both the superblock and an
9365 * inode point to this inode we must clear the inode first
9366 * followed by the superblock.
9368 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9369 pino = 0;
9370 if (idp && (idp->id_state & UNLINKNEXT))
9371 pino = idp->id_ino;
9372 FREE_LOCK(&lk);
9373 if (pino == 0)
9374 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9375 (int)fs->fs_sbsize, 0, 0, 0);
9376 else
9377 error = bread(ump->um_devvp,
9378 fsbtodb(fs, ino_to_fsba(fs, pino)),
9379 (int)fs->fs_bsize, NOCRED, &bp);
9380 ACQUIRE_LOCK(&lk);
9381 if (error)
9382 break;
9386 /* If the list has changed, restart the loop. */
9387 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9388 nino = 0;
9389 if (idp && (idp->id_state & UNLINKNEXT))
9390 nino = idp->id_ino;
9391 if (nino != pino ||
9392 (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
9393 FREE_LOCK(&lk);
9394 brelse(bp);
9395 ACQUIRE_LOCK(&lk);
9396 continue;
9398 nino = 0;
9399 idn = TAILQ_NEXT(inodedep, id_unlinked);
9400 if (idn)
9401 nino = idn->id_ino;
9403 * Remove us from the in-memory list. After this we cannot
9404 * access the inodedep.
9406 KASSERT((inodedep->id_state & UNLINKED) != 0,
9407 ("clear_unlinked_inodedep: inodedep %p not unlinked",
9409 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9410 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9413 * The predecessor's next pointer is manually updated here
9414 * so that the NEXT flag is never cleared for an element
9415 * that is in the list.
9417 if (pino == 0) {
9418 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9419 ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9420 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9421 bp);
9422 } else if (fs->fs_magic == FS_UFS1_MAGIC)
9423 ((struct ufs1_dinode *)bp->b_data +
9424 ino_to_fsbo(fs, pino))->di_freelink = nino;
9425 else
9426 ((struct ufs2_dinode *)bp->b_data +
9427 ino_to_fsbo(fs, pino))->di_freelink = nino;
9429 * If the bwrite fails we have no recourse to recover. The
9430 * filesystem is corrupted already.
9432 bwrite(bp);
9433 ACQUIRE_LOCK(&lk);
9435 * If the superblock pointer still needs to be cleared, force
9436 * a write here.
9438 if (fs->fs_sujfree == ino) {
9439 FREE_LOCK(&lk);
9440 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9441 (int)fs->fs_sbsize, 0, 0, 0);
9442 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9443 ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9444 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9445 bp);
9446 bwrite(bp);
9447 ACQUIRE_LOCK(&lk);
9450 if (fs->fs_sujfree != ino)
9451 break;
9452 panic("clear_unlinked_inodedep: Failed to clear free head");
9454 if (inodedep->id_ino == fs->fs_sujfree)
9455 panic("clear_unlinked_inodedep: Freeing head of free list");
9456 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9457 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9462 * This workitem decrements the inode's link count.
9463 * If the link count reaches zero, the file is removed.
9466 handle_workitem_remove(dirrem, flags)
9467 struct dirrem *dirrem;
9468 int flags;
9470 struct inodedep *inodedep;
9471 struct workhead dotdotwk;
9472 struct worklist *wk;
9473 struct ufsmount *ump;
9474 struct mount *mp;
9475 struct vnode *vp;
9476 struct inode *ip;
9477 ino_t oldinum;
9479 if (dirrem->dm_state & ONWORKLIST)
9480 panic("handle_workitem_remove: dirrem %p still on worklist",
9481 dirrem);
9482 oldinum = dirrem->dm_oldinum;
9483 mp = dirrem->dm_list.wk_mp;
9484 ump = VFSTOUFS(mp);
9485 flags |= LK_EXCLUSIVE;
9486 if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
9487 return (EBUSY);
9488 ip = VTOI(vp);
9489 ACQUIRE_LOCK(&lk);
9490 if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
9491 panic("handle_workitem_remove: lost inodedep");
9492 if (dirrem->dm_state & ONDEPLIST)
9493 LIST_REMOVE(dirrem, dm_inonext);
9494 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
9495 ("handle_workitem_remove: Journal entries not written."));
9498 * Move all dependencies waiting on the remove to complete
9499 * from the dirrem to the inode inowait list to be completed
9500 * after the inode has been updated and written to disk. Any
9501 * marked MKDIR_PARENT are saved to be completed when the .. ref
9502 * is removed.
9504 LIST_INIT(&dotdotwk);
9505 while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
9506 WORKLIST_REMOVE(wk);
9507 if (wk->wk_state & MKDIR_PARENT) {
9508 wk->wk_state &= ~MKDIR_PARENT;
9509 WORKLIST_INSERT(&dotdotwk, wk);
9510 continue;
9512 WORKLIST_INSERT(&inodedep->id_inowait, wk);
9514 LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
9516 * Normal file deletion.
9518 if ((dirrem->dm_state & RMDIR) == 0) {
9519 ip->i_nlink--;
9520 DIP_SET(ip, i_nlink, ip->i_nlink);
9521 ip->i_flag |= IN_CHANGE;
9522 if (ip->i_nlink < ip->i_effnlink)
9523 panic("handle_workitem_remove: bad file delta");
9524 if (ip->i_nlink == 0)
9525 unlinked_inodedep(mp, inodedep);
9526 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9527 KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9528 ("handle_workitem_remove: worklist not empty. %s",
9529 TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
9530 WORKITEM_FREE(dirrem, D_DIRREM);
9531 FREE_LOCK(&lk);
9532 goto out;
9535 * Directory deletion. Decrement reference count for both the
9536 * just deleted parent directory entry and the reference for ".".
9537 * Arrange to have the reference count on the parent decremented
9538 * to account for the loss of "..".
9540 ip->i_nlink -= 2;
9541 DIP_SET(ip, i_nlink, ip->i_nlink);
9542 ip->i_flag |= IN_CHANGE;
9543 if (ip->i_nlink < ip->i_effnlink)
9544 panic("handle_workitem_remove: bad dir delta");
9545 if (ip->i_nlink == 0)
9546 unlinked_inodedep(mp, inodedep);
9547 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9549 * Rename a directory to a new parent. Since we are both deleting
9550 * and creating a new directory entry, the link count on the new
9551 * directory should not change. Thus we skip the followup dirrem.
9553 if (dirrem->dm_state & DIRCHG) {
9554 KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9555 ("handle_workitem_remove: DIRCHG and worklist not empty."));
9556 WORKITEM_FREE(dirrem, D_DIRREM);
9557 FREE_LOCK(&lk);
9558 goto out;
9560 dirrem->dm_state = ONDEPLIST;
9561 dirrem->dm_oldinum = dirrem->dm_dirinum;
9563 * Place the dirrem on the parent's dirremhd list.
9565 if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
9566 panic("handle_workitem_remove: lost dir inodedep");
9567 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9569 * If the allocated inode has never been written to disk, then
9570 * the on-disk inode is zero'ed and we can remove the file
9571 * immediately. When journaling if the inode has been marked
9572 * unlinked and not DEPCOMPLETE we know it can never be written.
9574 inodedep_lookup(mp, oldinum, 0, &inodedep);
9575 if (inodedep == NULL ||
9576 (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
9577 check_inode_unwritten(inodedep)) {
9578 FREE_LOCK(&lk);
9579 vput(vp);
9580 return handle_workitem_remove(dirrem, flags);
9582 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
9583 FREE_LOCK(&lk);
9584 ip->i_flag |= IN_CHANGE;
9585 out:
9586 ffs_update(vp, 0);
9587 vput(vp);
9588 return (0);
9592 * Inode de-allocation dependencies.
9594 * When an inode's link count is reduced to zero, it can be de-allocated. We
9595 * found it convenient to postpone de-allocation until after the inode is
9596 * written to disk with its new link count (zero). At this point, all of the
9597 * on-disk inode's block pointers are nullified and, with careful dependency
9598 * list ordering, all dependencies related to the inode will be satisfied and
9599 * the corresponding dependency structures de-allocated. So, if/when the
9600 * inode is reused, there will be no mixing of old dependencies with new
9601 * ones. This artificial dependency is set up by the block de-allocation
9602 * procedure above (softdep_setup_freeblocks) and completed by the
9603 * following procedure.
9606 handle_workitem_freefile(freefile)
9607 struct freefile *freefile;
9609 struct workhead wkhd;
9610 struct fs *fs;
9611 struct inodedep *idp;
9612 struct ufsmount *ump;
9613 int error;
9615 ump = VFSTOUFS(freefile->fx_list.wk_mp);
9616 fs = ump->um_fs;
9617 #ifdef DEBUG
9618 ACQUIRE_LOCK(&lk);
9619 error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
9620 FREE_LOCK(&lk);
9621 if (error)
9622 panic("handle_workitem_freefile: inodedep %p survived", idp);
9623 #endif
9624 UFS_LOCK(ump);
9625 fs->fs_pendinginodes -= 1;
9626 UFS_UNLOCK(ump);
9627 LIST_INIT(&wkhd);
9628 LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
9629 if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
9630 freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
9631 softdep_error("handle_workitem_freefile", error);
9632 ACQUIRE_LOCK(&lk);
9633 WORKITEM_FREE(freefile, D_FREEFILE);
9634 FREE_LOCK(&lk);
9639 * Helper function which unlinks the marker element from the work list
9640 * and returns the next element on the list.
9642 static __inline struct worklist *
9643 markernext(struct worklist *marker)
9645 struct worklist *next;
9647 next = LIST_NEXT(marker, wk_list);
9648 LIST_REMOVE(marker, wk_list);
9649 return (next);
9655 * The dependency structures constructed above are most actively used when file
9656 * system blocks are written to disk. No constraints are placed on when a
9657 * block can be written, but unsatisfied update dependencies are made safe by
9658 * modifying (or replacing) the source memory for the duration of the disk
9659 * write. When the disk write completes, the memory block is again brought
9660 * up-to-date.
9662 * In-core inode structure reclamation.
9664 * Because there are a finite number of "in-core" inode structures, they are
9665 * reused regularly. By transferring all inode-related dependencies to the
9666 * in-memory inode block and indexing them separately (via "inodedep"s), we
9667 * can allow "in-core" inode structures to be reused at any time and avoid
9668 * any increase in contention.
9670 * Called just before entering the device driver to initiate a new disk I/O.
9671 * The buffer must be locked, thus, no I/O completion operations can occur
9672 * while we are manipulating its associated dependencies.
9675 softdep_disk_io_initiation(bp)
9676 struct buf *bp; /* structure describing disk write to occur */
9678 struct worklist *wk;
9679 struct worklist marker;
9680 struct inodedep *inodedep;
9681 struct freeblks *freeblks;
9682 struct jblkdep *jblkdep;
9683 struct newblk *newblk;
9686 * We only care about write operations. There should never
9687 * be dependencies for reads.
9689 if (bp->b_iocmd != BIO_WRITE)
9690 panic("softdep_disk_io_initiation: not write");
9692 if (bp->b_vflags & BV_BKGRDINPROG)
9693 panic("softdep_disk_io_initiation: Writing buffer with "
9694 "background write in progress: %p", bp);
9696 marker.wk_type = D_LAST + 1; /* Not a normal workitem */
9697 PHOLD(curproc); /* Don't swap out kernel stack */
9698 ACQUIRE_LOCK(&lk);
9701 * Do any necessary pre-I/O processing.
9703 for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
9704 wk = markernext(&marker)) {
9705 LIST_INSERT_AFTER(wk, &marker, wk_list);
9706 switch (wk->wk_type) {
9708 case D_PAGEDEP:
9709 initiate_write_filepage(WK_PAGEDEP(wk), bp);
9710 continue;
9712 case D_INODEDEP:
9713 inodedep = WK_INODEDEP(wk);
9714 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
9715 initiate_write_inodeblock_ufs1(inodedep, bp);
9716 else
9717 initiate_write_inodeblock_ufs2(inodedep, bp);
9718 continue;
9720 case D_INDIRDEP:
9721 initiate_write_indirdep(WK_INDIRDEP(wk), bp);
9722 continue;
9724 case D_BMSAFEMAP:
9725 initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
9726 continue;
9728 case D_JSEG:
9729 WK_JSEG(wk)->js_buf = NULL;
9730 continue;
9732 case D_FREEBLKS:
9733 freeblks = WK_FREEBLKS(wk);
9734 jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
9736 * We have to wait for the freeblks to be journaled
9737 * before we can write an inodeblock with updated
9738 * pointers. Be careful to arrange the marker so
9739 * we revisit the freeblks if it's not removed by
9740 * the first jwait().
9742 if (jblkdep != NULL) {
9743 LIST_REMOVE(&marker, wk_list);
9744 LIST_INSERT_BEFORE(wk, &marker, wk_list);
9745 jwait(&jblkdep->jb_list, MNT_WAIT);
9747 continue;
9748 case D_ALLOCDIRECT:
9749 case D_ALLOCINDIR:
9751 * We have to wait for the jnewblk to be journaled
9752 * before we can write to a block if the contents
9753 * may be confused with an earlier file's indirect
9754 * at recovery time. Handle the marker as described
9755 * above.
9757 newblk = WK_NEWBLK(wk);
9758 if (newblk->nb_jnewblk != NULL &&
9759 indirblk_lookup(newblk->nb_list.wk_mp,
9760 newblk->nb_newblkno)) {
9761 LIST_REMOVE(&marker, wk_list);
9762 LIST_INSERT_BEFORE(wk, &marker, wk_list);
9763 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
9765 continue;
9767 case D_SBDEP:
9768 initiate_write_sbdep(WK_SBDEP(wk));
9769 continue;
9771 case D_MKDIR:
9772 case D_FREEWORK:
9773 case D_FREEDEP:
9774 case D_JSEGDEP:
9775 continue;
9777 default:
9778 panic("handle_disk_io_initiation: Unexpected type %s",
9779 TYPENAME(wk->wk_type));
9780 /* NOTREACHED */
9783 FREE_LOCK(&lk);
9784 PRELE(curproc); /* Allow swapout of kernel stack */
9788 * Called from within the procedure above to deal with unsatisfied
9789 * allocation dependencies in a directory. The buffer must be locked,
9790 * thus, no I/O completion operations can occur while we are
9791 * manipulating its associated dependencies.
9794 initiate_write_filepage(pagedep, bp)
9795 struct pagedep *pagedep;
9798 struct jremref *jremref;
9799 struct jmvref *jmvref;
9800 struct dirrem *dirrem;
9801 struct diradd *dap;
9802 struct direct *ep;
9803 int i;
9805 if (pagedep->pd_state & IOSTARTED) {
9807 * This can only happen if there is a driver that does not
9808 * understand chaining. Here biodone will reissue the call
9809 * to strategy for the incomplete buffers.
9811 printf("initiate_write_filepage: already started\n");
9812 return;
9814 pagedep->pd_state |= IOSTARTED;
9816 * Wait for all journal remove dependencies to hit the disk.
9817 * We can not allow any potentially conflicting directory adds
9818 * to be visible before removes, and rollback is too difficult.
9819 * lk may be dropped and re-acquired; however, we hold the buf
9820 * locked so the dependency can not go away.
9822 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
9823 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
9824 jwait(&jremref->jr_list, MNT_WAIT);
9825 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
9826 jwait(&jmvref->jm_list, MNT_WAIT);
9827 for (i = 0; i < DAHASHSZ; i++) {
9828 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
9829 ep = (struct direct *)
9830 ((char *)bp->b_data + dap->da_offset);
9831 if (ep->d_ino != dap->da_newinum)
9832 panic("%s: dir inum %d != new %d",
9833 "initiate_write_filepage",
9834 ep->d_ino, dap->da_newinum);
9835 if (dap->da_state & DIRCHG)
9836 ep->d_ino = dap->da_previous->dm_oldinum;
9837 else
9838 ep->d_ino = 0;
9839 dap->da_state &= ~ATTACHED;
9840 dap->da_state |= UNDONE;
9846 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
9847 * Note that any bug fixes made to this routine must be done in the
9848 * version found below.
9850 * Called from within the procedure above to deal with unsatisfied
9851 * allocation dependencies in an inodeblock. The buffer must be
9852 * locked, thus, no I/O completion operations can occur while we
9853 * are manipulating its associated dependencies.
9856 initiate_write_inodeblock_ufs1(inodedep, bp)
9857 struct inodedep *inodedep;
9858 struct buf *bp; /* The inode block */
9860 struct allocdirect *adp, *lastadp;
9861 struct ufs1_dinode *dp;
9862 struct ufs1_dinode *sip;
9863 struct inoref *inoref;
9864 struct fs *fs;
9865 ufs_lbn_t i;
9866 #ifdef INVARIANTS
9867 ufs_lbn_t prevlbn = 0;
9868 #endif
9869 int deplist;
9871 if (inodedep->id_state & IOSTARTED)
9872 panic("initiate_write_inodeblock_ufs1: already started");
9873 inodedep->id_state |= IOSTARTED;
9874 fs = inodedep->id_fs;
9875 dp = (struct ufs1_dinode *)bp->b_data +
9876 ino_to_fsbo(fs, inodedep->id_ino);
9879 * If we're on the unlinked list but have not yet written our
9880 * next pointer initialize it here.
9882 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
9883 struct inodedep *inon;
9885 inon = TAILQ_NEXT(inodedep, id_unlinked);
9886 dp->di_freelink = inon ? inon->id_ino : 0;
9889 * If the bitmap is not yet written, then the allocated
9890 * inode cannot be written to disk.
9892 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
9893 if (inodedep->id_savedino1 != NULL)
9894 panic("initiate_write_inodeblock_ufs1: I/O underway");
9895 FREE_LOCK(&lk);
9896 sip = malloc(sizeof(struct ufs1_dinode),
9897 M_SAVEDINO, M_SOFTDEP_FLAGS);
9898 ACQUIRE_LOCK(&lk);
9899 inodedep->id_savedino1 = sip;
9900 *inodedep->id_savedino1 = *dp;
9901 bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
9902 dp->di_gen = inodedep->id_savedino1->di_gen;
9903 dp->di_freelink = inodedep->id_savedino1->di_freelink;
9904 return;
9907 * If no dependencies, then there is nothing to roll back.
9909 inodedep->id_savedsize = dp->di_size;
9910 inodedep->id_savedextsize = 0;
9911 inodedep->id_savednlink = dp->di_nlink;
9912 if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
9913 TAILQ_EMPTY(&inodedep->id_inoreflst))
9914 return;
9916 * Revert the link count to that of the first unwritten journal entry.
9918 inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
9919 if (inoref)
9920 dp->di_nlink = inoref->if_nlink;
9922 * Set the dependencies to busy.
9924 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
9925 adp = TAILQ_NEXT(adp, ad_next)) {
9926 #ifdef INVARIANTS
9927 if (deplist != 0 && prevlbn >= adp->ad_offset)
9928 panic("softdep_write_inodeblock: lbn order");
9929 prevlbn = adp->ad_offset;
9930 if (adp->ad_offset < NDADDR &&
9931 dp->di_db[adp->ad_offset] != adp->ad_newblkno)
9932 panic("%s: direct pointer #%jd mismatch %d != %jd",
9933 "softdep_write_inodeblock",
9934 (intmax_t)adp->ad_offset,
9935 dp->di_db[adp->ad_offset],
9936 (intmax_t)adp->ad_newblkno);
9937 if (adp->ad_offset >= NDADDR &&
9938 dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
9939 panic("%s: indirect pointer #%jd mismatch %d != %jd",
9940 "softdep_write_inodeblock",
9941 (intmax_t)adp->ad_offset - NDADDR,
9942 dp->di_ib[adp->ad_offset - NDADDR],
9943 (intmax_t)adp->ad_newblkno);
9944 deplist |= 1 << adp->ad_offset;
9945 if ((adp->ad_state & ATTACHED) == 0)
9946 panic("softdep_write_inodeblock: Unknown state 0x%x",
9947 adp->ad_state);
9948 #endif /* INVARIANTS */
9949 adp->ad_state &= ~ATTACHED;
9950 adp->ad_state |= UNDONE;
9953 * The on-disk inode cannot claim to be any larger than the last
9954 * fragment that has been written. Otherwise, the on-disk inode
9955 * might have fragments that were not the last block in the file
9956 * which would corrupt the filesystem.
9958 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
9959 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
9960 if (adp->ad_offset >= NDADDR)
9961 break;
9962 dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
9963 /* keep going until hitting a rollback to a frag */
9964 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
9965 continue;
9966 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
9967 for (i = adp->ad_offset + 1; i < NDADDR; i++) {
9968 #ifdef INVARIANTS
9969 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
9970 panic("softdep_write_inodeblock: lost dep1");
9971 #endif /* INVARIANTS */
9972 dp->di_db[i] = 0;
9974 for (i = 0; i < NIADDR; i++) {
9975 #ifdef INVARIANTS
9976 if (dp->di_ib[i] != 0 &&
9977 (deplist & ((1 << NDADDR) << i)) == 0)
9978 panic("softdep_write_inodeblock: lost dep2");
9979 #endif /* INVARIANTS */
9980 dp->di_ib[i] = 0;
9982 return;
9985 * If we have zero'ed out the last allocated block of the file,
9986 * roll back the size to the last currently allocated block.
9987 * We know that this last allocated block is full-sized, as
9988 * we already checked for fragments in the loop above.
9990 if (lastadp != NULL &&
9991 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
9992 for (i = lastadp->ad_offset; i >= 0; i--)
9993 if (dp->di_db[i] != 0)
9994 break;
9995 dp->di_size = (i + 1) * fs->fs_bsize;
9998 * The only dependencies are for indirect blocks.
10000 * The file size for indirect block additions is not guaranteed.
10001 * Such a guarantee would be non-trivial to achieve. The conventional
10002 * synchronous write implementation also does not make this guarantee.
10003 * Fsck should catch and fix discrepancies. Arguably, the file size
10004 * can be over-estimated without destroying integrity when the file
10005 * moves into the indirect blocks (i.e., is large). If we want to
10006 * postpone fsck, we are stuck with this argument.
10008 for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10009 dp->di_ib[adp->ad_offset - NDADDR] = 0;
10013 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
10014 * Note that any bug fixes made to this routine must be done in the
10015 * version found above.
10017 * Called from within the procedure above to deal with unsatisfied
10018 * allocation dependencies in an inodeblock. The buffer must be
10019 * locked, thus, no I/O completion operations can occur while we
10020 * are manipulating its associated dependencies.
10023 initiate_write_inodeblock_ufs2(inodedep, bp)
10024 struct inodedep *inodedep;
10025 struct buf *bp; /* The inode block */
10027 struct allocdirect *adp, *lastadp;
10028 struct ufs2_dinode *dp;
10029 struct ufs2_dinode *sip;
10030 struct inoref *inoref;
10031 struct fs *fs;
10032 ufs_lbn_t i;
10033 #ifdef INVARIANTS
10034 ufs_lbn_t prevlbn = 0;
10035 #endif
10036 int deplist;
10038 if (inodedep->id_state & IOSTARTED)
10039 panic("initiate_write_inodeblock_ufs2: already started");
10040 inodedep->id_state |= IOSTARTED;
10041 fs = inodedep->id_fs;
10042 dp = (struct ufs2_dinode *)bp->b_data +
10043 ino_to_fsbo(fs, inodedep->id_ino);
10046 * If we're on the unlinked list but have not yet written our
10047 * next pointer initialize it here.
10049 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10050 struct inodedep *inon;
10052 inon = TAILQ_NEXT(inodedep, id_unlinked);
10053 dp->di_freelink = inon ? inon->id_ino : 0;
10056 * If the bitmap is not yet written, then the allocated
10057 * inode cannot be written to disk.
10059 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10060 if (inodedep->id_savedino2 != NULL)
10061 panic("initiate_write_inodeblock_ufs2: I/O underway");
10062 FREE_LOCK(&lk);
10063 sip = malloc(sizeof(struct ufs2_dinode),
10064 M_SAVEDINO, M_SOFTDEP_FLAGS);
10065 ACQUIRE_LOCK(&lk);
10066 inodedep->id_savedino2 = sip;
10067 *inodedep->id_savedino2 = *dp;
10068 bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
10069 dp->di_gen = inodedep->id_savedino2->di_gen;
10070 dp->di_freelink = inodedep->id_savedino2->di_freelink;
10071 return;
10074 * If no dependencies, then there is nothing to roll back.
10076 inodedep->id_savedsize = dp->di_size;
10077 inodedep->id_savedextsize = dp->di_extsize;
10078 inodedep->id_savednlink = dp->di_nlink;
10079 if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10080 TAILQ_EMPTY(&inodedep->id_extupdt) &&
10081 TAILQ_EMPTY(&inodedep->id_inoreflst))
10082 return;
10084 * Revert the link count to that of the first unwritten journal entry.
10086 inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10087 if (inoref)
10088 dp->di_nlink = inoref->if_nlink;
10091 * Set the ext data dependencies to busy.
10093 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10094 adp = TAILQ_NEXT(adp, ad_next)) {
10095 #ifdef INVARIANTS
10096 if (deplist != 0 && prevlbn >= adp->ad_offset)
10097 panic("softdep_write_inodeblock: lbn order");
10098 prevlbn = adp->ad_offset;
10099 if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
10100 panic("%s: direct pointer #%jd mismatch %jd != %jd",
10101 "softdep_write_inodeblock",
10102 (intmax_t)adp->ad_offset,
10103 (intmax_t)dp->di_extb[adp->ad_offset],
10104 (intmax_t)adp->ad_newblkno);
10105 deplist |= 1 << adp->ad_offset;
10106 if ((adp->ad_state & ATTACHED) == 0)
10107 panic("softdep_write_inodeblock: Unknown state 0x%x",
10108 adp->ad_state);
10109 #endif /* INVARIANTS */
10110 adp->ad_state &= ~ATTACHED;
10111 adp->ad_state |= UNDONE;
10114 * The on-disk inode cannot claim to be any larger than the last
10115 * fragment that has been written. Otherwise, the on-disk inode
10116 * might have fragments that were not the last block in the ext
10117 * data which would corrupt the filesystem.
10119 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10120 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10121 dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
10122 /* keep going until hitting a rollback to a frag */
10123 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10124 continue;
10125 dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10126 for (i = adp->ad_offset + 1; i < NXADDR; i++) {
10127 #ifdef INVARIANTS
10128 if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
10129 panic("softdep_write_inodeblock: lost dep1");
10130 #endif /* INVARIANTS */
10131 dp->di_extb[i] = 0;
10133 lastadp = NULL;
10134 break;
10137 * If we have zero'ed out the last allocated block of the ext
10138 * data, roll back the size to the last currently allocated block.
10139 * We know that this last allocated block is full-sized, as
10140 * we already checked for fragments in the loop above.
10142 if (lastadp != NULL &&
10143 dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10144 for (i = lastadp->ad_offset; i >= 0; i--)
10145 if (dp->di_extb[i] != 0)
10146 break;
10147 dp->di_extsize = (i + 1) * fs->fs_bsize;
10150 * Set the file data dependencies to busy.
10152 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10153 adp = TAILQ_NEXT(adp, ad_next)) {
10154 #ifdef INVARIANTS
10155 if (deplist != 0 && prevlbn >= adp->ad_offset)
10156 panic("softdep_write_inodeblock: lbn order");
10157 if ((adp->ad_state & ATTACHED) == 0)
10158 panic("inodedep %p and adp %p not attached", inodedep, adp);
10159 prevlbn = adp->ad_offset;
10160 if (adp->ad_offset < NDADDR &&
10161 dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10162 panic("%s: direct pointer #%jd mismatch %jd != %jd",
10163 "softdep_write_inodeblock",
10164 (intmax_t)adp->ad_offset,
10165 (intmax_t)dp->di_db[adp->ad_offset],
10166 (intmax_t)adp->ad_newblkno);
10167 if (adp->ad_offset >= NDADDR &&
10168 dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10169 panic("%s indirect pointer #%jd mismatch %jd != %jd",
10170 "softdep_write_inodeblock:",
10171 (intmax_t)adp->ad_offset - NDADDR,
10172 (intmax_t)dp->di_ib[adp->ad_offset - NDADDR],
10173 (intmax_t)adp->ad_newblkno);
10174 deplist |= 1 << adp->ad_offset;
10175 if ((adp->ad_state & ATTACHED) == 0)
10176 panic("softdep_write_inodeblock: Unknown state 0x%x",
10177 adp->ad_state);
10178 #endif /* INVARIANTS */
10179 adp->ad_state &= ~ATTACHED;
10180 adp->ad_state |= UNDONE;
10183 * The on-disk inode cannot claim to be any larger than the last
10184 * fragment that has been written. Otherwise, the on-disk inode
10185 * might have fragments that were not the last block in the file
10186 * which would corrupt the filesystem.
10188 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10189 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10190 if (adp->ad_offset >= NDADDR)
10191 break;
10192 dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10193 /* keep going until hitting a rollback to a frag */
10194 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10195 continue;
10196 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10197 for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10198 #ifdef INVARIANTS
10199 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10200 panic("softdep_write_inodeblock: lost dep2");
10201 #endif /* INVARIANTS */
10202 dp->di_db[i] = 0;
10204 for (i = 0; i < NIADDR; i++) {
10205 #ifdef INVARIANTS
10206 if (dp->di_ib[i] != 0 &&
10207 (deplist & ((1 << NDADDR) << i)) == 0)
10208 panic("softdep_write_inodeblock: lost dep3");
10209 #endif /* INVARIANTS */
10210 dp->di_ib[i] = 0;
10212 return;
10215 * If we have zero'ed out the last allocated block of the file,
10216 * roll back the size to the last currently allocated block.
10217 * We know that this last allocated block is full-sized, as
10218 * we already checked for fragments in the loop above.
10220 if (lastadp != NULL &&
10221 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10222 for (i = lastadp->ad_offset; i >= 0; i--)
10223 if (dp->di_db[i] != 0)
10224 break;
10225 dp->di_size = (i + 1) * fs->fs_bsize;
10228 * The only dependencies are for indirect blocks.
10230 * The file size for indirect block additions is not guaranteed.
10231 * Such a guarantee would be non-trivial to achieve. The conventional
10232 * synchronous write implementation also does not make this guarantee.
10233 * Fsck should catch and fix discrepancies. Arguably, the file size
10234 * can be over-estimated without destroying integrity when the file
10235 * moves into the indirect blocks (i.e., is large). If we want to
10236 * postpone fsck, we are stuck with this argument.
10238 for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10239 dp->di_ib[adp->ad_offset - NDADDR] = 0;
10243 * Cancel an indirdep as a result of truncation. Release all of the
10244 * children allocindirs and place their journal work on the appropriate
10245 * list.
10248 cancel_indirdep(indirdep, bp, freeblks)
10249 struct indirdep *indirdep;
10250 struct buf *bp;
10251 struct freeblks *freeblks;
10253 struct allocindir *aip;
10256 * None of the indirect pointers will ever be visible,
10257 * so they can simply be tossed. GOINGAWAY ensures
10258 * that allocated pointers will be saved in the buffer
10259 * cache until they are freed. Note that they will
10260 * only be able to be found by their physical address
10261 * since the inode mapping the logical address will
10262 * be gone. The save buffer used for the safe copy
10263 * was allocated in setup_allocindir_phase2 using
10264 * the physical address so it could be used for this
10265 * purpose. Hence we swap the safe copy with the real
10266 * copy, allowing the safe copy to be freed and holding
10267 * on to the real copy for later use in indir_trunc.
10269 if (indirdep->ir_state & GOINGAWAY)
10270 panic("cancel_indirdep: already gone");
10271 if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
10272 indirdep->ir_state |= DEPCOMPLETE;
10273 LIST_REMOVE(indirdep, ir_next);
10275 indirdep->ir_state |= GOINGAWAY;
10276 VFSTOUFS(indirdep->ir_list.wk_mp)->um_numindirdeps += 1;
10278 * Pass in bp for blocks still have journal writes
10279 * pending so we can cancel them on their own.
10281 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0)
10282 cancel_allocindir(aip, bp, freeblks, 0);
10283 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0)
10284 cancel_allocindir(aip, NULL, freeblks, 0);
10285 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0)
10286 cancel_allocindir(aip, NULL, freeblks, 0);
10287 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != 0)
10288 cancel_allocindir(aip, NULL, freeblks, 0);
10290 * If there are pending partial truncations we need to keep the
10291 * old block copy around until they complete. This is because
10292 * the current b_data is not a perfect superset of the available
10293 * blocks.
10295 if (TAILQ_EMPTY(&indirdep->ir_trunc))
10296 bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
10297 else
10298 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10299 WORKLIST_REMOVE(&indirdep->ir_list);
10300 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
10301 indirdep->ir_bp = NULL;
10302 indirdep->ir_freeblks = freeblks;
10306 * Free an indirdep once it no longer has new pointers to track.
10309 free_indirdep(indirdep)
10310 struct indirdep *indirdep;
10313 KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
10314 ("free_indirdep: Indir trunc list not empty."));
10315 KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
10316 ("free_indirdep: Complete head not empty."));
10317 KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
10318 ("free_indirdep: write head not empty."));
10319 KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
10320 ("free_indirdep: done head not empty."));
10321 KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
10322 ("free_indirdep: deplist head not empty."));
10323 KASSERT((indirdep->ir_state & DEPCOMPLETE),
10324 ("free_indirdep: %p still on newblk list.", indirdep));
10325 KASSERT(indirdep->ir_saveddata == NULL,
10326 ("free_indirdep: %p still has saved data.", indirdep));
10327 if (indirdep->ir_state & ONWORKLIST)
10328 WORKLIST_REMOVE(&indirdep->ir_list);
10329 WORKITEM_FREE(indirdep, D_INDIRDEP);
10333 * Called before a write to an indirdep. This routine is responsible for
10334 * rolling back pointers to a safe state which includes only those
10335 * allocindirs which have been completed.
10338 initiate_write_indirdep(indirdep, bp)
10339 struct indirdep *indirdep;
10340 struct buf *bp;
10343 indirdep->ir_state |= IOSTARTED;
10344 if (indirdep->ir_state & GOINGAWAY)
10345 panic("disk_io_initiation: indirdep gone");
10347 * If there are no remaining dependencies, this will be writing
10348 * the real pointers.
10350 if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
10351 TAILQ_EMPTY(&indirdep->ir_trunc))
10352 return;
10354 * Replace up-to-date version with safe version.
10356 if (indirdep->ir_saveddata == NULL) {
10357 FREE_LOCK(&lk);
10358 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
10359 M_SOFTDEP_FLAGS);
10360 ACQUIRE_LOCK(&lk);
10362 indirdep->ir_state &= ~ATTACHED;
10363 indirdep->ir_state |= UNDONE;
10364 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10365 bcopy(indirdep->ir_savebp->b_data, bp->b_data,
10366 bp->b_bcount);
10370 * Called when an inode has been cleared in a cg bitmap. This finally
10371 * eliminates any canceled jaddrefs.
10374 softdep_setup_inofree(mp, bp, ino, wkhd)
10375 struct mount *mp;
10376 struct buf *bp;
10377 ino_t ino;
10378 struct workhead *wkhd;
10380 struct worklist *wk, *wkn;
10381 struct inodedep *inodedep;
10382 uint8_t *inosused;
10383 struct cg *cgp;
10384 struct fs *fs;
10386 ACQUIRE_LOCK(&lk);
10387 fs = VFSTOUFS(mp)->um_fs;
10388 cgp = (struct cg *)bp->b_data;
10389 inosused = cg_inosused(cgp);
10390 if (isset(inosused, ino % fs->fs_ipg))
10391 panic("softdep_setup_inofree: inode %d not freed.", ino);
10392 if (inodedep_lookup(mp, ino, 0, &inodedep))
10393 panic("softdep_setup_inofree: ino %d has existing inodedep %p",
10394 ino, inodedep);
10396 LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
10397 if (wk->wk_type != D_JADDREF)
10398 continue;
10399 WORKLIST_REMOVE(wk);
10401 * We can free immediately even if the jaddref
10402 * isn't attached in a background write as now
10403 * the bitmaps are reconciled.
10405 wk->wk_state |= COMPLETE | ATTACHED;
10406 free_jaddref(WK_JADDREF(wk));
10408 jwork_move(&bp->b_dep, wkhd);
10409 FREE_LOCK(&lk);
10415 * Called via ffs_blkfree() after a set of frags has been cleared from a cg
10416 * map. Any dependencies waiting for the write to clear are added to the
10417 * buf's list and any jnewblks that are being canceled are discarded
10421 softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
10422 struct mount *mp;
10423 struct buf *bp;
10424 ufs2_daddr_t blkno;
10425 int frags;
10426 struct workhead *wkhd;
10428 struct bmsafemap *bmsafemap;
10429 struct jnewblk *jnewblk;
10430 struct worklist *wk;
10431 struct fs *fs;
10432 #ifdef SUJ_DEBUG
10433 uint8_t *blksfree;
10434 struct cg *cgp;
10435 ufs2_daddr_t jstart;
10436 ufs2_daddr_t jend;
10437 ufs2_daddr_t end;
10438 long bno;
10439 int i;
10440 #endif
10442 CTR3(KTR_SUJ,
10443 "softdep_setup_blkfree: blkno %jd frags %d wk head %p",
10444 blkno, frags, wkhd);
10446 ACQUIRE_LOCK(&lk);
10447 /* Lookup the bmsafemap so we track when it is dirty. */
10448 fs = VFSTOUFS(mp)->um_fs;
10449 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10451 * Detach any jnewblks which have been canceled. They must linger
10452 * until the bitmap is cleared again by ffs_blkfree() to prevent
10453 * an unjournaled allocation from hitting the disk.
10456 while ((wk = LIST_FIRST(wkhd)) != NULL) {
10458 "softdep_setup_blkfree: blkno %jd wk type %d",
10459 blkno, wk->wk_type);
10460 WORKLIST_REMOVE(wk);
10461 if (wk->wk_type != D_JNEWBLK) {
10462 WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
10465 jnewblk = WK_JNEWBLK(wk);
10466 KASSERT(jnewblk->jn_state & GOINGAWAY,
10467 ("softdep_setup_blkfree: jnewblk not canceled."));
10468 #ifdef SUJ_DEBUG
10470 * Assert that this block is free in the bitmap
10471 * before we discard the jnewblk.
10473 cgp = (struct cg *)bp->b_data;
10474 blksfree = cg_blksfree(cgp);
10475 bno = dtogd(fs, jnewblk->jn_blkno);
10476 for (i = jnewblk->jn_oldfrags;
10477 i < jnewblk->jn_frags; i++) {
10478 if (isset(blksfree, bno + i))
10479 continue;
10480 panic("softdep_setup_blkfree: not free");
10482 #endif
10484 * Even if it's not attached we can free immediately
10485 * as the new bitmap is correct.
10487 wk->wk_state |= COMPLETE | ATTACHED;
10488 free_jnewblk(jnewblk);
10491 #ifdef SUJ_DEBUG
10494 * Assert that we are not freeing a block which has an outstanding
10495 * allocation dependency.
10497 fs = VFSTOUFS(mp)->um_fs;
10498 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10499 end = blkno + frags;
10500 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10502 * Don't match against blocks that will be freed when the
10503 * background write is done.
10505 if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) ==
10506 (COMPLETE | DEPCOMPLETE))
10508 jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags;
10509 jend = jnewblk->jn_blkno + jnewblk->jn_frags;
10510 if ((blkno >= jstart && blkno < jend) ||
10511 (end > jstart && end <= jend)) {
10512 printf("state 0x%X %jd - %d %d dep %p\n",
10513 jnewblk->jn_state, jnewblk->jn_blkno,
10514 jnewblk->jn_oldfrags, jnewblk->jn_frags,
10516 panic("softdep_setup_blkfree: "
10517 "%jd-%jd(%d) overlaps with %jd-%jd",
10518 blkno, end, frags, jstart, jend);
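/*
 * Illustrative note (not from the original source): the overlap test above
 * checks whether either endpoint of the freed range falls inside the
 * half-open range [jstart, jend) claimed by a jnewblk. A sketch with
 * made-up numbers:
 *
 *	jstart = 100; jend = 108;	// jnewblk claims frags 100..107
 *	blkno = 104; end = 106;		// freeing frags 104..105
 *	(104 >= 100 && 104 < 108)	// true => overlap => panic
 *
 *	blkno = 108; end = 110;		// freeing frags 108..109
 *	(108 >= 100 && 108 < 108)	// false
 *	(110 > 100 && 110 <= 108)	// false => no overlap, no panic
 */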
10526 * Revert a block allocation when the journal record that describes it
10527 * is not yet written.
10530 jnewblk_rollback(jnewblk, fs, cgp, blksfree)
10531 struct jnewblk *jnewblk;
10536 ufs1_daddr_t fragno;
10542 cgbno = dtogd(fs, jnewblk->jn_blkno);
10544 * We have to test which frags need to be rolled back. We may
10545 * be operating on a stale copy when doing background writes.
10547 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++)
10548 if (isclr(blksfree, cgbno + i))
10553 * This is mostly ffs_blkfree() sans some validation and
10554 * superblock updates.
10556 if (frags == fs->fs_frag) {
10557 fragno = fragstoblks(fs, cgbno);
10558 ffs_setblock(fs, blksfree, fragno);
10559 ffs_clusteracct(fs, cgp, fragno, 1);
10560 cgp->cg_cs.cs_nbfree++;
10562 cgbno += jnewblk->jn_oldfrags;
10563 bbase = cgbno - fragnum(fs, cgbno);
10564 /* Decrement the old frags. */
10565 blk = blkmap(fs, blksfree, bbase);
10566 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
10567 /* Deallocate the fragment */
10568 for (i = 0; i < frags; i++)
10569 setbit(blksfree, cgbno + i);
10570 cgp->cg_cs.cs_nffree += frags;
10571 /* Add back in counts associated with the new frags */
10572 blk = blkmap(fs, blksfree, bbase);
10573 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
10574 /* If a complete block has been reassembled, account for it. */
10575 fragno = fragstoblks(fs, bbase);
10576 if (ffs_isblock(fs, blksfree, fragno)) {
10577 cgp->cg_cs.cs_nffree -= fs->fs_frag;
10578 ffs_clusteracct(fs, cgp, fragno, 1);
10579 cgp->cg_cs.cs_nbfree++;
10583 jnewblk->jn_state &= ~ATTACHED;
10584 jnewblk->jn_state |= UNDONE;
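/*
 * Illustrative note (not from the original source): a sketch of the
 * rollback accounting with made-up numbers, assuming fs_frag = 8. Suppose
 * jn_oldfrags = 2 and jn_frags = 5, so the journaled allocation claimed
 * frags cgbno+2..cgbno+4. If all three are still marked allocated
 * (isclr), then frags = 3 != fs_frag and the partial path runs:
 * ffs_fragacct() removes the old fragment run from cg_frsum, setbit()
 * frees the three frags, cs_nffree += 3, and ffs_fragacct() re-adds the
 * now-longer free run. Had the jnewblk covered a whole block
 * (frags == 8), the block path would instead run ffs_setblock(),
 * ffs_clusteracct(), and cs_nbfree++.
 */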
10590 initiate_write_bmsafemap(bmsafemap, bp)
10591 struct bmsafemap *bmsafemap;
10592 struct buf *bp; /* The cg block. */
10594 struct jaddref *jaddref;
10595 struct jnewblk *jnewblk;
10602 if (bmsafemap->sm_state & IOSTARTED)
10604 bmsafemap->sm_state |= IOSTARTED;
10606 * Clear any inode allocations which are pending journal writes.
10608 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) {
10609 cgp = (struct cg *)bp->b_data;
10610 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10611 inosused = cg_inosused(cgp);
10612 LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
10613 ino = jaddref->ja_ino % fs->fs_ipg;
10614 if (isset(inosused, ino)) {
10615 if ((jaddref->ja_mode & IFMT) == IFDIR)
10616 cgp->cg_cs.cs_ndir--;
10617 cgp->cg_cs.cs_nifree++;
10618 clrbit(inosused, ino);
10619 jaddref->ja_state &= ~ATTACHED;
10620 jaddref->ja_state |= UNDONE;
10623 panic("initiate_write_bmsafemap: inode %d "
10624 "marked free", jaddref->ja_ino);
10628 * Clear any block allocations which are pending journal writes.
10630 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
10631 cgp = (struct cg *)bp->b_data;
10632 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10633 blksfree = cg_blksfree(cgp);
10634 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10635 if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
10637 panic("initiate_write_bmsafemap: block %jd "
10638 "marked free", jnewblk->jn_blkno);
10642 * Move allocation lists to the written lists so they can be
10643 * cleared once the block write is complete.
10645 LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr,
10646 inodedep, id_deps);
10647 LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
10649 LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist,
10654 * This routine is called during the completion interrupt
10655 * service routine for a disk write (from the procedure called
10656 * by the device driver to inform the filesystem caches of
10657 * a request completion). It should be called early in this
10658 * procedure, before the block is made available to other
10659 * processes or other routines are called.
10663 softdep_disk_write_complete(bp)
10664 struct buf *bp; /* describes the completed disk write */
10666 struct worklist *wk;
10667 struct worklist *owk;
10668 struct workhead reattach;
10669 struct freeblks *freeblks;
10673 * If an error occurred while doing the write, then the data
10674 * has not hit the disk and the dependencies cannot be unrolled.
10676 if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0)
10678 LIST_INIT(&reattach);
10680 * This lock must not be released anywhere in this code segment.
10685 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
10686 WORKLIST_REMOVE(wk);
10687 dep_write[wk->wk_type]++;
10689 panic("duplicate worklist: %p\n", wk);
10691 switch (wk->wk_type) {
10694 if (handle_written_filepage(WK_PAGEDEP(wk), bp))
10695 WORKLIST_INSERT(&reattach, wk);
10699 if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
10700 WORKLIST_INSERT(&reattach, wk);
10704 if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
10705 WORKLIST_INSERT(&reattach, wk);
10709 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
10712 case D_ALLOCDIRECT:
10713 wk->wk_state |= COMPLETE;
10714 handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
10718 wk->wk_state |= COMPLETE;
10719 handle_allocindir_partdone(WK_ALLOCINDIR(wk));
10723 if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
10724 WORKLIST_INSERT(&reattach, wk);
10728 wk->wk_state |= COMPLETE;
10729 freeblks = WK_FREEBLKS(wk);
10730 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
10731 LIST_EMPTY(&freeblks->fb_jblkdephd))
10732 add_to_worklist(wk, WK_NODELAY);
10736 handle_written_freework(WK_FREEWORK(wk));
10740 free_jsegdep(WK_JSEGDEP(wk));
10744 handle_written_jseg(WK_JSEG(wk), bp);
10748 if (handle_written_sbdep(WK_SBDEP(wk), bp))
10749 WORKLIST_INSERT(&reattach, wk);
10753 free_freedep(WK_FREEDEP(wk));
10757 panic("handle_disk_write_complete: Unknown type %s",
10758 TYPENAME(wk->wk_type));
10763 * Reattach any requests that must be redone.
10765 while ((wk = LIST_FIRST(&reattach)) != NULL) {
10766 WORKLIST_REMOVE(wk);
10767 WORKLIST_INSERT(&bp->b_dep, wk);
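/*
 * Illustrative note (not from the original source): the dispatch loop
 * above follows one convention for every dependency type. Roughly:
 *
 *	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
 *		WORKLIST_REMOVE(wk);
 *		if (handle_written_X(...))	// nonzero: more writes needed
 *			WORKLIST_INSERT(&reattach, wk);
 *	}
 *
 * where handle_written_X stands for the per-type handlers named in the
 * switch. A handler returns nonzero when the dependency must ride along
 * on a future write of this same buffer (for example, after a rollback),
 * so it is reattached to b_dep before the buffer is released.
 */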
10775 * Called from within softdep_disk_write_complete above. Note that
10776 * this routine is always called from interrupt level with further
10777 * splbio interrupts blocked.
10780 handle_allocdirect_partdone(adp, wkhd)
10781 struct allocdirect *adp; /* the completed allocdirect */
10782 struct workhead *wkhd; /* Work to do when inode is written. */
10784 struct allocdirectlst *listhead;
10785 struct allocdirect *listadp;
10786 struct inodedep *inodedep;
10789 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
10790 return;
10792 * The on-disk inode cannot claim to be any larger than the last
10793 * fragment that has been written. Otherwise, the on-disk inode
10794 * might have fragments that were not the last block in the file
10795 * which would corrupt the filesystem. Thus, we cannot free any
10796 * allocdirects after one whose ad_oldblkno claims a fragment as
10797 * these blocks must be rolled back to zero before writing the inode.
10798 * We check the currently active set of allocdirects in id_inoupdt
10799 * or id_extupdt as appropriate.
10801 inodedep = adp->ad_inodedep;
10802 bsize = inodedep->id_fs->fs_bsize;
10803 if (adp->ad_state & EXTDATA)
10804 listhead = &inodedep->id_extupdt;
10806 listhead = &inodedep->id_inoupdt;
10807 TAILQ_FOREACH(listadp, listhead, ad_next) {
10808 /* found our block */
10809 if (listadp == adp)
10811 /* continue if the old block is not a fragment */
10812 if (listadp->ad_oldsize == 0 ||
10813 listadp->ad_oldsize == bsize)
10815 /* hit a fragment */
10819 * If we have reached the end of the current list without
10820 * finding the just finished dependency, then it must be
10821 * on the future dependency list. Future dependencies cannot
10822 * be freed until they are moved to the current list.
10824 if (listadp == NULL) {
10826 if (adp->ad_state & EXTDATA)
10827 listhead = &inodedep->id_newextupdt;
10829 listhead = &inodedep->id_newinoupdt;
10830 TAILQ_FOREACH(listadp, listhead, ad_next)
10831 /* found our block */
10832 if (listadp == adp)
10834 if (listadp == NULL)
10835 panic("handle_allocdirect_partdone: lost dep");
10840 * If we have found the just finished dependency, then queue
10841 * it along with anything that follows it that is complete.
10842 * Since the pointer has not yet been written in the inode
10843 * as the dependency prevents it, place the allocdirect on the
10844 * bufwait list where it will be freed once the pointer is
10848 wkhd = &inodedep->id_bufwait;
10849 for (; adp; adp = listadp) {
10850 listadp = TAILQ_NEXT(adp, ad_next);
10851 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
10853 TAILQ_REMOVE(listhead, adp, ad_next);
10854 WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
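/*
 * Illustrative note (not from the original source): a sketch of the
 * fragment rule enforced above, with made-up numbers. Assume an
 * 8192-byte block size and allocdirects queued for lbns 0, 1, and 2,
 * where lbn 1 has ad_oldsize = 4096 (a fragment). Even if lbn 2's
 * dependency is ALLCOMPLETE, it cannot be freed: doing so would let the
 * on-disk inode claim lbn 2 while lbn 1 is still rolled back to a
 * 4096-byte fragment, i.e. a fragment that is not the last block of the
 * file, which fsck would flag as corruption.
 */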
10859 * Called from within softdep_disk_write_complete above. This routine
10860 * completes successfully written allocindirs.
10863 handle_allocindir_partdone(aip)
10864 struct allocindir *aip; /* the completed allocindir */
10866 struct indirdep *indirdep;
10868 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
10869 return;
10870 indirdep = aip->ai_indirdep;
10871 LIST_REMOVE(aip, ai_next);
10873 * Don't set a pointer while the buffer is undergoing IO or while
10874 * we have active truncations.
10876 if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
10877 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
10880 if (indirdep->ir_state & UFS1FMT)
10881 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
10884 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
10887 * Await the pointer write before freeing the allocindir.
10889 LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
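/*
 * Illustrative note (not from the original source): ai_offset above
 * indexes an array of block pointers whose width depends on the
 * filesystem format: ufs1_daddr_t (32 bits) for UFS1 and ufs2_daddr_t
 * (64 bits) for UFS2. As a made-up example, a UFS2 filesystem with a
 * 16384-byte block size holds NINDIR(fs) = 16384 / 8 = 2048 pointers per
 * indirect block, so 0 <= ai_offset < 2048.
 */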
10893 * Release segments held on a jwork list.
10897 struct workhead *wkhd;
10899 struct worklist *wk;
10901 while ((wk = LIST_FIRST(wkhd)) != NULL) {
10902 WORKLIST_REMOVE(wk);
10903 switch (wk->wk_type) {
10905 free_jsegdep(WK_JSEGDEP(wk));
10908 free_freedep(WK_FREEDEP(wk));
10911 rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
10912 WORKITEM_FREE(wk, D_FREEFRAG);
10915 handle_written_freework(WK_FREEWORK(wk));
10918 panic("handle_jwork: Unknown type %s\n",
10919 TYPENAME(wk->wk_type));
10925 * Handle the bufwait list on an inode when it is safe to release items
10926 * held there. This normally happens after an inode block is written but
10927 * may be delayed and handled later if there are pending journal items that
10928 * are not yet safe to be released.
10930 static struct freefile *
10931 handle_bufwait(inodedep, refhd)
10932 struct inodedep *inodedep;
10933 struct workhead *refhd;
10935 struct jaddref *jaddref;
10936 struct freefile *freefile;
10937 struct worklist *wk;
10940 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
10941 WORKLIST_REMOVE(wk);
10942 switch (wk->wk_type) {
10945 * We defer adding freefile to the worklist
10946 * until all other additions have been made to
10947 * ensure that it will be done after all the
10948 * old blocks have been freed.
10950 if (freefile != NULL)
10951 panic("handle_bufwait: freefile");
10952 freefile = WK_FREEFILE(wk);
10956 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
10960 diradd_inode_written(WK_DIRADD(wk), inodedep);
10964 wk->wk_state |= COMPLETE;
10965 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
10966 add_to_worklist(wk, 0);
10970 wk->wk_state |= COMPLETE;
10971 add_to_worklist(wk, 0);
10974 case D_ALLOCDIRECT:
10976 free_newblk(WK_NEWBLK(wk));
10980 wk->wk_state |= COMPLETE;
10981 free_jnewblk(WK_JNEWBLK(wk));
10985 * Save freed journal segments and add references on
10986 * the supplied list which will delay their release
10987 * until the cg bitmap is cleared on disk.
10991 free_jsegdep(WK_JSEGDEP(wk));
10993 WORKLIST_INSERT(refhd, wk);
10997 jaddref = WK_JADDREF(wk);
10998 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
11001 * Transfer any jaddrefs to the list to be freed with
11002 * the bitmap if we're handling a removed file.
11004 if (refhd == NULL) {
11005 wk->wk_state |= COMPLETE;
11006 free_jaddref(jaddref);
11008 WORKLIST_INSERT(refhd, wk);
11012 panic("handle_bufwait: Unknown type %p(%s)",
11013 wk, TYPENAME(wk->wk_type));
11020 * Called from within softdep_disk_write_complete above to restore
11021 * in-memory inode block contents to their most up-to-date state. Note
11022 * that this routine is always called from interrupt level with further
11023 * splbio interrupts blocked.
11026 handle_written_inodeblock(inodedep, bp)
11027 struct inodedep *inodedep;
11028 struct buf *bp; /* buffer containing the inode block */
11030 struct freefile *freefile;
11031 struct allocdirect *adp, *nextadp;
11032 struct ufs1_dinode *dp1 = NULL;
11033 struct ufs2_dinode *dp2 = NULL;
11034 struct workhead wkhd;
11035 int hadchanges, fstype;
11041 if ((inodedep->id_state & IOSTARTED) == 0)
11042 panic("handle_written_inodeblock: not started");
11043 inodedep->id_state &= ~IOSTARTED;
11044 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
11046 dp1 = (struct ufs1_dinode *)bp->b_data +
11047 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11048 freelink = dp1->di_freelink;
11051 dp2 = (struct ufs2_dinode *)bp->b_data +
11052 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11053 freelink = dp2->di_freelink;
11056 * Leave this inodeblock dirty until it's in the list.
11058 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) {
11059 struct inodedep *inon;
11061 inon = TAILQ_NEXT(inodedep, id_unlinked);
11062 if ((inon == NULL && freelink == 0) ||
11063 (inon && inon->id_ino == freelink)) {
11065 inon->id_state |= UNLINKPREV;
11066 inodedep->id_state |= UNLINKNEXT;
11071 * If we had to rollback the inode allocation because of
11072 * bitmaps being incomplete, then simply restore it.
11073 * Keep the block dirty so that it will not be reclaimed until
11074 * all associated dependencies have been cleared and the
11075 * corresponding updates written to disk.
11077 if (inodedep->id_savedino1 != NULL) {
11079 if (fstype == UFS1)
11080 *dp1 = *inodedep->id_savedino1;
11082 *dp2 = *inodedep->id_savedino2;
11083 free(inodedep->id_savedino1, M_SAVEDINO);
11084 inodedep->id_savedino1 = NULL;
11085 if ((bp->b_flags & B_DELWRI) == 0)
11086 stat_inode_bitmap++;
11089 * If the inode is clear here and GOINGAWAY it will never
11090 * be written. Process the bufwait and clear any pending
11091 * work which may include the freefile.
11093 if (inodedep->id_state & GOINGAWAY)
11097 inodedep->id_state |= COMPLETE;
11099 * Roll forward anything that had to be rolled back before
11100 * the inode could be updated.
11102 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
11103 nextadp = TAILQ_NEXT(adp, ad_next);
11104 if (adp->ad_state & ATTACHED)
11105 panic("handle_written_inodeblock: new entry");
11106 if (fstype == UFS1) {
11107 if (adp->ad_offset < NDADDR) {
11108 if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11109 panic("%s %s #%jd mismatch %d != %jd",
11110 "handle_written_inodeblock:",
11112 (intmax_t)adp->ad_offset,
11113 dp1->di_db[adp->ad_offset],
11114 (intmax_t)adp->ad_oldblkno);
11115 dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
11117 if (dp1->di_ib[adp->ad_offset - NDADDR] != 0)
11118 panic("%s: %s #%jd allocated as %d",
11119 "handle_written_inodeblock",
11120 "indirect pointer",
11121 (intmax_t)adp->ad_offset - NDADDR,
11122 dp1->di_ib[adp->ad_offset - NDADDR]);
11123 dp1->di_ib[adp->ad_offset - NDADDR] =
11127 if (adp->ad_offset < NDADDR) {
11128 if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11129 panic("%s: %s #%jd %s %jd != %jd",
11130 "handle_written_inodeblock",
11132 (intmax_t)adp->ad_offset, "mismatch",
11133 (intmax_t)dp2->di_db[adp->ad_offset],
11134 (intmax_t)adp->ad_oldblkno);
11135 dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
11137 if (dp2->di_ib[adp->ad_offset - NDADDR] != 0)
11138 panic("%s: %s #%jd allocated as %jd",
11139 "handle_written_inodeblock",
11140 "indirect pointer",
11141 (intmax_t)adp->ad_offset - NDADDR,
11143 dp2->di_ib[adp->ad_offset - NDADDR]);
11144 dp2->di_ib[adp->ad_offset - NDADDR] =
11148 adp->ad_state &= ~UNDONE;
11149 adp->ad_state |= ATTACHED;
11152 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
11153 nextadp = TAILQ_NEXT(adp, ad_next);
11154 if (adp->ad_state & ATTACHED)
11155 panic("handle_written_inodeblock: new entry");
11156 if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
11157 panic("%s: direct pointers #%jd %s %jd != %jd",
11158 "handle_written_inodeblock",
11159 (intmax_t)adp->ad_offset, "mismatch",
11160 (intmax_t)dp2->di_extb[adp->ad_offset],
11161 (intmax_t)adp->ad_oldblkno);
11162 dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
11163 adp->ad_state &= ~UNDONE;
11164 adp->ad_state |= ATTACHED;
11167 if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
11168 stat_direct_blk_ptrs++;
11170 * Reset the file size to its most up-to-date value.
11172 if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
11173 panic("handle_written_inodeblock: bad size");
11174 if (inodedep->id_savednlink > LINK_MAX)
11175 panic("handle_written_inodeblock: Invalid link count "
11176 "%d for inodedep %p", inodedep->id_savednlink, inodedep);
11177 if (fstype == UFS1) {
11178 if (dp1->di_nlink != inodedep->id_savednlink) {
11179 dp1->di_nlink = inodedep->id_savednlink;
11182 if (dp1->di_size != inodedep->id_savedsize) {
11183 dp1->di_size = inodedep->id_savedsize;
11187 if (dp2->di_nlink != inodedep->id_savednlink) {
11188 dp2->di_nlink = inodedep->id_savednlink;
11191 if (dp2->di_size != inodedep->id_savedsize) {
11192 dp2->di_size = inodedep->id_savedsize;
11195 if (dp2->di_extsize != inodedep->id_savedextsize) {
11196 dp2->di_extsize = inodedep->id_savedextsize;
11200 inodedep->id_savedsize = -1;
11201 inodedep->id_savedextsize = -1;
11202 inodedep->id_savednlink = -1;
11204 * If there were any rollbacks in the inode block, then it must be
11205 * marked dirty so that it will eventually get written back in
11206 * its correct form.
11212 * Process any allocdirects that completed during the update.
11214 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
11215 handle_allocdirect_partdone(adp, &wkhd);
11216 if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
11217 handle_allocdirect_partdone(adp, &wkhd);
11219 * Process deallocations that were held pending until the
11220 * inode had been written to disk. Freeing of the inode
11221 * is delayed until after all blocks have been freed to
11222 * avoid creation of new <vfsid, inum, lbn> triples
11223 * before the old ones have been deleted. Completely
11224 * unlinked inodes are not processed until the unlinked
11225 * inode list is written or the last reference is removed.
11227 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
11228 freefile = handle_bufwait(inodedep, NULL);
11229 if (freefile && !LIST_EMPTY(&wkhd)) {
11230 WORKLIST_INSERT(&wkhd, &freefile->fx_list);
11235 * Move rolled forward dependency completions to the bufwait list
11236 * now that those that were already written have been processed.
11238 if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
11239 panic("handle_written_inodeblock: bufwait but no changes");
11240 jwork_move(&inodedep->id_bufwait, &wkhd);
11242 if (freefile != NULL) {
11244 * If the inode is goingaway it was never written. Fake up
11245 * the state here so free_inodedep() can succeed.
11247 if (inodedep->id_state & GOINGAWAY)
11248 inodedep->id_state |= COMPLETE | DEPCOMPLETE;
11249 if (free_inodedep(inodedep) == 0)
11250 panic("handle_written_inodeblock: live inodedep %p",
11252 add_to_worklist(&freefile->fx_list, 0);
11257 * If no outstanding dependencies, free it.
11259 if (free_inodedep(inodedep) ||
11260 (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 &&
11261 TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
11262 TAILQ_FIRST(&inodedep->id_extupdt) == 0 &&
11263 LIST_FIRST(&inodedep->id_bufwait) == 0))
11265 return (hadchanges);
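/*
 * Illustrative note (not from the original source): a sketch of the
 * roll-forward above with made-up numbers. Suppose di_db[3] was rolled
 * back to ad_oldblkno = 1000 before the inode block was written because
 * the new block's bitmap or journal dependency was not yet satisfied.
 * Once this completion routine runs, the UNDONE allocdirect is found on
 * id_inoupdt, di_db[3] is set to ad_newblkno = 2048, hadchanges is set,
 * and the buffer is re-dirtied so the updated pointer reaches the disk
 * on a later write.
 */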
11269 handle_written_indirdep(indirdep, bp, bpp)
11270 struct indirdep *indirdep;
11274 struct allocindir *aip;
11278 if (indirdep->ir_state & GOINGAWAY)
11279 panic("handle_written_indirdep: indirdep gone");
11280 if ((indirdep->ir_state & IOSTARTED) == 0)
11281 panic("handle_written_indirdep: IO not started");
11284 * If there were rollbacks revert them here.
11286 if (indirdep->ir_saveddata) {
11287 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
11288 if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11289 free(indirdep->ir_saveddata, M_INDIRDEP);
11290 indirdep->ir_saveddata = NULL;
11294 indirdep->ir_state &= ~(UNDONE | IOSTARTED);
11295 indirdep->ir_state |= ATTACHED;
11297 * Move allocindirs with written pointers to the completehd if
11298 * the indirdep's pointer is not yet written. Otherwise
11301 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) {
11302 LIST_REMOVE(aip, ai_next);
11303 if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
11304 LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
11306 newblk_freefrag(&aip->ai_block);
11309 free_newblk(&aip->ai_block);
11312 * Move allocindirs that have finished dependency processing from
11313 * the done list to the write list after updating the pointers.
11315 if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11316 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
11317 handle_allocindir_partdone(aip);
11318 if (aip == LIST_FIRST(&indirdep->ir_donehd))
11319 panic("disk_write_complete: not gone");
11324 * Preserve the indirdep if there were any changes or if it is not
11325 * yet valid on disk.
11328 stat_indir_blk_ptrs++;
11333 * If there were no changes we can discard the savedbp and detach
11334 * ourselves from the buf. We are only carrying completed pointers
11337 sbp = indirdep->ir_savebp;
11338 sbp->b_flags |= B_INVAL | B_NOCACHE;
11339 indirdep->ir_savebp = NULL;
11340 indirdep->ir_bp = NULL;
11342 panic("handle_written_indirdep: bp already exists.");
11345 * The indirdep may not be freed until its parent points at it.
11347 if (indirdep->ir_state & DEPCOMPLETE)
11348 free_indirdep(indirdep);
11354 * Process a diradd entry after its dependent inode has been written.
11355 * This routine must be called with splbio interrupts blocked.
11358 diradd_inode_written(dap, inodedep)
11359 struct diradd *dap;
11360 struct inodedep *inodedep;
11363 dap->da_state |= COMPLETE;
11364 complete_diradd(dap);
11365 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
11369 * Returns true if the bmsafemap will have rollbacks when written. Must
11370 * only be called with lk and the buf lock on the cg held.
11373 bmsafemap_backgroundwrite(bmsafemap, bp)
11374 struct bmsafemap *bmsafemap;
11379 dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
11380 !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
11382 * If we're initiating a background write we need to process the
11383 * rollbacks as they exist now, not as they exist when IO starts.
11384 * No other consumers will look at the contents of the shadowed
11385 * buf so this is safe to do here.
11387 if (bp->b_xflags & BX_BKGRDMARKER)
11388 initiate_write_bmsafemap(bmsafemap, bp);
11394 * Re-apply an allocation when a cg write is complete.
11397 jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
11398 struct jnewblk *jnewblk;
11403 ufs1_daddr_t fragno;
11404 ufs2_daddr_t blkno;
11410 cgbno = dtogd(fs, jnewblk->jn_blkno);
11411 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
11412 if (isclr(blksfree, cgbno + i))
11413 panic("jnewblk_rollforward: re-allocated fragment");
11416 if (frags == fs->fs_frag) {
11417 blkno = fragstoblks(fs, cgbno);
11418 ffs_clrblock(fs, blksfree, (long)blkno);
11419 ffs_clusteracct(fs, cgp, blkno, -1);
11420 cgp->cg_cs.cs_nbfree--;
11422 bbase = cgbno - fragnum(fs, cgbno);
11423 cgbno += jnewblk->jn_oldfrags;
11424 /* If a complete block had been reassembled, account for it. */
11425 fragno = fragstoblks(fs, bbase);
11426 if (ffs_isblock(fs, blksfree, fragno)) {
11427 cgp->cg_cs.cs_nffree += fs->fs_frag;
11428 ffs_clusteracct(fs, cgp, fragno, -1);
11429 cgp->cg_cs.cs_nbfree--;
11431 /* Decrement the old frags. */
11432 blk = blkmap(fs, blksfree, bbase);
11433 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
11434 /* Allocate the fragment */
11435 for (i = 0; i < frags; i++)
11436 clrbit(blksfree, cgbno + i);
11437 cgp->cg_cs.cs_nffree -= frags;
11438 /* Add back in counts associated with the new frags */
11439 blk = blkmap(fs, blksfree, bbase);
11440 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
11446 * Complete a write to a bmsafemap structure. Roll forward any bitmap
11447 * changes if it's not a background write. Set all written dependencies
11448 * to DEPCOMPLETE and free the structure if possible.
11451 handle_written_bmsafemap(bmsafemap, bp)
11452 struct bmsafemap *bmsafemap;
11455 struct newblk *newblk;
11456 struct inodedep *inodedep;
11457 struct jaddref *jaddref, *jatmp;
11458 struct jnewblk *jnewblk, *jntmp;
11459 struct ufsmount *ump;
11467 if ((bmsafemap->sm_state & IOSTARTED) == 0)
11468 panic("initiate_write_bmsafemap: Not started\n");
11469 ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
11471 bmsafemap->sm_state &= ~IOSTARTED;
11473 * Release journal work that was waiting on the write.
11475 handle_jwork(&bmsafemap->sm_freewr);
11478 * Restore unwritten inode allocation pending jaddref writes.
11480 if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
11481 cgp = (struct cg *)bp->b_data;
11482 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11483 inosused = cg_inosused(cgp);
11484 LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
11485 ja_bmdeps, jatmp) {
11486 if ((jaddref->ja_state & UNDONE) == 0)
11488 ino = jaddref->ja_ino % fs->fs_ipg;
11489 if (isset(inosused, ino))
11490 panic("handle_written_bmsafemap: "
11491 "re-allocated inode");
11492 if ((bp->b_xflags & BX_BKGRDMARKER) == 0) {
11493 if ((jaddref->ja_mode & IFMT) == IFDIR)
11494 cgp->cg_cs.cs_ndir++;
11495 cgp->cg_cs.cs_nifree--;
11496 setbit(inosused, ino);
11499 jaddref->ja_state &= ~UNDONE;
11500 jaddref->ja_state |= ATTACHED;
11501 free_jaddref(jaddref);
11505 * Restore any block allocations which are pending journal writes.
11507 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
11508 cgp = (struct cg *)bp->b_data;
11509 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11510 blksfree = cg_blksfree(cgp);
11511 LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
11513 if ((jnewblk->jn_state & UNDONE) == 0)
11515 if ((bp->b_xflags & BX_BKGRDMARKER) == 0 &&
11516 jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
11518 jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
11519 jnewblk->jn_state |= ATTACHED;
11520 free_jnewblk(jnewblk);
11523 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) {
11524 newblk->nb_state |= DEPCOMPLETE;
11525 newblk->nb_state &= ~ONDEPLIST;
11526 newblk->nb_bmsafemap = NULL;
11527 LIST_REMOVE(newblk, nb_deps);
11528 if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
11529 handle_allocdirect_partdone(
11530 WK_ALLOCDIRECT(&newblk->nb_list), NULL);
11531 else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
11532 handle_allocindir_partdone(
11533 WK_ALLOCINDIR(&newblk->nb_list));
11534 else if (newblk->nb_list.wk_type != D_NEWBLK)
11535 panic("handle_written_bmsafemap: Unexpected type: %s",
11536 TYPENAME(newblk->nb_list.wk_type));
11538 while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
11539 inodedep->id_state |= DEPCOMPLETE;
11540 inodedep->id_state &= ~ONDEPLIST;
11541 LIST_REMOVE(inodedep, id_deps);
11542 inodedep->id_bmsafemap = NULL;
11544 LIST_REMOVE(bmsafemap, sm_next);
11545 if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
11546 LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
11547 LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
11548 LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
11549 LIST_EMPTY(&bmsafemap->sm_freehd)) {
11550 LIST_REMOVE(bmsafemap, sm_hash);
11551 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
11554 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
11560 * Try to free a mkdir dependency.
11563 complete_mkdir(mkdir)
11564 struct mkdir *mkdir;
11566 struct diradd *dap;
11568 if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
11569 return;
11570 LIST_REMOVE(mkdir, md_mkdirs);
11571 dap = mkdir->md_diradd;
11572 dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
11573 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
11574 dap->da_state |= DEPCOMPLETE;
11575 complete_diradd(dap);
11577 WORKITEM_FREE(mkdir, D_MKDIR);
11581 * Handle the completion of a mkdir dependency.
11584 handle_written_mkdir(mkdir, type)
11585 struct mkdir *mkdir;
11589 if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
11590 panic("handle_written_mkdir: bad type");
11591 mkdir->md_state |= COMPLETE;
11592 complete_mkdir(mkdir);
11596 free_pagedep(pagedep)
11597 struct pagedep *pagedep;
11601 if (pagedep->pd_state & NEWBLOCK)
11603 if (!LIST_EMPTY(&pagedep->pd_dirremhd))
11605 for (i = 0; i < DAHASHSZ; i++)
11606 if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
11608 if (!LIST_EMPTY(&pagedep->pd_pendinghd))
11610 if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
11612 if (pagedep->pd_state & ONWORKLIST)
11613 WORKLIST_REMOVE(&pagedep->pd_list);
11614 LIST_REMOVE(pagedep, pd_hash);
11615 WORKITEM_FREE(pagedep, D_PAGEDEP);
11621 * Called from within softdep_disk_write_complete above.
11622 * A write operation was just completed. Removed inodes can
11623 * now be freed and associated block pointers may be committed.
11624 * Note that this routine is always called from interrupt level
11625 * with further splbio interrupts blocked.
11628 handle_written_filepage(pagedep, bp)
11629 struct pagedep *pagedep;
11630 struct buf *bp; /* buffer containing the written page */
11632 struct dirrem *dirrem;
11633 struct diradd *dap, *nextdap;
11637 if ((pagedep->pd_state & IOSTARTED) == 0)
11638 panic("handle_written_filepage: not started");
11639 pagedep->pd_state &= ~IOSTARTED;
11641 * Process any directory removals that have been committed.
11643 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
11644 LIST_REMOVE(dirrem, dm_next);
11645 dirrem->dm_state |= COMPLETE;
11646 dirrem->dm_dirinum = pagedep->pd_ino;
11647 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
11648 ("handle_written_filepage: Journal entries not written."));
11649 add_to_worklist(&dirrem->dm_list, 0);
11652 * Free any directory additions that have been committed.
11653 * If it is a newly allocated block, we have to wait until
11654 * the on-disk directory inode claims the new block.
11656 if ((pagedep->pd_state & NEWBLOCK) == 0)
11657 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
11658 free_diradd(dap, NULL);
11660 * Uncommitted directory entries must be restored.
11662 for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
11663 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
11665 nextdap = LIST_NEXT(dap, da_pdlist);
11666 if (dap->da_state & ATTACHED)
11667 panic("handle_written_filepage: attached");
11668 ep = (struct direct *)
11669 ((char *)bp->b_data + dap->da_offset);
11670 ep->d_ino = dap->da_newinum;
11671 dap->da_state &= ~UNDONE;
11672 dap->da_state |= ATTACHED;
11675 * If the inode referenced by the directory has
11676 * been written out, then the dependency can be
11677 * moved to the pending list.
11679 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
11680 LIST_REMOVE(dap, da_pdlist);
11681 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
11687 * If there were any rollbacks in the directory, then it must be
11688 * marked dirty so that it will eventually get written back in
11689 * its correct form.
11692 if ((bp->b_flags & B_DELWRI) == 0)
11698 * If we are not waiting for a new directory block to be
11699 * claimed by its inode, then the pagedep will be freed.
11700 * Otherwise it will remain to track any new entries on
11701 * the page in case they are fsync'ed.
11703 free_pagedep(pagedep);
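/*
 * Illustrative note (not from the original source): a sketch of the
 * rollback pair for directory pages. While the page was being written,
 * each uncommitted entry had its d_ino cleared to 0 so that a crash
 * would leave no name pointing at an inode whose own metadata might not
 * be on disk. As a made-up example, an entry "foo" created with
 * da_newinum = 123 goes to disk with d_ino = 0; the loop above restores
 * d_ino = 123, marks the diradd ATTACHED, and re-dirties the page so the
 * real entry is written next time.
 */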
11708 * Writing back in-core inode structures.
11710 * The filesystem only accesses an inode's contents when it occupies an
11711 * "in-core" inode structure. These "in-core" structures are separate from
11712 * the page frames used to cache inode blocks. Only the latter are
11713 * transferred to/from the disk. So, when the updated contents of the
11714 * "in-core" inode structure are copied to the corresponding in-memory inode
11715 * block, the dependencies are also transferred. The following procedure is
11716 * called when copying a dirty "in-core" inode to a cached inode block.
11720 * Called when an inode is loaded from disk. If the effective link count
11721 * differed from the actual link count when it was last flushed, then we
11722 * need to ensure that the correct effective link count is put back.
11725 softdep_load_inodeblock(ip)
11726 struct inode *ip; /* the "in_core" copy of the inode */
11728 struct inodedep *inodedep;
11731 * Check for alternate nlink count.
11733 ip->i_effnlink = ip->i_nlink;
11735 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
11740 ip->i_effnlink -= inodedep->id_nlinkdelta;
11745 * This routine is called just before the "in-core" inode
11746 * information is to be copied to the in-memory inode block.
11747 * Recall that an inode block contains several inodes. If
11748 * the force flag is set, then the dependencies will be
11749 * cleared so that the update can always be made. Note that
11750 * the buffer is locked when this routine is called, so we
11751 * will never be in the middle of writing the inode block
11755 softdep_update_inodeblock(ip, bp, waitfor)
11756 struct inode *ip; /* the "in_core" copy of the inode */
11757 struct buf *bp; /* the buffer containing the inode block */
11758 int waitfor; /* nonzero => update must be allowed */
11760 struct inodedep *inodedep;
11761 struct inoref *inoref;
11762 struct worklist *wk;
11768 mp = UFSTOVFS(ip->i_ump);
11771 * Preserve the freelink that is on disk. clear_unlinked_inodedep()
11772 * does not have access to the in-core ip so must write directly into
11773 * the inode block buffer when setting freelink.
11775 if (fs->fs_magic == FS_UFS1_MAGIC)
11776 DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data +
11777 ino_to_fsbo(fs, ip->i_number))->di_freelink);
11779 DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data +
11780 ino_to_fsbo(fs, ip->i_number))->di_freelink);
11782 * If the effective link count is not equal to the actual link
11783 * count, then we must track the difference in an inodedep while
11784 * the inode is (potentially) tossed out of the cache. Otherwise,
11785 * if there is no existing inodedep, then there are no dependencies
11790 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
11792 if (ip->i_effnlink != ip->i_nlink)
11793 panic("softdep_update_inodeblock: bad link count");
11796 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
11797 panic("softdep_update_inodeblock: bad delta");
11799 * If we're flushing all dependencies we must also move any waiting
11800 * for journal writes onto the bufwait list prior to I/O.
11803 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
11804 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
11806 jwait(&inoref->if_list, MNT_WAIT);
11812 * Changes have been initiated. Anything depending on these
11813 * changes cannot occur until this inode has been written.
11815 inodedep->id_state &= ~COMPLETE;
11816 if ((inodedep->id_state & ONWORKLIST) == 0)
11817 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
11819 * Any new dependencies associated with the incore inode must
11820 * now be moved to the list associated with the buffer holding
11821 * the in-memory copy of the inode. Once merged process any
11822 * allocdirects that are completed by the merger.
11824 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
11825 if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
11826 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
11828 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
11829 if (!TAILQ_EMPTY(&inodedep->id_extupdt))
11830 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
11833 * Now that the inode has been pushed into the buffer, the
11834 * operations dependent on the inode being written to disk
11835 * can be moved to the id_bufwait so that they will be
11836 * processed when the buffer I/O completes.
11838 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
11839 WORKLIST_REMOVE(wk);
11840 WORKLIST_INSERT(&inodedep->id_bufwait, wk);
11843 * Newly allocated inodes cannot be written until the bitmap
11844 * that allocates them has been written (indicated by
11845 * DEPCOMPLETE being set in id_state). If we are doing a
11846 * forced sync (e.g., an fsync on a file), we force the bitmap
11847 * to be written so that the update can be done.
11849 if (waitfor == 0) {
11854 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
11858 ibp = inodedep->id_bmsafemap->sm_buf;
11859 ibp = getdirtybuf(ibp, &lk, MNT_WAIT);
11862 * If ibp came back as NULL, the dependency could have been
11863 * freed while we slept. Look it up again, and check to see
11864 * that it has completed.
11866 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
11872 if ((error = bwrite(ibp)) != 0)
11873 softdep_error("softdep_update_inodeblock: bwrite", error);
11877 * Merge a new inode dependency list (such as id_newinoupdt) into an
11878 * old inode dependency list (such as id_inoupdt). This routine must be
11879 * called with splbio interrupts blocked.
11882 merge_inode_lists(newlisthead, oldlisthead)
11883 struct allocdirectlst *newlisthead;
11884 struct allocdirectlst *oldlisthead;
11886 struct allocdirect *listadp, *newadp;
11888 newadp = TAILQ_FIRST(newlisthead);
11889 for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
11890 if (listadp->ad_offset < newadp->ad_offset) {
11891 listadp = TAILQ_NEXT(listadp, ad_next);
11894 TAILQ_REMOVE(newlisthead, newadp, ad_next);
11895 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
11896 if (listadp->ad_offset == newadp->ad_offset) {
11897 allocdirect_merge(oldlisthead, newadp,
11901 newadp = TAILQ_FIRST(newlisthead);
11903 while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
11904 TAILQ_REMOVE(newlisthead, newadp, ad_next);
11905 TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
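/*
 * Illustrative note (not from the original source): a sketch of the
 * merge above with made-up offsets. Old list {0, 2, 5}, new list {2, 7}:
 * new-2 is inserted before old-2 and allocdirect_merge() collapses the
 * duplicate pair in favor of the newer allocation; new-7, having no old
 * entry at or beyond its offset, is appended by the trailing loop. The
 * result is one list sorted by ad_offset: {0, 2, 5, 7}.
 */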
11910 * If we are doing an fsync, then we must ensure that any directory
11911 * entries for the inode have been written after the inode gets to disk.
11915 struct vnode *vp; /* the "in_core" copy of the inode */
11917 struct inodedep *inodedep;
11918 struct pagedep *pagedep;
11919 struct inoref *inoref;
11920 struct worklist *wk;
11921 struct diradd *dap;
11927 struct thread *td = curthread;
11928 int error, flushparent, pagedep_new_block;
11937 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
11941 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
11942 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
11944 jwait(&inoref->if_list, MNT_WAIT);
11948 if (!LIST_EMPTY(&inodedep->id_inowait) ||
11949 !TAILQ_EMPTY(&inodedep->id_extupdt) ||
11950 !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
11951 !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
11952 !TAILQ_EMPTY(&inodedep->id_newinoupdt))
11953 panic("softdep_fsync: pending ops %p", inodedep);
11954 for (error = 0, flushparent = 0; ; ) {
11955 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
11957 if (wk->wk_type != D_DIRADD)
11958 panic("softdep_fsync: Unexpected type %s",
11959 TYPENAME(wk->wk_type));
11960 dap = WK_DIRADD(wk);
11962 * Flush our parent if this directory entry has a MKDIR_PARENT
11963 * dependency or is contained in a newly allocated block.
11965 if (dap->da_state & DIRCHG)
11966 pagedep = dap->da_previous->dm_pagedep;
11968 pagedep = dap->da_pagedep;
11969 parentino = pagedep->pd_ino;
11970 lbn = pagedep->pd_lbn;
11971 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
11972 panic("softdep_fsync: dirty");
11973 if ((dap->da_state & MKDIR_PARENT) ||
11974 (pagedep->pd_state & NEWBLOCK))
11979 * If we are being fsync'ed as part of vgone'ing this vnode,
11980 * then we will not be able to release and recover the
11981 * vnode below, so we just have to give up on writing its
11982 * directory entry out. It will eventually be written, just
11983 * not now, but then the user was not asking to have it
11984 * written, so we are not breaking any promises.
11986 if (vp->v_iflag & VI_DOOMED)
11989 * We prevent deadlock by always fetching inodes from the
11990 * root, moving down the directory tree. Thus, when fetching
11991 * our parent directory, we first try to get the lock. If
11992 * that fails, we must unlock ourselves before requesting
11993 * the lock on our parent. See the comment in ufs_lookup
11994 * for details on possible races.
11997 if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp,
11998 FFSV_FORCEINSMQ)) {
11999 error = vfs_busy(mp, MBF_NOWAIT);
12003 error = vfs_busy(mp, 0);
12004 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12008 if (vp->v_iflag & VI_DOOMED) {
12014 error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE,
12015 &pvp, FFSV_FORCEINSMQ);
12017 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12018 if (vp->v_iflag & VI_DOOMED) {
12027 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
12028 * that are contained in direct blocks will be resolved by
12029 * doing a ffs_update. Pagedeps contained in indirect blocks
12030 * may require a complete sync'ing of the directory. So, we
12031 * try the cheap and fast ffs_update first, and if that fails,
12032 * then we do the slower ffs_syncvnode of the directory.
12037 if ((error = ffs_update(pvp, 1)) != 0) {
12043 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) {
12044 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) {
12045 if (wk->wk_type != D_DIRADD)
12046 panic("softdep_fsync: Unexpected type %s",
12047 TYPENAME(wk->wk_type));
12048 dap = WK_DIRADD(wk);
12049 if (dap->da_state & DIRCHG)
12050 pagedep = dap->da_previous->dm_pagedep;
12052 pagedep = dap->da_pagedep;
12053 pagedep_new_block = pagedep->pd_state & NEWBLOCK;
12056 if (pagedep_new_block && (error =
12057 ffs_syncvnode(pvp, MNT_WAIT, 0))) {
12067 * Flush directory page containing the inode's name.
12069 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12072 error = bwrite(bp);
12079 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12087 * Flush all the dirty bitmaps associated with the block device
12088 * before flushing the rest of the dirty blocks so as to reduce
12089 * the number of dependencies that will have to be rolled back.
12094 softdep_fsync_mountdev(vp)
12097 struct buf *bp, *nbp;
12098 struct worklist *wk;
12101 if (!vn_isdisk(vp, NULL))
12102 panic("softdep_fsync_mountdev: vnode not a disk");
12103 bo = &vp->v_bufobj;
12107 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12109 * If it is already scheduled, skip to the next buffer.
12111 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12114 if ((bp->b_flags & B_DELWRI) == 0)
12115 panic("softdep_fsync_mountdev: not dirty");
12117 * We are only interested in bitmaps with outstanding
12120 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12121 wk->wk_type != D_BMSAFEMAP ||
12122 (bp->b_vflags & BV_BKGRDINPROG)) {
12129 (void) bawrite(bp);
12138 * Sync all cylinder groups that were dirty at the time this function is
12139 * called. Newly dirtied cgs will be inserted before the sentinel. This
12140 * is used to flush freedep activity that may be holding up writes to a
12144 sync_cgs(mp, waitfor)
12148 struct bmsafemap *bmsafemap;
12149 struct bmsafemap *sentinel;
12150 struct ufsmount *ump;
12154 sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12155 sentinel->sm_cg = -1;
12156 ump = VFSTOUFS(mp);
12159 LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12160 for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12161 bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12162 /* Skip sentinels and cgs with no work to release. */
12163 if (bmsafemap->sm_cg == -1 ||
12164 (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12165 LIST_EMPTY(&bmsafemap->sm_freewr))) {
12166 LIST_REMOVE(sentinel, sm_next);
12167 LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12171 * If we don't get the lock and we're waiting, try again; if
12172 * not, move on to the next buf and try to sync it.
12174 bp = getdirtybuf(bmsafemap->sm_buf, &lk, waitfor);
12175 if (bp == NULL && waitfor == MNT_WAIT)
12177 LIST_REMOVE(sentinel, sm_next);
12178 LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12182 if (waitfor == MNT_NOWAIT)
12185 error = bwrite(bp);
12190 LIST_REMOVE(sentinel, sm_next);
12192 free(sentinel, M_BMSAFEMAP);
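/*
 * Illustrative note (not from the original source): sync_cgs() uses a
 * sentinel to keep its place in a list it cannot hold locked across
 * sleeps. The pattern, roughly:
 *
 *	LIST_INSERT_HEAD(head, sentinel, link);
 *	while ((item = LIST_NEXT(sentinel, link)) != NULL) {
 *		LIST_REMOVE(sentinel, link);
 *		LIST_INSERT_AFTER(item, sentinel, link);
 *		// may drop the lock and sleep; concurrent inserts and
 *		// removals leave the sentinel's position valid
 *	}
 *	LIST_REMOVE(sentinel, link);
 *
 * Real entries are distinguished from sentinels by sm_cg == -1.
 */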
12197 * This routine is called when we are trying to synchronously flush a
12198 * file. This routine must eliminate any filesystem metadata dependencies
12199 * so that the syncing routine can succeed.
12202 softdep_sync_metadata(struct vnode *vp)
12207 * Ensure that any direct block dependencies have been cleared,
12208 * truncations are started, and inode references are journaled.
12212 * Write all journal records to prevent rollbacks on devvp.
12214 if (vp->v_type == VCHR)
12215 softdep_flushjournal(vp->v_mount);
12216 error = flush_inodedep_deps(vp, vp->v_mount, VTOI(vp)->i_number);
12218 * Ensure that all truncates are written so we won't find deps on
12221 process_truncates(vp);
12228 * This routine is called when we are attempting to sync a buf with
12229 * dependencies. If waitfor is MNT_NOWAIT it attempts to schedule any
12230 * other IO it can but returns EBUSY if the buffer is not yet able to
12231 * be written. Dependencies which will not cause rollbacks will always
12235 softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
12237 struct indirdep *indirdep;
12238 struct pagedep *pagedep;
12239 struct allocindir *aip;
12240 struct newblk *newblk;
12242 struct worklist *wk;
12246 * For VCHR we just don't want to force flush any dependencies that
12247 * will cause rollbacks.
12249 if (vp->v_type == VCHR) {
12250 if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
12256 * As we hold the buffer locked, none of its dependencies
12261 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
12262 switch (wk->wk_type) {
12264 case D_ALLOCDIRECT:
12266 newblk = WK_NEWBLK(wk);
12267 if (newblk->nb_jnewblk != NULL) {
12268 if (waitfor == MNT_NOWAIT) {
12272 jwait(&newblk->nb_jnewblk->jn_list, waitfor);
12275 if (newblk->nb_state & DEPCOMPLETE ||
12276 waitfor == MNT_NOWAIT)
12278 nbp = newblk->nb_bmsafemap->sm_buf;
12279 nbp = getdirtybuf(nbp, &lk, waitfor);
12283 if ((error = bwrite(nbp)) != 0)
12289 indirdep = WK_INDIRDEP(wk);
12290 if (waitfor == MNT_NOWAIT) {
12291 if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
12292 !LIST_EMPTY(&indirdep->ir_deplisthd)) {
12297 if (!TAILQ_EMPTY(&indirdep->ir_trunc))
12298 panic("softdep_sync_buf: truncation pending.");
12300 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
12301 newblk = (struct newblk *)aip;
12302 if (newblk->nb_jnewblk != NULL) {
12303 jwait(&newblk->nb_jnewblk->jn_list,
12307 if (newblk->nb_state & DEPCOMPLETE)
12309 nbp = newblk->nb_bmsafemap->sm_buf;
12310 nbp = getdirtybuf(nbp, &lk, waitfor);
12314 if ((error = bwrite(nbp)) != 0)
12323 * Only flush directory entries in synchronous passes.
12325 if (waitfor != MNT_WAIT) {
12330 * While syncing snapshots, we must allow recursive
12335 * We are trying to sync a directory that may
12336 * have dependencies on both its own metadata
12337 * and/or dependencies on the inodes of any
12338 * recently allocated files. We walk its diradd
12339 * lists pushing out the associated inode.
12341 pagedep = WK_PAGEDEP(wk);
12342 for (i = 0; i < DAHASHSZ; i++) {
12343 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0)
12345 if ((error = flush_pagedep_deps(vp, wk->wk_mp,
12346 &pagedep->pd_diraddhd[i]))) {
12361 panic("softdep_sync_buf: Unknown type %s",
12362 TYPENAME(wk->wk_type));
12373 * Flush the dependencies associated with an inodedep.
12374 * Called with splbio blocked.
12377 flush_inodedep_deps(vp, mp, ino)
12382 struct inodedep *inodedep;
12383 struct inoref *inoref;
12384 int error, waitfor;
12387 * This work is done in two passes. The first pass grabs most
12388 * of the buffers and begins asynchronously writing them. The
12389 * only way to wait for these asynchronous writes is to sleep
12390 * on the filesystem vnode which may stay busy for a long time
12391 * if the filesystem is active. So, instead, we make a second
12392 * pass over the dependencies blocking on each write. In the
12393 * usual case we will be blocking against a write that we
12394 * initiated, so when it is done the dependency will have been
12395 * resolved. Thus the second pass is expected to end quickly.
12396 * We give a brief window at the top of the loop to allow
12397 * any pending I/O to complete.
12399 for (error = 0, waitfor = MNT_NOWAIT; ; ) {
12405 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
12407 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12408 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12410 jwait(&inoref->if_list, MNT_WAIT);
12414 if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
12415 flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
12416 flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
12417 flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
12420 * If this was pass 2, we are done; otherwise start pass 2.
12422 if (waitfor == MNT_WAIT)
12424 waitfor = MNT_WAIT;
12427 * Try freeing inodedep in case all dependencies have been removed.
12429 if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
12430 (void) free_inodedep(inodedep);
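/*
 * Illustrative note (not from the original source): the two-pass
 * strategy above can be summarized as
 *
 *	for (waitfor = MNT_NOWAIT; ; waitfor = MNT_WAIT) {
 *		// flush all dependency lists with `waitfor'
 *		if (waitfor == MNT_WAIT)
 *			break;
 *	}
 *
 * Pass 1 starts asynchronous writes cheaply; pass 2 then blocks on each
 * buffer, which is quick because most of the writes were already begun
 * by pass 1.
 */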
12435 * Flush an inode dependency list.
12436 * Called with splbio blocked.
12439 flush_deplist(listhead, waitfor, errorp)
12440 struct allocdirectlst *listhead;
12444 struct allocdirect *adp;
12445 struct newblk *newblk;
12448 mtx_assert(&lk, MA_OWNED);
12449 TAILQ_FOREACH(adp, listhead, ad_next) {
12450 newblk = (struct newblk *)adp;
12451 if (newblk->nb_jnewblk != NULL) {
12452 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12455 if (newblk->nb_state & DEPCOMPLETE)
12457 bp = newblk->nb_bmsafemap->sm_buf;
12458 bp = getdirtybuf(bp, &lk, waitfor);
12460 if (waitfor == MNT_NOWAIT)
12465 if (waitfor == MNT_NOWAIT)
12468 *errorp = bwrite(bp);
12476 * Flush dependencies associated with an allocdirect block.
12479 flush_newblk_dep(vp, mp, lbn)
12484 struct newblk *newblk;
12488 ufs2_daddr_t blkno;
12492 bo = &vp->v_bufobj;
12494 blkno = DIP(ip, i_db[lbn]);
12496 panic("flush_newblk_dep: Missing block");
12499 * Loop until all dependencies related to this block are satisfied.
12500 * We must be careful to restart after each sleep in case a write
12501 * completes some part of this process for us.
12504 if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
12508 if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
12509 panic("flush_newblk_deps: Bad newblk %p", newblk);
12511 * Flush the journal.
12513 if (newblk->nb_jnewblk != NULL) {
12514 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12518 * Write the bitmap dependency.
12520 if ((newblk->nb_state & DEPCOMPLETE) == 0) {
12521 bp = newblk->nb_bmsafemap->sm_buf;
12522 bp = getdirtybuf(bp, &lk, MNT_WAIT);
12526 error = bwrite(bp);
12533 * Write the buffer.
12537 bp = gbincore(bo, lbn);
12539 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
12540 LK_INTERLOCK, BO_MTX(bo));
12541 if (error == ENOLCK) {
12543 continue; /* Slept, retry */
12546 break; /* Failed */
12547 if (bp->b_flags & B_DELWRI) {
12549 error = bwrite(bp);
12557 * We have to wait for the direct pointers to
12558 * point at the newdirblk before the dependency
12561 error = ffs_update(vp, 1);
12570 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
12571 * Called with splbio blocked.
12574 flush_pagedep_deps(pvp, mp, diraddhdp)
12577 struct diraddhd *diraddhdp;
12579 struct inodedep *inodedep;
12580 struct inoref *inoref;
12581 struct ufsmount *ump;
12582 struct diradd *dap;
12588 ump = VFSTOUFS(mp);
12590 while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
12592 * Flush ourselves if this directory entry
12593 * has a MKDIR_PARENT dependency.
12595 if (dap->da_state & MKDIR_PARENT) {
12597 if ((error = ffs_update(pvp, 1)) != 0)
12601 * If that cleared dependencies, go on to next.
12603 if (dap != LIST_FIRST(diraddhdp))
12605 if (dap->da_state & MKDIR_PARENT)
12606 panic("flush_pagedep_deps: MKDIR_PARENT");
12609 * A newly allocated directory must have its "." and
12610 * ".." entries written out before its name can be
12611 * committed in its parent.
12613 inum = dap->da_newinum;
12614 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12615 panic("flush_pagedep_deps: lost inode1");
12617 * Wait for any pending journal adds to complete so we don't
12618 * cause rollbacks while syncing.
12620 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12621 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12623 jwait(&inoref->if_list, MNT_WAIT);
12627 if (dap->da_state & MKDIR_BODY) {
12629 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12632 error = flush_newblk_dep(vp, mp, 0);
12634 * If we still have the dependency we might need to
12635 * update the vnode to sync the new link count to
12638 if (error == 0 && dap == LIST_FIRST(diraddhdp))
12639 error = ffs_update(vp, 1);
12645 * If that cleared dependencies, go on to next.
12647 if (dap != LIST_FIRST(diraddhdp))
12649 if (dap->da_state & MKDIR_BODY) {
12650 inodedep_lookup(UFSTOVFS(ump), inum, 0,
12652 panic("flush_pagedep_deps: MKDIR_BODY "
12653 "inodedep %p dap %p vp %p",
12654 inodedep, dap, vp);
12658 * Flush the inode on which the directory entry depends.
12659 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
12660 * the only remaining dependency is that the updated inode
12661 * count must get pushed to disk. The inode has already
12662 * been pushed into its inode buffer (via VOP_UPDATE) at
12663 * the time of the reference count change. So we need only
12664 * locate that buffer, ensure that there will be no rollback
12665 * caused by a bitmap dependency, then write the inode buffer.
12668 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12669 panic("flush_pagedep_deps: lost inode");
12671 * If the inode still has bitmap dependencies,
12672 * push them to disk.
12674 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
12675 bp = inodedep->id_bmsafemap->sm_buf;
12676 bp = getdirtybuf(bp, &lk, MNT_WAIT);
12680 if ((error = bwrite(bp)) != 0)
12683 if (dap != LIST_FIRST(diraddhdp))
12687 * If the inode is still sitting in a buffer waiting
12688 * to be written or waiting for the link count to be
12689 * adjusted, update it here to flush it to disk.
12691 if (dap == LIST_FIRST(diraddhdp)) {
12693 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12696 error = ffs_update(vp, 1);
12703 * If we have failed to get rid of all the dependencies
12704 * then something is seriously wrong.
12706 if (dap == LIST_FIRST(diraddhdp)) {
12707 inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
12708 panic("flush_pagedep_deps: failed to flush "
12709 "inodedep %p ino %d dap %p", inodedep, inum, dap);
12718 * A large burst of file addition or deletion activity can drive the
12719 * memory load excessively high. First attempt to slow things down
12720 * using the techniques below. If that fails, this routine requests
12721 * the offending operations to fall back to running synchronously
12722 * until the memory load returns to a reasonable level.
12725 softdep_slowdown(vp)
12728 struct ufsmount *ump;
12730 int max_softdeps_hard;
12735 * Check for journal space if needed.
12737 if (DOINGSUJ(vp)) {
12738 ump = VFSTOUFS(vp->v_mount);
12739 if (journal_space(ump, 0) == 0)
12742 max_softdeps_hard = max_softdeps * 11 / 10;
12743 if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
12744 dep_current[D_INODEDEP] < max_softdeps_hard &&
12745 VFSTOUFS(vp->v_mount)->um_numindirdeps < maxindirdeps &&
12746 dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0) {
12750 if (VFSTOUFS(vp->v_mount)->um_numindirdeps >= maxindirdeps || jlow)
12752 stat_sync_limit_hit += 1;
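/*
 * Illustrative note (not from the original source): with a made-up
 * tunable value max_softdeps = 8192, the hard limit above is
 * 8192 * 11 / 10 = 9011, so the slowdown path is taken once, e.g.,
 * dep_current[D_DIRREM] reaches 9011 / 2 = 4505 outstanding removals or
 * dep_current[D_INODEDEP] reaches 9011.
 */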
12760 * Called by the allocation routines when they are about to fail
12761 * in the hope that we can free up the requested resource (inodes
12764 * First check to see if the work list has anything on it. If it has,
12765 * clean up entries until we successfully free the requested resource.
12766 * Because this process holds inodes locked, we cannot handle any remove
12767 * requests that might block on a locked inode as that could lead to
12768 * deadlock. If the worklist yields none of the requested resource,
12769 * start syncing out vnodes to free up the needed space.
12772 softdep_request_cleanup(fs, vp, cred, resource)
12775 struct ucred *cred;
12778 struct ufsmount *ump;
12780 struct vnode *lvp, *mvp;
12782 ufs2_daddr_t needed;
12786 * If we are being called because of a process doing a
12787 * copy-on-write, then it is not safe to process any
12788 * worklist items as we will recurse into the copyonwrite
12789 * routine. This will result in an incoherent snapshot.
12790 * If the vnode that we hold is a snapshot, we must avoid
12791 * handling other resources that could cause deadlock.
12793 if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
12796 if (resource == FLUSH_BLOCKS_WAIT)
12797 stat_cleanup_blkrequests += 1;
12799 stat_cleanup_inorequests += 1;
12802 ump = VFSTOUFS(mp);
12803 mtx_assert(UFS_MTX(ump), MA_OWNED);
12805 error = ffs_update(vp, 1);
12811 * If we are in need of resources, consider pausing for
12812 * tickdelay to give ourselves some breathing room.
12815 process_removes(vp);
12816 process_truncates(vp);
12817 request_cleanup(UFSTOVFS(ump), resource);
12820 * Now clean up at least as many resources as we will need.
12822 * When requested to clean up inodes, the number that are needed
12823 * is set by the number of simultaneous writers (mnt_writeopcount)
12824	 * plus a bit of slop (2) in case some more writers show up while we are cleaning.
12827 * When requested to free up space, the amount of space that
12828 * we need is enough blocks to allocate a full-sized segment
12829 * (fs_contigsumsize). The number of such segments that will
12830 * be needed is set by the number of simultaneous writers
12831 * (mnt_writeopcount) plus a bit of slop (2) in case some more
12832 * writers show up while we are cleaning.
12834	 * Additionally, if we are unprivileged and allocating space,
12835 * we need to ensure that we clean up enough blocks to get the
12836	 * needed number of blocks over the threshold of the minimum
12837 * number of blocks required to be kept free by the filesystem
12840 if (resource == FLUSH_INODES_WAIT) {
12841 needed = vp->v_mount->mnt_writeopcount + 2;
12842 } else if (resource == FLUSH_BLOCKS_WAIT) {
12843 needed = (vp->v_mount->mnt_writeopcount + 2) *
12844 fs->fs_contigsumsize;
12845 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
12846 needed += fragstoblks(fs,
12847 roundup((fs->fs_dsize * fs->fs_minfree / 100) -
12848 fs->fs_cstotal.cs_nffree, fs->fs_frag));
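/*
 * Worked example of the computation above (all numbers assumed for
 * illustration): with mnt_writeopcount = 3 and fs_contigsumsize = 16,
 * the base requirement is (3 + 2) * 16 = 80 blocks.  If the caller
 * lacks PRIV_VFS_BLOCKRESERVE on a filesystem with fs_dsize = 1000000
 * fragments and fs_minfree = 8 (an 80000-fragment reserve) but only
 * cs_nffree = 70000 free fragments, then another
 * fragstoblks(fs, roundup(80000 - 70000, fs_frag)) blocks must be
 * reclaimed to climb back over the reserve.
 */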
12851 printf("softdep_request_cleanup: Unknown resource type %d\n",
12855 starttime = time_second;
12857 if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
12858 fs->fs_cstotal.cs_nbfree <= needed) ||
12859 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
12860 fs->fs_cstotal.cs_nifree <= needed)) {
12862 if (ump->softdep_on_worklist > 0 &&
12863 process_worklist_item(UFSTOVFS(ump),
12864 ump->softdep_on_worklist, LK_NOWAIT) != 0)
12865 stat_worklist_push += 1;
12869 * If we still need resources and there are no more worklist
12870 * entries to process to obtain them, we have to start flushing
12871 * the dirty vnodes to force the release of additional requests
12872	 * to the worklist that we can then process to reap additional
12873 * resources. We walk the vnodes associated with the mount point
12874 * until we get the needed worklist requests that we can reap.
12876 if ((resource == FLUSH_BLOCKS_WAIT &&
12877 fs->fs_cstotal.cs_nbfree <= needed) ||
12878 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
12879 fs->fs_cstotal.cs_nifree <= needed)) {
12880 MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
12881 if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
12885 if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
12888 if (lvp->v_vflag & VV_NOSYNC) { /* unlinked */
12892 (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
12895 lvp = ump->um_devvp;
12896 if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
12897 VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
12898 VOP_UNLOCK(lvp, 0);
12900 if (ump->softdep_on_worklist > 0) {
12901 stat_cleanup_retries += 1;
12904 stat_cleanup_failures += 1;
12906 if (time_second - starttime > stat_cleanup_high_delay)
12907 stat_cleanup_high_delay = time_second - starttime;
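/*
 * The statistics updated above are meant to be observable at run
 * time.  A hypothetical sysctl(9) hookup for one of them (the actual
 * declarations live earlier in this file, outside this excerpt):
 *
 *	SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay,
 *	    CTLFLAG_RW, &stat_cleanup_high_delay, 0,
 *	    "Maximum time spent in softdep_request_cleanup (seconds)");
 */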
12913 * If memory utilization has gotten too high, deliberately slow things
12914 * down and speed up the I/O processing.
12916 extern struct thread *syncertd;
12918 request_cleanup(mp, resource)
12922 struct thread *td = curthread;
12923 struct ufsmount *ump;
12925 mtx_assert(&lk, MA_OWNED);
12927 * We never hold up the filesystem syncer or buf daemon.
12929 if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
12931 ump = VFSTOUFS(mp);
12933 * First check to see if the work list has gotten backlogged.
12934 * If it has, co-opt this process to help clean up two entries.
12935 * Because this process may hold inodes locked, we cannot
12936 * handle any remove requests that might block on a locked
12937 * inode as that could lead to deadlock. We set TDP_SOFTDEP
12938 * to avoid recursively processing the worklist.
12940 if (ump->softdep_on_worklist > max_softdeps / 10) {
12941 td->td_pflags |= TDP_SOFTDEP;
12942 process_worklist_item(mp, 2, LK_NOWAIT);
12943 td->td_pflags &= ~TDP_SOFTDEP;
12944 stat_worklist_push += 2;
12948 * Next, we attempt to speed up the syncer process. If that
12949 * is successful, then we allow the process to continue.
12951 if (softdep_speedup() &&
12952 resource != FLUSH_BLOCKS_WAIT &&
12953 resource != FLUSH_INODES_WAIT)
12956 * If we are resource constrained on inode dependencies, try
12957 * flushing some dirty inodes. Otherwise, we are constrained
12958 * by file deletions, so try accelerating flushes of directories
12959 * with removal dependencies. We would like to do the cleanup
12960 * here, but we probably hold an inode locked at this point and
12961 * that might deadlock against one that we try to clean. So,
12962 * the best that we can do is request the syncer daemon to do
12963 * the cleanup for us.
12965 switch (resource) {
12968 case FLUSH_INODES_WAIT:
12969 stat_ino_limit_push += 1;
12970 req_clear_inodedeps += 1;
12971 stat_countp = &stat_ino_limit_hit;
12975 case FLUSH_BLOCKS_WAIT:
12976 stat_blk_limit_push += 1;
12977 req_clear_remove += 1;
12978 stat_countp = &stat_blk_limit_hit;
12982 panic("request_cleanup: unknown type");
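/*
 * The req_clear_inodedeps and req_clear_remove counters bumped above
 * are the handoff to the syncer: the next pass over the softdep
 * worklist notices them and calls clear_inodedeps() and clear_remove()
 * (both below) to do the flushing this thread cannot safely do
 * itself, then awakens any sleepers.
 */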
12985 * Hopefully the syncer daemon will catch up and awaken us.
12986 * We wait at most tickdelay before proceeding in any case.
12989 if (callout_pending(&softdep_callout) == FALSE)
12990 callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
12993 msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
12999 * Awaken processes pausing in request_cleanup and clear proc_waiting
13000 * to indicate that there is no longer a timer running.
13008	 * The callout_ API has acquired mtx and will hold it around this function call.
13012 wakeup_one(&proc_waiting);
13013 if (proc_waiting > 0)
13014 callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13019 * Flush out a directory with at least one removal dependency in an effort to
13020 * reduce the number of dirrem, freefile, and freeblks dependency structures.
13026 struct pagedep_hashhead *pagedephd;
13027 struct pagedep *pagedep;
13028 static int next = 0;
13035 mtx_assert(&lk, MA_OWNED);
13037 for (cnt = 0; cnt <= pagedep_hash; cnt++) {
13038 pagedephd = &pagedep_hashtbl[next++];
13039 if (next > pagedep_hash)
13041 LIST_FOREACH(pagedep, pagedephd, pd_hash) {
13042 if (LIST_EMPTY(&pagedep->pd_dirremhd))
13044 mp = pagedep->pd_list.wk_mp;
13045 ino = pagedep->pd_ino;
13046 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13051 * Let unmount clear deps
13053 error = vfs_busy(mp, MBF_NOWAIT);
13056 error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13060 softdep_error("clear_remove: vget", error);
13063 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13064 softdep_error("clear_remove: fsync", error);
13065 bo = &vp->v_bufobj;
13071 vn_finished_write(mp);
13079 * Clear out a block of dirty inodes in an effort to reduce
13080 * the number of inodedep dependency structures.
13083 clear_inodedeps(td)
13086 struct inodedep_hashhead *inodedephd;
13087 struct inodedep *inodedep;
13088 static int next = 0;
13093 ino_t firstino, lastino, ino;
13095 mtx_assert(&lk, MA_OWNED);
13097 * Pick a random inode dependency to be cleared.
13098 * We will then gather up all the inodes in its block
13099 * that have dependencies and flush them out.
13101 for (cnt = 0; cnt <= inodedep_hash; cnt++) {
13102 inodedephd = &inodedep_hashtbl[next++];
13103 if (next > inodedep_hash)
13105 if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
13108 if (inodedep == NULL)
13110 fs = inodedep->id_fs;
13111 mp = inodedep->id_list.wk_mp;
13113 * Find the last inode in the block with dependencies.
13115 firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
13116 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
13117 if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
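/*
 * Example of the alignment arithmetic above (assumed numbers): with
 * INOPB(fs) = 64 inodes per block, an inodedep for inode 200 gives
 * firstino = 200 & ~63 = 192, and the scan starts at
 * lastino = 192 + 63 = 255, walking down to the highest inode in the
 * block that still has a dependency.  The mask trick relies on
 * INOPB(fs) being a power of two, which always holds for FFS.
 */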
13120 * Asynchronously push all but the last inode with dependencies.
13121 * Synchronously push the last inode with dependencies to ensure
13122 * that the inode block gets written to free up the inodedeps.
13124 for (ino = firstino; ino <= lastino; ino++) {
13125 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13127 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13130 error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
13132 vn_finished_write(mp);
13136 if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13137 FFSV_FORCEINSMQ)) != 0) {
13138 softdep_error("clear_inodedeps: vget", error);
13140 vn_finished_write(mp);
13145 if (ino == lastino) {
13146 if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)))
13147 softdep_error("clear_inodedeps: fsync1", error);
13149 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13150 softdep_error("clear_inodedeps: fsync2", error);
13151 BO_LOCK(&vp->v_bufobj);
13153 BO_UNLOCK(&vp->v_bufobj);
13156 vn_finished_write(mp);
13162 softdep_buf_append(bp, wkhd)
13164 struct workhead *wkhd;
13166 struct worklist *wk;
13169 while ((wk = LIST_FIRST(wkhd)) != NULL) {
13170 WORKLIST_REMOVE(wk);
13171 WORKLIST_INSERT(&bp->b_dep, wk);
13178 softdep_inode_append(ip, cred, wkhd)
13180 struct ucred *cred;
13181 struct workhead *wkhd;
13188 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
13189 (int)fs->fs_bsize, cred, &bp);
13192 softdep_freework(wkhd);
13195 softdep_buf_append(bp, wkhd);
13200 softdep_freework(wkhd)
13201 struct workhead *wkhd;
13205 handle_jwork(wkhd);
13210 * Function to determine if the buffer has outstanding dependencies
13211 * that will cause a roll-back if the buffer is written. If wantcount
13212	 * is set, return the number of dependencies; otherwise just yes or no.
13215 softdep_count_dependencies(bp, wantcount)
13219 struct worklist *wk;
13220 struct bmsafemap *bmsafemap;
13221 struct freework *freework;
13222 struct inodedep *inodedep;
13223 struct indirdep *indirdep;
13224 struct freeblks *freeblks;
13225 struct allocindir *aip;
13226 struct pagedep *pagedep;
13227 struct dirrem *dirrem;
13228 struct newblk *newblk;
13229 struct mkdir *mkdir;
13230 struct diradd *dap;
13235 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
13236 switch (wk->wk_type) {
13239 inodedep = WK_INODEDEP(wk);
13240 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
13241 /* bitmap allocation dependency */
13246 if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
13247 /* direct block pointer dependency */
13252 if (TAILQ_FIRST(&inodedep->id_extupdt)) {
13253 /* direct block pointer dependency */
13258 if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
13259 /* Add reference dependency. */
13267 indirdep = WK_INDIRDEP(wk);
13269 TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
13270 /* indirect truncation dependency */
13276 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
13277 /* indirect block pointer dependency */
13285 pagedep = WK_PAGEDEP(wk);
13286 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
13287 if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
13288 /* Journal remove ref dependency. */
13294 for (i = 0; i < DAHASHSZ; i++) {
13296 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
13297 /* directory entry dependency */
13306 bmsafemap = WK_BMSAFEMAP(wk);
13307 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
13308 /* Add reference dependency. */
13313 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
13314 /* Allocate block dependency. */
13322 freeblks = WK_FREEBLKS(wk);
13323 if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
13324 /* Freeblk journal dependency. */
13331 case D_ALLOCDIRECT:
13333 newblk = WK_NEWBLK(wk);
13334 if (newblk->nb_jnewblk) {
13335 /* Journal allocate dependency. */
13343 mkdir = WK_MKDIR(wk);
13344 if (mkdir->md_jaddref) {
13345 /* Journal reference dependency. */
13357 /* never a dependency on these blocks */
13361 panic("softdep_count_dependencies: Unexpected type %s",
13362 TYPENAME(wk->wk_type));
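/*
 * Minimal usage sketch (hypothetical caller; in the stock kernel this
 * routine is normally reached through the bioops dependency-count
 * hook): with wantcount == 0 the return value is effectively a
 * boolean "would writing bp now be rolled back?", while a nonzero
 * wantcount tallies every outstanding dependency:
 *
 *	deps = softdep_count_dependencies(bp, 1);
 *	if (deps > 0)
 *		... defer the write until the dependencies clear ...
 */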
13372 * Acquire exclusive access to a buffer.
13373 * Must be called with a locked mtx parameter.
13374 * Return acquired buffer or NULL on failure.
13376 static struct buf *
13377 getdirtybuf(bp, mtx, waitfor)
13384 mtx_assert(mtx, MA_OWNED);
13385 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
13386 if (waitfor != MNT_WAIT)
13388 error = BUF_LOCK(bp,
13389 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, mtx);
13391	 * Even if we successfully acquire bp here, we have dropped
13392	 * mtx, which may violate our guarantee.
13396 else if (error != ENOLCK)
13397 panic("getdirtybuf: inconsistent lock: %d", error);
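/*
 * The logic above is a two-phase acquisition: try LK_EXCLUSIVE |
 * LK_NOWAIT first and, only when the caller asked for MNT_WAIT, fall
 * back to a blocking BUF_LOCK with LK_SLEEPFAIL | LK_INTERLOCK that
 * atomically drops mtx while sleeping.  Because mtx was dropped, the
 * state it protected must be revalidated, so even a successful
 * blocking acquisition cannot simply hand bp back to the caller.
 */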
13401 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13402 if (mtx == &lk && waitfor == MNT_WAIT) {
13404 BO_LOCK(bp->b_bufobj);
13406 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13407 bp->b_vflags |= BV_BKGRDWAIT;
13408 msleep(&bp->b_xflags, BO_MTX(bp->b_bufobj),
13409 PRIBIO | PDROP, "getbuf", 0);
13411 BO_UNLOCK(bp->b_bufobj);
13416 if (waitfor != MNT_WAIT)
13419	 * The mtx argument must be bp->b_vp's mutex in this case.
13422 #ifdef DEBUG_VFS_LOCKS
13423 if (bp->b_vp->v_type != VCHR)
13424 ASSERT_BO_LOCKED(bp->b_bufobj);
13426 bp->b_vflags |= BV_BKGRDWAIT;
13427 msleep(&bp->b_xflags, mtx, PRIBIO, "getbuf", 0);
13430 if ((bp->b_flags & B_DELWRI) == 0) {
13440 * Check if it is safe to suspend the file system now. On entry,
13441 * the vnode interlock for devvp should be held. Return 0 with
13442 * the mount interlock held if the file system can be suspended now,
13443 * otherwise return EAGAIN with the mount interlock held.
13446 softdep_check_suspend(struct mount *mp,
13447 struct vnode *devvp,
13449 int softdep_accdeps,
13450 int secondary_writes,
13451 int secondary_accwrites)
13454 struct ufsmount *ump;
13457 ump = VFSTOUFS(mp);
13458 bo = &devvp->v_bufobj;
13459 ASSERT_BO_LOCKED(bo);
13462 if (!TRY_ACQUIRE_LOCK(&lk)) {
13470 if (mp->mnt_secondary_writes != 0) {
13473 msleep(&mp->mnt_secondary_writes,
13475 (PUSER - 1) | PDROP, "secwr", 0);
13483 * Reasons for needing more work before suspend:
13484 * - Dirty buffers on devvp.
13485 * - Softdep activity occurred after start of vnode sync loop
13486 * - Secondary writes occurred after start of vnode sync loop
13489 if (bo->bo_numoutput > 0 ||
13490 bo->bo_dirty.bv_cnt > 0 ||
13491 softdep_deps != 0 ||
13492 ump->softdep_deps != 0 ||
13493 softdep_accdeps != ump->softdep_accdeps ||
13494 secondary_writes != 0 ||
13495 mp->mnt_secondary_writes != 0 ||
13496 secondary_accwrites != mp->mnt_secondary_accwrites)
13505 * Get the number of dependency structures for the file system, both
13506 * the current number and the total number allocated. These will
13507 * later be used to detect that softdep processing has occurred.
13510 softdep_get_depcounts(struct mount *mp,
13511 int *softdep_depsp,
13512 int *softdep_accdepsp)
13514 struct ufsmount *ump;
13516 ump = VFSTOUFS(mp);
13518 *softdep_depsp = ump->softdep_deps;
13519 *softdep_accdepsp = ump->softdep_accdeps;
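/*
 * Sketch of the intended suspend handshake (hypothetical caller,
 * following the comments above and on softdep_check_suspend()):
 * snapshot the counters, sync the vnodes, then verify that nothing
 * moved in the meantime.
 *
 *	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
 *	... sync loop over the mount's vnodes ...
 *	error = softdep_check_suspend(mp, devvp, softdep_deps,
 *	    softdep_accdeps, secondary_writes, secondary_accwrites);
 *	if (error == EAGAIN)
 *		... redo the sync loop and try again ...
 */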
13524 * Wait for pending output on a vnode to complete.
13525 * Must be called with vnode lock and interlock locked.
13527 * XXX: Should just be a call to bufobj_wwait().
13535 bo = &vp->v_bufobj;
13536 ASSERT_VOP_LOCKED(vp, "drain_output");
13537 ASSERT_BO_LOCKED(bo);
13539 while (bo->bo_numoutput) {
13540 bo->bo_flag |= BO_WWAIT;
13541 msleep((caddr_t)&bo->bo_numoutput,
13542 BO_MTX(bo), PRIBIO + 1, "drainvp", 0);
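/*
 * An equivalent formulation using bufobj_wwait(9), as the XXX above
 * suggests (a sketch; it discards the return value exactly as the
 * open-coded loop does):
 *
 *	ASSERT_BO_LOCKED(bo);
 *	(void) bufobj_wwait(bo, 0, 0);
 */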
13547 * Called whenever a buffer that is being invalidated or reallocated
13548 * contains dependencies. This should only happen if an I/O error has
13549 * occurred. The routine is called with the buffer locked.
13552 softdep_deallocate_dependencies(bp)
13556 if ((bp->b_ioflags & BIO_ERROR) == 0)
13557 panic("softdep_deallocate_dependencies: dangling deps");
13558 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
13559 panic("softdep_deallocate_dependencies: unrecovered I/O error");
13563 * Function to handle asynchronous write errors in the filesystem.
13566 softdep_error(func, error)
13571 /* XXX should do something better! */
13572 printf("%s: got error %d while accessing filesystem\n", func, error);
13578 inodedep_print(struct inodedep *inodedep, int verbose)
13580 db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d"
13582 inodedep, inodedep->id_fs, inodedep->id_state,
13583 (intmax_t)inodedep->id_ino,
13584 (intmax_t)fsbtodb(inodedep->id_fs,
13585 ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
13586 inodedep->id_nlinkdelta, inodedep->id_savednlink,
13587 inodedep->id_savedino1);
13592 db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, "
13594 LIST_FIRST(&inodedep->id_pendinghd),
13595 LIST_FIRST(&inodedep->id_bufwait),
13596 LIST_FIRST(&inodedep->id_inowait),
13597 TAILQ_FIRST(&inodedep->id_inoreflst),
13598 inodedep->id_mkdiradd);
13599 db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n",
13600 TAILQ_FIRST(&inodedep->id_inoupdt),
13601 TAILQ_FIRST(&inodedep->id_newinoupdt),
13602 TAILQ_FIRST(&inodedep->id_extupdt),
13603 TAILQ_FIRST(&inodedep->id_newextupdt));
13606 DB_SHOW_COMMAND(inodedep, db_show_inodedep)
13609 if (have_addr == 0) {
13610 db_printf("Address required\n");
13613 inodedep_print((struct inodedep*)addr, 1);
13616 DB_SHOW_COMMAND(inodedeps, db_show_inodedeps)
13618 struct inodedep_hashhead *inodedephd;
13619 struct inodedep *inodedep;
13623 fs = have_addr ? (struct fs *)addr : NULL;
13624 for (cnt = 0; cnt < inodedep_hash; cnt++) {
13625 inodedephd = &inodedep_hashtbl[cnt];
13626 LIST_FOREACH(inodedep, inodedephd, id_hash) {
13627 if (fs != NULL && fs != inodedep->id_fs)
13629 inodedep_print(inodedep, 0);
13634 DB_SHOW_COMMAND(worklist, db_show_worklist)
13636 struct worklist *wk;
13638 if (have_addr == 0) {
13639 db_printf("Address required\n");
13642 wk = (struct worklist *)addr;
13643	db_printf("worklist: %p type %s state 0x%X\n",
13644 wk, TYPENAME(wk->wk_type), wk->wk_state);
13647 DB_SHOW_COMMAND(workhead, db_show_workhead)
13649 struct workhead *wkhd;
13650 struct worklist *wk;
13653 if (have_addr == 0) {
13654 db_printf("Address required\n");
13657 wkhd = (struct workhead *)addr;
13658 wk = LIST_FIRST(wkhd);
13659 for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list))
13660	db_printf("worklist: %p type %s state 0x%X\n",
13661 wk, TYPENAME(wk->wk_type), wk->wk_state);
13663	db_printf("workhead overflow\n");
13668 DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
13670 struct jaddref *jaddref;
13671 struct diradd *diradd;
13672 struct mkdir *mkdir;
13674 LIST_FOREACH(mkdir, &mkdirlisthd, md_mkdirs) {
13675 diradd = mkdir->md_diradd;
13676 db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
13677 mkdir, mkdir->md_state, diradd, diradd->da_state);
13678 if ((jaddref = mkdir->md_jaddref) != NULL)
13679 db_printf(" jaddref %p jaddref state 0x%X",
13680 jaddref, jaddref->ja_state);
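/*
 * Example ddb(4) session using the commands defined above (addresses
 * and output are illustrative only):
 *
 *	db> show inodedeps
 *	0xc2a43c00 fs 0xc2a0e000 st 4001 ino 12345 inoblk 98712 ...
 *	db> show inodedep 0xc2a43c00
 *	... verbose dump of that one inodedep ...
 *	db> show mkdirs
 *	mkdir: 0xc2b11a80 state 0x2 dap 0xc2b11b00 state 0x8 ...
 */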
13687 #endif /* SOFTUPDATES */