/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/uma.h>

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
    "struct vnode *", "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
    "struct vnode *", "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *", "int");
SDT_PROBE_DEFINE3(vfs, namecache, shrink_negative, done, "struct vnode *",
    "char *", "int");

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */
struct namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct vnode *nc_dvp;		/* vnode of parent of name */
	union {
		struct vnode *nu_vp;	/* vnode the name refers to */
		u_int nu_neghits;	/* negative entry hits */
	} n_un;
	u_char nc_flag;			/* flag bits */
	u_char nc_nlen;			/* length of name */
	char nc_name[0];		/* segment name + nul */
};

/*
 * struct namecache_ts repeats struct namecache layout up to the
 * nc_nlen member.
 * struct namecache_ts is used in place of struct namecache when time(s) need
 * to be stored.  The nc_dotdottime field is used when a cache entry is mapping
 * both a non-dotdot directory name plus dotdot for the directory's
 * parent.
 */
struct namecache_ts {
	struct timespec	nc_time;	/* timespec provided by fs */
	struct timespec	nc_dotdottime;	/* dotdot timespec provided by fs */
	int	nc_ticks;		/* ticks value when entry was added */
	struct namecache nc_nc;
};

#define	nc_vp		n_un.nu_vp
#define	nc_neghits	n_un.nu_neghits

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	0x01
#define NCF_ISDOTDOT	0x02
#define	NCF_TS		0x04
#define	NCF_DTS		0x08
#define	NCF_DVDROP	0x10
#define	NCF_NEGATIVE	0x20
#define	NCF_HOTNEGATIVE	0x40

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (dvp, name), where dvp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry (i.e. for a name that is known NOT to
 * exist), the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 *
 * These locks are used (in the order in which they can be taken):
 * NAME		TYPE	ROLE
 * vnodelock	mtx	vnode lists and v_cache_dd field protection
 * bucketlock	rwlock	for access to given set of hash buckets
 * neglist	mtx	negative entry LRU management
 *
 * Additionally, ncneg_shrink_lock mtx is used to have at most one thread
 * shrinking the LRU list.
 *
 * It is legal to take multiple vnodelock and bucketlock locks.  The locking
 * order is lower address first.  Both are recursive.
 *
 * "." lookups are lockless.
 *
 * ".." and vnode -> name lookups require vnodelock.
 *
 * name -> vnode lookup requires the relevant bucketlock to be held for reading.
 *
 * Insertions and removals of entries require involved vnodes and bucketlocks
 * to be write-locked to prevent other threads from seeing the entry.
 *
 * Some lookups result in removal of the found entry (e.g. getting rid of a
 * negative entry with the intent to create a positive one), which poses a
 * problem when multiple threads reach the state.  Similarly, two different
 * threads can purge two different vnodes and try to remove the same name.
 *
 * If the already held vnode lock is lower than the second required lock, we
 * can just take the other lock.  However, in the opposite case, this could
 * deadlock.  As such, this is resolved by trylocking and, if that fails,
 * unlocking the first node, locking everything in order and revalidating
 * the state.
 */

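/*
 * Illustrative sketch (not compiled): the trylock-and-reorder pattern
 * described above.  "held" and "needed" are hypothetical names for the
 * vnode lock already owned and the one still required; the cache_zap_*()
 * helpers below implement the real thing.
 */
#if 0
	if (needed > held) {
		/* Lower address already held; taking the higher one is safe. */
		mtx_lock(needed);
	} else if (!mtx_trylock(needed)) {
		/* Would invert the order; back off and relock in order. */
		mtx_unlock(held);
		mtx_lock(needed);
		mtx_lock(held);
		/* The entry may have changed while unlocked; revalidate. */
	}
#endif
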
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static __read_mostly LIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
static u_long __read_mostly	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static u_long __read_mostly	ncnegfactor = 12;	/* ratio of negative entries */
SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");
static u_long __exclusive_cache_line	numneg;	/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative entries in namecache");
static u_long __exclusive_cache_line	numcache;/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long __exclusive_cache_line	numcachehv;/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
    "Number of namecache entries with vnodes held");
u_int __read_mostly	ncsizefactor = 2;
SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
    "Size factor for namecache");
static u_int __read_mostly	ncpurgeminvnodes;
SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
    "Number of vnodes below which purgevfs ignores the request");
static u_int __read_mostly	ncneghitsrequeue = 8;
SYSCTL_UINT(_vfs, OID_AUTO, ncneghitsrequeue, CTLFLAG_RW, &ncneghitsrequeue, 0,
    "Number of hits to requeue a negative entry in the LRU list");

struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct mtx	ncneg_shrink_lock;
static int	shrink_list_turn;

struct neglist {
	struct mtx		nl_lock;
	TAILQ_HEAD(, namecache) nl_list;
} __aligned(CACHE_LINE_SIZE);

static struct neglist __read_mostly	*neglists;
static struct neglist ncneg_hot;

#define	numneglists	(ncneghash + 1)
static u_int __read_mostly	ncneghash;
static inline struct neglist *
NCP2NEGLIST(struct namecache *ncp)
{

	return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
}

#define	numbucketlocks (ncbuckethash + 1)
static u_int __read_mostly  ncbuckethash;
static struct rwlock_padalign __read_mostly  *bucketlocks;
#define	HASH2BUCKETLOCK(hash) \
	((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))

#define	numvnodelocks (ncvnodehash + 1)
static u_int __read_mostly  ncvnodehash;
static struct mtx __read_mostly *vnodelocks;
static inline struct mtx *
VP2VNODELOCK(struct vnode *vp)
{

	return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
}

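/*
 * Illustrative sketch (not compiled): how the striped-lock selectors above
 * are meant to be used.  The pointer is shifted right by 8 to discard the
 * low bits shared by allocator-aligned addresses, then masked by the
 * power-of-2 hash size, so a given vnode always maps to the same mutex.
 */
#if 0
	struct mtx *vlp;

	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	/* ... manipulate vp's name cache lists ... */
	mtx_unlock(vlp);
#endif
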
/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t __read_mostly cache_zone_small;
static uma_zone_t __read_mostly cache_zone_small_ts;
static uma_zone_t __read_mostly cache_zone_large;
static uma_zone_t __read_mostly cache_zone_large_ts;

#define	CACHE_PATH_CUTOFF	35	/* names this long or shorter use the small zones */

static struct namecache *
cache_alloc(int len, int ts)
{
	struct namecache_ts *ncp_ts;
	struct namecache *ncp;

	if (__predict_false(ts)) {
		if (len <= CACHE_PATH_CUTOFF)
			ncp_ts = uma_zalloc(cache_zone_small_ts, M_WAITOK);
		else
			ncp_ts = uma_zalloc(cache_zone_large_ts, M_WAITOK);
		ncp = &ncp_ts->nc_nc;
	} else {
		if (len <= CACHE_PATH_CUTOFF)
			ncp = uma_zalloc(cache_zone_small, M_WAITOK);
		else
			ncp = uma_zalloc(cache_zone_large, M_WAITOK);
	}
	return (ncp);
}

static void
cache_free(struct namecache *ncp)
{
	struct namecache_ts *ncp_ts;

	if (ncp == NULL)
		return;
	if ((ncp->nc_flag & NCF_DVDROP) != 0)
		vdrop(ncp->nc_dvp);
	if (__predict_false(ncp->nc_flag & NCF_TS)) {
		ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
		if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
			uma_zfree(cache_zone_small_ts, ncp_ts);
		else
			uma_zfree(cache_zone_large_ts, ncp_ts);
	} else {
		if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
			uma_zfree(cache_zone_small, ncp);
		else
			uma_zfree(cache_zone_large, ncp);
	}
}

static void
cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
{
	struct namecache_ts *ncp_ts;

	KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
	    (tsp == NULL && ticksp == NULL),
	    ("No NCF_TS"));

	if (tsp == NULL && ticksp == NULL)
		return;

	ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
	if (tsp != NULL)
		*tsp = ncp_ts->nc_time;
	if (ticksp != NULL)
		*ticksp = ncp_ts->nc_ticks;
}

static int __read_mostly	doingcache = 1;	/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
    "VFS namecache enabled");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
    sizeof(struct namecache), "sizeof(struct namecache)");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
    "Name cache statistics");
#define STATNODE_ULONG(name, descr)	\
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
#define STATNODE_COUNTER(name, descr)	\
	static counter_u64_t __read_mostly name; \
	SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr);
STATNODE_ULONG(numneg, "Number of negative cache entries");
STATNODE_ULONG(numcache, "Number of cache entries");
STATNODE_COUNTER(numcalls, "Number of cache lookups");
STATNODE_COUNTER(dothits, "Number of '.' hits");
STATNODE_COUNTER(dotdothits, "Number of '..' hits");
STATNODE_COUNTER(numchecks, "Number of checks in lookup");
STATNODE_COUNTER(nummiss, "Number of cache misses");
STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
STATNODE_COUNTER(numposzaps,
    "Number of cache hits (positive) we do not want to cache");
STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
STATNODE_COUNTER(numnegzaps,
    "Number of cache hits (negative) we do not want to cache");
STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
/* These count for kern___getcwd(), too. */
STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
STATNODE_COUNTER(numfullpathfail2,
    "Number of fullpath search errors (VOP_VPTOCNP failures)");
STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
static long zap_and_exit_bucket_fail; STATNODE_ULONG(zap_and_exit_bucket_fail,
    "Number of times zap_and_exit failed to lock");
static long cache_lock_vnodes_cel_3_failures;
STATNODE_ULONG(cache_lock_vnodes_cel_3_failures,
    "Number of times 3-way vnode locking failed");

static void cache_zap_locked(struct namecache *ncp, bool neg_locked);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static int cache_yield;
SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
    "Number of times cache called yield");

static void
cache_maybe_yield(void)
{

	if (should_yield()) {
		cache_yield++;
		kern_yield(PRI_USER);
	}
}

static inline void
cache_assert_vlp_locked(struct mtx *vlp)
{

	if (vlp != NULL)
		mtx_assert(vlp, MA_OWNED);
}

static inline void
cache_assert_vnode_locked(struct vnode *vp)
{
	struct mtx *vlp;

	vlp = VP2VNODELOCK(vp);
	cache_assert_vlp_locked(vlp);
}

static uint32_t
cache_get_hash(char *name, u_char len, struct vnode *dvp)
{
	uint32_t hash;

	hash = fnv_32_buf(name, len, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	return (hash);
}

static inline struct rwlock *
NCP2BUCKETLOCK(struct namecache *ncp)
{
	uint32_t hash;

	hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
	return (HASH2BUCKETLOCK(hash));
}

#ifdef INVARIANTS
static void
cache_assert_bucket_locked(struct namecache *ncp, int mode)
{
	struct rwlock *blp;

	blp = NCP2BUCKETLOCK(ncp);
	rw_assert(blp, mode);
}
#else
#define cache_assert_bucket_locked(x, y) do { } while (0)
#endif

#define cache_sort(x, y)	_cache_sort((void **)(x), (void **)(y))
static void
_cache_sort(void **p1, void **p2)
{
	void *tmp;

	/* Order the two pointers so that the lower address comes first. */
	if (*p1 > *p2) {
		tmp = *p2;
		*p2 = *p1;
		*p1 = tmp;
	}
}

static void
cache_lock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		rw_wlock(&bucketlocks[i]);
}

static void
cache_unlock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		rw_wunlock(&bucketlocks[i]);
}

static void
cache_lock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_lock(&vnodelocks[i]);
}

static void
cache_unlock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_unlock(&vnodelocks[i]);
}

static int
cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	cache_sort(&vlp1, &vlp2);

	if (vlp1 != NULL) {
		if (!mtx_trylock(vlp1))
			return (EAGAIN);
	}
	if (!mtx_trylock(vlp2)) {
		if (vlp1 != NULL)
			mtx_unlock(vlp1);
		return (EAGAIN);
	}

	return (0);
}

static void
cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	MPASS(vlp1 != NULL || vlp2 != NULL);

	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
}

static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct nchstats snap;

	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, sizeof(snap)));

	snap = nchstats;
	snap.ncs_goodhits = counter_u64_fetch(numposhits);
	snap.ncs_neghits = counter_u64_fetch(numneghits);
	snap.ncs_badhits = counter_u64_fetch(numposzaps) +
	    counter_u64_fetch(numnegzaps);
	snap.ncs_miss = counter_u64_fetch(nummisszap) +
	    counter_u64_fetch(nummiss);

	return (SYSCTL_OUT(req, &snap, sizeof(snap)));
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
    "VFS cache effectiveness statistics");

#ifdef DIAGNOSTIC
/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL,
    "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int i, error, n_nchash, *cntbuf;

retry:
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (req->oldptr == NULL)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
	cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
	cache_lock_all_buckets();
	if (n_nchash != nchash + 1) {
		cache_unlock_all_buckets();
		free(cntbuf, M_TEMP);
		goto retry;
	}
	/* Scan hash tables counting entries */
	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
		LIST_FOREACH(ncp, ncpp, nc_hash)
			cntbuf[i]++;
	cache_unlock_all_buckets();
	for (error = 0, i = 0; i < n_nchash; i++)
		if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
			break;
	free(cntbuf, M_TEMP);
	return (error);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int error, n_nchash;
	int count, maxlength, used, pct;

	if (req->oldptr == NULL)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	cache_lock_all_buckets();
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	cache_unlock_all_buckets();
	pct = (used * 100) / (n_nchash / 100);
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
#endif /* DIAGNOSTIC */

/*
 * Negative entries management
 *
 * A variation of the LRU scheme is used.  New entries are hashed into one of
 * numneglists cold lists.  Entries get promoted to the hot list on first hit.
 * Partial LRU for the hot list is maintained by requeueing them every
 * ncneghitsrequeue hits.
 *
 * The shrinker will demote the hot list head and evict from the cold list in a
 * round-robin manner.
 */

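/*
 * Illustrative sketch (not compiled) of the policy above.  With the
 * default ncneghitsrequeue of 8, the first hit promotes a cold entry to
 * the hot list and every 8th hit afterwards moves it back to the hot
 * list tail, approximating LRU without taking a list lock on most hits.
 * The helpers named here are hypothetical; see cache_negative_hit().
 */
#if 0
	hits = atomic_fetchadd_int(&ncp->nc_neghits, 1);
	if ((ncp->nc_flag & NCF_HOTNEGATIVE) == 0)
		promote_to_hot_list(ncp);
	else if ((hits % ncneghitsrequeue) == 0)
		requeue_at_hot_list_tail(ncp);
#endif
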
static void
cache_negative_hit(struct namecache *ncp)
{
	struct neglist *neglist;
	u_int hits;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	hits = atomic_fetchadd_int(&ncp->nc_neghits, 1);
	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
		if ((hits % ncneghitsrequeue) != 0)
			return;
		mtx_lock(&ncneg_hot.nl_lock);
		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
			TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
			TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
			mtx_unlock(&ncneg_hot.nl_lock);
			return;
		}
		/*
		 * The shrinker cleared the flag and removed the entry from
		 * the hot list. Put it back.
		 */
	} else {
		mtx_lock(&ncneg_hot.nl_lock);
	}
	neglist = NCP2NEGLIST(ncp);
	mtx_lock(&neglist->nl_lock);
	if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
		ncp->nc_flag |= NCF_HOTNEGATIVE;
	}
	mtx_unlock(&neglist->nl_lock);
	mtx_unlock(&ncneg_hot.nl_lock);
}

static void
cache_negative_insert(struct namecache *ncp, bool neg_locked)
{
	struct neglist *neglist;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	neglist = NCP2NEGLIST(ncp);
	if (!neg_locked) {
		mtx_lock(&neglist->nl_lock);
	} else {
		mtx_assert(&neglist->nl_lock, MA_OWNED);
	}
	TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
	if (!neg_locked)
		mtx_unlock(&neglist->nl_lock);
	atomic_add_rel_long(&numneg, 1);
}

static void
cache_negative_remove(struct namecache *ncp, bool neg_locked)
{
	struct neglist *neglist;
	bool hot_locked = false;
	bool list_locked = false;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	neglist = NCP2NEGLIST(ncp);
	if (!neg_locked) {
		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
			hot_locked = true;
			mtx_lock(&ncneg_hot.nl_lock);
			if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
				list_locked = true;
				mtx_lock(&neglist->nl_lock);
			}
		} else {
			list_locked = true;
			mtx_lock(&neglist->nl_lock);
		}
	}
	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
		mtx_assert(&ncneg_hot.nl_lock, MA_OWNED);
		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
	} else {
		mtx_assert(&neglist->nl_lock, MA_OWNED);
		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
	}
	if (list_locked)
		mtx_unlock(&neglist->nl_lock);
	if (hot_locked)
		mtx_unlock(&ncneg_hot.nl_lock);
	atomic_subtract_rel_long(&numneg, 1);
}

static void
cache_negative_shrink_select(int start, struct namecache **ncpp,
    struct neglist **neglistpp)
{
	struct neglist *neglist;
	struct namecache *ncp;
	int i;

	*ncpp = ncp = NULL;
	neglist = NULL;

	for (i = start; i < numneglists; i++) {
		neglist = &neglists[i];
		if (TAILQ_FIRST(&neglist->nl_list) == NULL)
			continue;
		mtx_lock(&neglist->nl_lock);
		ncp = TAILQ_FIRST(&neglist->nl_list);
		if (ncp != NULL)
			break;
		mtx_unlock(&neglist->nl_lock);
	}

	*neglistpp = neglist;
	*ncpp = ncp;
}

static void
cache_negative_zap_one(void)
{
	struct namecache *ncp, *ncp2;
	struct neglist *neglist;
	struct mtx *dvlp;
	struct rwlock *blp;

	if (!mtx_trylock(&ncneg_shrink_lock))
		return;

	mtx_lock(&ncneg_hot.nl_lock);
	ncp = TAILQ_FIRST(&ncneg_hot.nl_list);
	if (ncp != NULL) {
		neglist = NCP2NEGLIST(ncp);
		mtx_lock(&neglist->nl_lock);
		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
		ncp->nc_flag &= ~NCF_HOTNEGATIVE;
		mtx_unlock(&neglist->nl_lock);
	}

	cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
	shrink_list_turn++;
	if (shrink_list_turn == numneglists)
		shrink_list_turn = 0;
	if (ncp == NULL && shrink_list_turn == 0)
		cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
	if (ncp == NULL) {
		mtx_unlock(&ncneg_hot.nl_lock);
		goto out;
	}

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	blp = NCP2BUCKETLOCK(ncp);
	mtx_unlock(&neglist->nl_lock);
	mtx_unlock(&ncneg_hot.nl_lock);
	mtx_lock(dvlp);
	rw_wlock(blp);
	mtx_lock(&neglist->nl_lock);
	ncp2 = TAILQ_FIRST(&neglist->nl_list);
	if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) ||
	    blp != NCP2BUCKETLOCK(ncp2) || !(ncp2->nc_flag & NCF_NEGATIVE)) {
		ncp = NULL;
		goto out_unlock_all;
	}
	SDT_PROBE3(vfs, namecache, shrink_negative, done, ncp->nc_dvp,
	    ncp->nc_name, ncp->nc_neghits);

	cache_zap_locked(ncp, true);
out_unlock_all:
	mtx_unlock(&neglist->nl_lock);
	rw_wunlock(blp);
	mtx_unlock(dvlp);
out:
	mtx_unlock(&ncneg_shrink_lock);
	cache_free(ncp);
}

/*
 * cache_zap_locked():
 *
 *   Removes a namecache entry from cache, whether it contains an actual
 *   pointer to a vnode or is just a negative cache entry.
 */
static void
cache_zap_locked(struct namecache *ncp, bool neg_locked)
{

	if (!(ncp->nc_flag & NCF_NEGATIVE))
		cache_assert_vnode_locked(ncp->nc_vp);
	cache_assert_vnode_locked(ncp->nc_dvp);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp,
	    (ncp->nc_flag & NCF_NEGATIVE) ? NULL : ncp->nc_vp);
	LIST_REMOVE(ncp, nc_hash);
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_vp);
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		if (ncp == ncp->nc_vp->v_cache_dd)
			ncp->nc_vp->v_cache_dd = NULL;
	} else {
		SDT_PROBE3(vfs, namecache, zap_negative, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_neghits);
		cache_negative_remove(ncp, neg_locked);
	}
	if (ncp->nc_flag & NCF_ISDOTDOT) {
		if (ncp == ncp->nc_dvp->v_cache_dd)
			ncp->nc_dvp->v_cache_dd = NULL;
	} else {
		LIST_REMOVE(ncp, nc_src);
		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
			ncp->nc_flag |= NCF_DVDROP;
			atomic_subtract_rel_long(&numcachehv, 1);
		}
	}
	atomic_subtract_rel_long(&numcache, 1);
}

static void
cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
{
	struct rwlock *blp;

	MPASS(ncp->nc_dvp == vp);
	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_vnode_locked(vp);

	blp = NCP2BUCKETLOCK(ncp);
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
}

static bool
cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
    struct mtx **vlpp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	if (ncp->nc_flag & NCF_NEGATIVE) {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		return (true);
	}

	pvlp = VP2VNODELOCK(vp);
	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);

	if (*vlpp == vlp1 || *vlpp == vlp2) {
		to_unlock = *vlpp;
		*vlpp = NULL;
	} else {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_sort(&vlp1, &vlp2);
		if (vlp1 == pvlp) {
			mtx_lock(vlp2);
			to_unlock = vlp2;
		} else {
			if (!mtx_trylock(vlp1))
				goto out_relock;
			to_unlock = vlp1;
		}
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	if (to_unlock != NULL)
		mtx_unlock(to_unlock);
	return (true);

out_relock:
	mtx_unlock(vlp2);
	mtx_lock(vlp1);
	mtx_lock(vlp2);
	MPASS(*vlpp == NULL);
	*vlpp = vlp1;
	return (false);
}

static int
cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;
	int error = 0;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	pvlp = VP2VNODELOCK(vp);
	if (ncp->nc_flag & NCF_NEGATIVE) {
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		goto out;
	}

	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);
	cache_sort(&vlp1, &vlp2);
	if (vlp1 == pvlp) {
		mtx_lock(vlp2);
		to_unlock = vlp2;
	} else {
		if (!mtx_trylock(vlp1)) {
			error = EAGAIN;
			goto out;
		}
		to_unlock = vlp1;
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	mtx_unlock(to_unlock);
out:
	mtx_unlock(pvlp);
	return (error);
}

static int
cache_zap_wlocked_bucket(struct namecache *ncp, struct rwlock *blp)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_wunlock(blp);
	return (EAGAIN);
}

static int
cache_zap_rlocked_bucket(struct namecache *ncp, struct rwlock *blp)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_RLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		rw_runlock(blp);
		rw_wlock(blp);
		cache_zap_locked(ncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_runlock(blp);
	return (EAGAIN);
}

static int
cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp,
    struct mtx **vlpp1, struct mtx **vlpp2)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	cache_sort(&dvlp, &vlp);

	if (*vlpp1 == dvlp && *vlpp2 == vlp) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		*vlpp1 = NULL;
		*vlpp2 = NULL;
		return (0);
	}

	if (*vlpp1 != NULL)
		mtx_unlock(*vlpp1);
	if (*vlpp2 != NULL)
		mtx_unlock(*vlpp2);
	*vlpp1 = NULL;
	*vlpp2 = NULL;

	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_wunlock(blp);
	*vlpp1 = dvlp;
	*vlpp2 = vlp;
	if (*vlpp1 != NULL)
		mtx_lock(*vlpp1);
	mtx_lock(*vlpp2);
	rw_wlock(blp);
	return (EAGAIN);
}

static void
cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp)
{

	MPASS((blp != NULL) ^ (vlp != NULL));

	if (blp != NULL)
		rw_runlock(blp);
	else
		mtx_unlock(vlp);
}

static int __noinline
cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	int ltype;

	*vpp = dvp;
	CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
	    dvp, cnp->cn_nameptr);
	counter_u64_add(dothits, 1);
	SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
	if (tsp != NULL)
		timespecclear(tsp);
	if (ticksp != NULL)
		*ticksp = ticks;
	vrefact(*vpp);
	/*
	 * When we lookup "." we still can be asked to lock it
	 * differently...
	 */
	ltype = cnp->cn_lkflags & LK_TYPE_MASK;
	if (ltype != VOP_ISLOCKED(*vpp)) {
		if (ltype == LK_EXCLUSIVE) {
			vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
			if ((*vpp)->v_iflag & VI_DOOMED) {
				/* forced unmount */
				vrele(*vpp);
				*vpp = NULL;
				return (ENOENT);
			}
		} else
			vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
	}
	return (-1);
}

static __noinline int
cache_lookup_nomakeentry(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct timespec *tsp, int *ticksp)
{
	struct namecache *ncp;
	struct rwlock *blp;
	struct mtx *dvlp, *dvlp2;
	uint32_t hash;
	int error;

	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
		counter_u64_add(dotdothits, 1);
		dvlp = VP2VNODELOCK(dvp);
		dvlp2 = NULL;
		mtx_lock(dvlp);
retry_dotdot:
		ncp = dvp->v_cache_dd;
		if (ncp == NULL) {
			SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
			    "..", NULL);
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
			return (0);
		}
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
			if (ncp->nc_dvp != dvp)
				panic("dvp %p v_cache_dd %p\n", dvp, ncp);
			if (!cache_zap_locked_vnode_kl2(ncp,
			    dvp, &dvlp2))
				goto retry_dotdot;
			MPASS(dvp->v_cache_dd == NULL);
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
			cache_free(ncp);
		} else {
			dvp->v_cache_dd = NULL;
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
		}
		return (0);
	}

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
retry:
	if (LIST_EMPTY(NCHHASH(hash)))
		goto out_no_entry;

	rw_wlock(blp);

	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		counter_u64_add(numchecks, 1);
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		rw_wunlock(blp);
		goto out_no_entry;
	}

	counter_u64_add(numposzaps, 1);

	error = cache_zap_wlocked_bucket(ncp, blp);
	if (error != 0) {
		zap_and_exit_bucket_fail++;
		cache_maybe_yield();
		goto retry;
	}
	cache_free(ncp);
	return (0);
out_no_entry:
	SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, NULL);
	counter_u64_add(nummisszap, 1);
	return (0);
}

/*
 * Lookup a name in the name cache
 *
 * Arguments:
 * - dvp: Parent directory in which to search.
 * - vpp: Return argument.  Will contain desired vnode on cache hit.
 * - cnp: Parameters of the name search.  The most interesting bits of
 *	  the cn_flags field have the following meanings:
 *	- MAKEENTRY: If clear, free an entry from the cache rather than look
 *		it up.
 *	- ISDOTDOT: Must be set if and only if cn_nameptr == ".."
 * - tsp: Return storage for cache timestamp.  On a successful (positive
 *	  or negative) lookup, tsp will be filled with any timespec that
 *	  was stored when this cache entry was created.  However, it will
 *	  be clear for "." entries.
 * - ticks: Return storage for alternate cache timestamp.  On a successful
 *	  (positive or negative) lookup, it will contain the ticks value
 *	  that was current when the cache entry was created, unless cnp
 *	  was ".".
 *
 * Returns:
 * - -1: A positive cache hit.  vpp will contain the desired vnode.
 * - ENOENT: A negative cache hit, or dvp was recycled out from under us due
 *	  to a forced unmount.  vpp will not be modified.  If the entry
 *	  is a whiteout, then the ISWHITEOUT flag will be set in
 *	  cnp->cn_flags.
 * - 0: A cache miss.  vpp will not be modified.
 *
 * Locking:
 * On a cache hit, vpp will be returned locked and ref'd.  If we're looking up
 * .., dvp is unlocked.  If we're looking up . an extra ref is taken, but the
 * lock is not recursively acquired.
 */

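/*
 * Illustrative sketch (not compiled): how a filesystem lookup routine is
 * expected to consume the return values documented above.  The canonical
 * consumer is vfs_cache_lookup() further down in this file.
 */
#if 0
	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
	if (error == 0)			/* miss, ask the filesystem */
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)		/* positive hit, *vpp is usable */
		return (0);
	return (error);			/* ENOENT: negative hit */
#endif
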
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	struct namecache_ts *ncp_ts;
	struct namecache *ncp;
	struct rwlock *blp;
	struct mtx *dvlp;
	uint32_t hash;
	int error, ltype;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}

	counter_u64_add(numcalls, 1);

	if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'))
		return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp));

	if ((cnp->cn_flags & MAKEENTRY) == 0)
		return (cache_lookup_nomakeentry(dvp, vpp, cnp, tsp, ticksp));

retry:
	blp = NULL;
	dvlp = NULL;
	error = 0;
	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
		counter_u64_add(dotdothits, 1);
		dvlp = VP2VNODELOCK(dvp);
		mtx_lock(dvlp);
		ncp = dvp->v_cache_dd;
		if (ncp == NULL) {
			SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
			    "..", NULL);
			mtx_unlock(dvlp);
			return (0);
		}
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
			if (ncp->nc_flag & NCF_NEGATIVE)
				*vpp = NULL;
			else
				*vpp = ncp->nc_vp;
		} else
			*vpp = ncp->nc_dvp;
		/* Return failure if negative entry was found. */
		if (*vpp == NULL)
			goto negative_success;
		CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
		    dvp, cnp->cn_nameptr, *vpp);
		SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
		    *vpp);
		cache_out_ts(ncp, tsp, ticksp);
		if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
		    NCF_DTS && tsp != NULL) {
			ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
			*tsp = ncp_ts->nc_dotdottime;
		}
		goto success;
	}

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
	rw_rlock(blp);

	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		counter_u64_add(numchecks, 1);
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (__predict_false(ncp == NULL)) {
		rw_runlock(blp);
		SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
		    NULL);
		counter_u64_add(nummiss, 1);
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		counter_u64_add(numposhits, 1);
		*vpp = ncp->nc_vp;
		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
		    dvp, cnp->cn_nameptr, *vpp, ncp);
		SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
		    *vpp);
		cache_out_ts(ncp, tsp, ticksp);
		goto success;
	}

negative_success:
	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		counter_u64_add(numnegzaps, 1);
		goto zap_and_exit;
	}

	counter_u64_add(numneghits, 1);
	cache_negative_hit(ncp);
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
	    ncp->nc_name);
	cache_out_ts(ncp, tsp, ticksp);
	cache_lookup_unlock(blp, dvlp);
	return (ENOENT);

success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	MPASS(dvp != *vpp);
	ltype = 0;	/* silence gcc warning */
	if (cnp->cn_flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp, 0);
	}
	vhold(*vpp);
	cache_lookup_unlock(blp, dvlp);
	error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT) {
		vn_lock(dvp, ltype | LK_RETRY);
		if (dvp->v_iflag & VI_DOOMED) {
			if (error == 0)
				vput(*vpp);
			*vpp = NULL;
			return (ENOENT);
		}
	}
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
	}
	return (-1);

zap_and_exit:
	if (blp != NULL)
		error = cache_zap_rlocked_bucket(ncp, blp);
	else
		error = cache_zap_locked_vnode(ncp, dvp);
	if (error != 0) {
		zap_and_exit_bucket_fail++;
		cache_maybe_yield();
		goto retry;
	}
	cache_free(ncp);
	return (0);
}

struct celockstate {
	struct mtx *vlp[3];
	struct rwlock *blp[2];
};
CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3));
CTASSERT((nitems(((struct celockstate *)0)->blp) == 2));

static void
cache_celockstate_init(struct celockstate *cel)
{

	bzero(cel, sizeof(*cel));
}

static void
cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp,
    struct vnode *dvp)
{
	struct mtx *vlp1, *vlp2;

	MPASS(cel->vlp[0] == NULL);
	MPASS(cel->vlp[1] == NULL);
	MPASS(cel->vlp[2] == NULL);

	MPASS(vp != NULL || dvp != NULL);

	vlp1 = VP2VNODELOCK(vp);
	vlp2 = VP2VNODELOCK(dvp);
	cache_sort(&vlp1, &vlp2);

	if (vlp1 != NULL)
		mtx_lock(vlp1);
	mtx_lock(vlp2);
	cel->vlp[0] = vlp1;
	cel->vlp[1] = vlp2;
}

static void
cache_unlock_vnodes_cel(struct celockstate *cel)
{

	MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL);

	if (cel->vlp[0] != NULL)
		mtx_unlock(cel->vlp[0]);
	if (cel->vlp[1] != NULL)
		mtx_unlock(cel->vlp[1]);
	if (cel->vlp[2] != NULL)
		mtx_unlock(cel->vlp[2]);
}

static bool
cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
{
	struct mtx *vlp;
	bool ret;

	cache_assert_vlp_locked(cel->vlp[0]);
	cache_assert_vlp_locked(cel->vlp[1]);
	MPASS(cel->vlp[2] == NULL);

	MPASS(vp != NULL);
	vlp = VP2VNODELOCK(vp);

	ret = true;
	if (vlp >= cel->vlp[1]) {
		mtx_lock(vlp);
	} else {
		/*
		 * The third lock is lower than one already held, so taking
		 * it directly would invert the order.  Trylock it and, on
		 * failure, unlock everything and relock in order.
		 */
		if (mtx_trylock(vlp))
			goto out;
		cache_lock_vnodes_cel_3_failures++;
		cache_unlock_vnodes_cel(cel);
		ret = false;
		if (vlp < cel->vlp[0]) {
			mtx_lock(vlp);
			mtx_lock(cel->vlp[0]);
			mtx_lock(cel->vlp[1]);
		} else {
			if (cel->vlp[0] != NULL)
				mtx_lock(cel->vlp[0]);
			mtx_lock(vlp);
			mtx_lock(cel->vlp[1]);
		}
	}
out:
	cel->vlp[2] = vlp;
	return (ret);
}

static void
cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1,
    struct rwlock *blp2)
{

	MPASS(cel->blp[0] == NULL);
	MPASS(cel->blp[1] == NULL);

	cache_sort(&blp1, &blp2);

	if (blp1 != NULL) {
		rw_wlock(blp1);
		cel->blp[0] = blp1;
	}
	rw_wlock(blp2);
	cel->blp[1] = blp2;
}

static void
cache_unlock_buckets_cel(struct celockstate *cel)
{

	if (cel->blp[0] != NULL)
		rw_wunlock(cel->blp[0]);
	rw_wunlock(cel->blp[1]);
}

/*
 * Lock part of the cache affected by the insertion.
 *
 * This means vnodelocks for dvp, vp and the relevant bucketlock.
 * However, insertion can result in removal of an old entry. In this
 * case we have an additional vnode and bucketlock pair to lock. If the
 * entry is negative, ncelock is locked instead of the vnode.
 *
 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while
 * preserving the locking order (smaller address first).
 */

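/*
 * Illustrative sketch (not compiled): the worst-case lock set described
 * above, in the order cache_enter_lock() ends up holding it.  Within each
 * class the locks are sorted by address, vnode locks before bucket locks.
 */
#if 0
	mtx_lock(cel->vlp[0]);	/* lowest vnode lock */
	mtx_lock(cel->vlp[1]);
	mtx_lock(cel->vlp[2]);	/* vnode lock of the entry being displaced */
	rw_wlock(cel->blp[0]);	/* lower bucket lock */
	rw_wlock(cel->blp[1]);
#endif
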
static void
cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
    uint32_t hash)
{
	struct namecache *ncp;
	struct rwlock *blps[2];

	blps[0] = HASH2BUCKETLOCK(hash);
	for (;;) {
		blps[1] = NULL;
		cache_lock_vnodes_cel(cel, dvp, vp);
		if (vp == NULL || vp->v_type != VDIR)
			break;
		ncp = vp->v_cache_dd;
		if (ncp == NULL)
			break;
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
		MPASS(ncp->nc_dvp == vp);
		blps[1] = NCP2BUCKETLOCK(ncp);
		if (ncp->nc_flag & NCF_NEGATIVE)
			break;
		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
			break;
		/*
		 * All vnodes got re-locked. Re-validate the state and if
		 * nothing changed we are done. Otherwise restart.
		 */
		if (ncp == vp->v_cache_dd &&
		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
		    blps[1] == NCP2BUCKETLOCK(ncp) &&
		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
			break;
		cache_unlock_vnodes_cel(cel);
		cel->vlp[0] = NULL;
		cel->vlp[1] = NULL;
		cel->vlp[2] = NULL;
	}
	cache_lock_buckets_cel(cel, blps[0], blps[1]);
}

static void
cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
    uint32_t hash)
{
	struct namecache *ncp;
	struct rwlock *blps[2];

	blps[0] = HASH2BUCKETLOCK(hash);
	for (;;) {
		blps[1] = NULL;
		cache_lock_vnodes_cel(cel, dvp, vp);
		ncp = dvp->v_cache_dd;
		if (ncp == NULL)
			break;
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
		MPASS(ncp->nc_dvp == dvp);
		blps[1] = NCP2BUCKETLOCK(ncp);
		if (ncp->nc_flag & NCF_NEGATIVE)
			break;
		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
			break;
		if (ncp == dvp->v_cache_dd &&
		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
		    blps[1] == NCP2BUCKETLOCK(ncp) &&
		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
			break;
		cache_unlock_vnodes_cel(cel);
		cel->vlp[0] = NULL;
		cel->vlp[1] = NULL;
		cel->vlp[2] = NULL;
	}
	cache_lock_buckets_cel(cel, blps[0], blps[1]);
}

static void
cache_enter_unlock(struct celockstate *cel)
{

	cache_unlock_buckets_cel(cel);
	cache_unlock_vnodes_cel(cel);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
    struct timespec *tsp, struct timespec *dtsp)
{
	struct celockstate cel;
	struct namecache *ncp, *n2, *ndd;
	struct namecache_ts *ncp_ts, *n2_ts;
	struct nchashhead *ncpp;
	struct neglist *neglist;
	uint32_t hash;
	int flag;
	int len;
	bool neg_locked;
	long lnumcache;

	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
	    ("cache_enter: Adding a doomed vnode"));
	VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
	    ("cache_enter: Doomed vnode used as src"));

	if (__predict_false(!doingcache))
		return;

	/*
	 * Avoid blowout in namecache entries.
	 */
	lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
	if (__predict_false(lnumcache >= desiredvnodes * ncsizefactor)) {
		atomic_add_long(&numcache, -1);
		return;
	}

	cache_celockstate_init(&cel);
	ndd = NULL;
	ncp_ts = NULL;
	flag = 0;
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1)
			return;
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			len = cnp->cn_namelen;
			hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
			cache_enter_lock_dd(&cel, dvp, vp, hash);
			/*
			 * If dotdot entry already exists, just retarget it
			 * to new parent vnode, otherwise continue with new
			 * namecache entry allocation.
			 */
			if ((ncp = dvp->v_cache_dd) != NULL &&
			    ncp->nc_flag & NCF_ISDOTDOT) {
				KASSERT(ncp->nc_dvp == dvp,
				    ("wrong isdotdot parent"));
				neg_locked = false;
				if (ncp->nc_flag & NCF_NEGATIVE || vp == NULL) {
					neglist = NCP2NEGLIST(ncp);
					mtx_lock(&ncneg_hot.nl_lock);
					mtx_lock(&neglist->nl_lock);
					neg_locked = true;
				}
				if (!(ncp->nc_flag & NCF_NEGATIVE)) {
					TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
					    ncp, nc_dst);
				} else {
					cache_negative_remove(ncp, true);
				}
				if (vp != NULL) {
					TAILQ_INSERT_HEAD(&vp->v_cache_dst,
					    ncp, nc_dst);
					ncp->nc_flag &= ~(NCF_NEGATIVE|NCF_HOTNEGATIVE);
				} else {
					ncp->nc_flag &= ~(NCF_HOTNEGATIVE);
					ncp->nc_flag |= NCF_NEGATIVE;
					cache_negative_insert(ncp, true);
				}
				if (neg_locked) {
					mtx_unlock(&neglist->nl_lock);
					mtx_unlock(&ncneg_hot.nl_lock);
				}
				ncp->nc_vp = vp;
				cache_enter_unlock(&cel);
				return;
			}
			dvp->v_cache_dd = NULL;
			cache_enter_unlock(&cel);
			cache_celockstate_init(&cel);
			SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp);
			flag = NCF_ISDOTDOT;
		}
	}

	/*
	 * Calculate the hash key and setup as much of the new
	 * namecache entry as possible before acquiring the lock.
	 */
	ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
	ncp->nc_flag = flag;
	ncp->nc_vp = vp;
	if (vp == NULL)
		ncp->nc_flag |= NCF_NEGATIVE;
	ncp->nc_dvp = dvp;
	if (tsp != NULL) {
		ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
		ncp_ts->nc_time = *tsp;
		ncp_ts->nc_ticks = ticks;
		ncp_ts->nc_nc.nc_flag |= NCF_TS;
		if (dtsp != NULL) {
			ncp_ts->nc_dotdottime = *dtsp;
			ncp_ts->nc_nc.nc_flag |= NCF_DTS;
		}
	}
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
	strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
	cache_enter_lock(&cel, dvp, vp, hash);

	/*
	 * See if this vnode or negative entry is already in the cache
	 * with this name.  This can happen with concurrent lookups of
	 * the same path name.
	 */
	ncpp = NCHHASH(hash);
	LIST_FOREACH(n2, ncpp, nc_hash) {
		if (n2->nc_dvp == dvp &&
		    n2->nc_nlen == cnp->cn_namelen &&
		    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
			if (tsp != NULL) {
				KASSERT((n2->nc_flag & NCF_TS) != 0,
				    ("no NCF_TS"));
				n2_ts = __containerof(n2, struct namecache_ts, nc_nc);
				n2_ts->nc_time = ncp_ts->nc_time;
				n2_ts->nc_ticks = ncp_ts->nc_ticks;
				if (dtsp != NULL) {
					n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime;
					if (ncp->nc_flag & NCF_NEGATIVE)
						mtx_lock(&ncneg_hot.nl_lock);
					n2_ts->nc_nc.nc_flag |= NCF_DTS;
					if (ncp->nc_flag & NCF_NEGATIVE)
						mtx_unlock(&ncneg_hot.nl_lock);
				}
			}
			goto out_unlock_free;
		}
	}

	if (flag == NCF_ISDOTDOT) {
		/*
		 * See if we are trying to add .. entry, but some other lookup
		 * has populated v_cache_dd pointer already.
		 */
		if (dvp->v_cache_dd != NULL)
			goto out_unlock_free;
		KASSERT(vp == NULL || vp->v_type == VDIR,
		    ("wrong vnode type %p", vp));
		dvp->v_cache_dd = ncp;
	}

	if (vp != NULL) {
		if (vp->v_type == VDIR) {
			if (flag != NCF_ISDOTDOT) {
				/*
				 * For this case, the cache entry maps both the
				 * directory name in it and the name ".." for the
				 * directory's parent.
				 */
				if ((ndd = vp->v_cache_dd) != NULL) {
					if ((ndd->nc_flag & NCF_ISDOTDOT) != 0)
						cache_zap_locked(ndd, false);
					else
						ndd = NULL;
				}
				vp->v_cache_dd = ncp;
			}
		} else {
			vp->v_cache_dd = NULL;
		}
	}

	if (flag != NCF_ISDOTDOT) {
		if (LIST_EMPTY(&dvp->v_cache_src)) {
			vhold(dvp);
			atomic_add_rel_long(&numcachehv, 1);
		}
		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	}

	/*
	 * Insert the new namecache entry into the appropriate chain
	 * within the cache entries table.
	 */
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);

	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
	 */
	if (vp != NULL) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
		SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name,
		    vp);
	} else {
		if (cnp->cn_flags & ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITE;
		cache_negative_insert(ncp, false);
		SDT_PROBE2(vfs, namecache, enter_negative, done, dvp,
		    ncp->nc_name);
	}
	cache_enter_unlock(&cel);
	if (numneg * ncnegfactor > lnumcache)
		cache_negative_zap_one();
	cache_free(ndd);
	return;
out_unlock_free:
	cache_enter_unlock(&cel);
	cache_free(ncp);
	return;
}

/* Smallest power of 2 strictly greater than val; e.g. 9 -> 16, 16 -> 32. */
static u_int
cache_roundup_2(u_int val)
{
	u_int res;

	for (res = 1; res <= val; res <<= 1)
		continue;

	return (res);
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{
	u_int i;

	cache_zone_small = uma_zcreate("S VFS Cache",
	    sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache),
	    UMA_ZONE_ZINIT);
	cache_zone_small_ts = uma_zcreate("STS VFS Cache",
	    sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts),
	    UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache",
	    sizeof(struct namecache) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache),
	    UMA_ZONE_ZINIT);
	cache_zone_large_ts = uma_zcreate("LTS VFS Cache",
	    sizeof(struct namecache_ts) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts),
	    UMA_ZONE_ZINIT);

	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
	ncbuckethash = cache_roundup_2(mp_ncpus * 64) - 1;
	if (ncbuckethash > nchash)
		ncbuckethash = nchash;
	bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numbucketlocks; i++)
		rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
	ncvnodehash = cache_roundup_2(mp_ncpus * 64) - 1;
	vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numvnodelocks; i++)
		mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
	ncpurgeminvnodes = numbucketlocks;

	ncneghash = 3;
	neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numneglists; i++) {
		mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF);
		TAILQ_INIT(&neglists[i].nl_list);
	}
	mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF);
	TAILQ_INIT(&ncneg_hot.nl_list);

	mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);

	numcalls = counter_u64_alloc(M_WAITOK);
	dothits = counter_u64_alloc(M_WAITOK);
	dotdothits = counter_u64_alloc(M_WAITOK);
	numchecks = counter_u64_alloc(M_WAITOK);
	nummiss = counter_u64_alloc(M_WAITOK);
	nummisszap = counter_u64_alloc(M_WAITOK);
	numposzaps = counter_u64_alloc(M_WAITOK);
	numposhits = counter_u64_alloc(M_WAITOK);
	numnegzaps = counter_u64_alloc(M_WAITOK);
	numneghits = counter_u64_alloc(M_WAITOK);
	numfullpathcalls = counter_u64_alloc(M_WAITOK);
	numfullpathfail1 = counter_u64_alloc(M_WAITOK);
	numfullpathfail2 = counter_u64_alloc(M_WAITOK);
	numfullpathfail4 = counter_u64_alloc(M_WAITOK);
	numfullpathfound = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);

void
cache_changesize(int newmaxvnodes)
{
	struct nchashhead *new_nchashtbl, *old_nchashtbl;
	u_long new_nchash, old_nchash;
	struct namecache *ncp;
	uint32_t hash;
	u_long i;

	newmaxvnodes = cache_roundup_2(newmaxvnodes * 2);
	if (newmaxvnodes < numbucketlocks)
		newmaxvnodes = numbucketlocks;

	new_nchashtbl = hashinit(newmaxvnodes, M_VFSCACHE, &new_nchash);
	/* If same hash table size, nothing to do */
	if (nchash == new_nchash) {
		free(new_nchashtbl, M_VFSCACHE);
		return;
	}
	/*
	 * Move everything from the old hash table to the new table.
	 * None of the namecache entries in the table can be removed
	 * because to do so, they have to be removed from the hash table.
	 */
	cache_lock_all_vnodes();
	cache_lock_all_buckets();
	old_nchashtbl = nchashtbl;
	old_nchash = nchash;
	nchashtbl = new_nchashtbl;
	nchash = new_nchash;
	for (i = 0; i <= old_nchash; i++) {
		while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) {
			hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen,
			    ncp->nc_dvp);
			LIST_REMOVE(ncp, nc_hash);
			LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
		}
	}
	cache_unlock_all_buckets();
	cache_unlock_all_vnodes();
	free(old_nchashtbl, M_VFSCACHE);
}

/*
 * Invalidate all entries from and to a particular vnode.
 */
void
cache_purge(struct vnode *vp)
{
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp, *vlp2;

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge, done, vp);
	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
	    vp->v_cache_dd == NULL)
		return;
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	vlp2 = NULL;
	mtx_lock(vlp);
retry:
	while (!LIST_EMPTY(&vp->v_cache_src)) {
		ncp = LIST_FIRST(&vp->v_cache_src);
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	while (!TAILQ_EMPTY(&vp->v_cache_dst)) {
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	ncp = vp->v_cache_dd;
	if (ncp != NULL) {
		KASSERT(ncp->nc_flag & NCF_ISDOTDOT,
		    ("lost dotdot link"));
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
	mtx_unlock(vlp);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Invalidate all negative entries for a particular directory vnode.
 */
void
cache_purge_negative(struct vnode *vp)
{
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp;

	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
	if (LIST_EMPTY(&vp->v_cache_src))
		return;
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
		if (!(ncp->nc_flag & NCF_NEGATIVE))
			continue;
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	mtx_unlock(vlp);
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(struct mount *mp, bool force)
{
	TAILQ_HEAD(, namecache) ncps;
	struct mtx *vlp1, *vlp2;
	struct rwlock *blp;
	struct nchashhead *bucket;
	struct namecache *ncp, *nnp;
	u_long i, j, n_nchash;
	int error;

	/* Scan hash tables for applicable entries */
	SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
	if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes)
		return;
	TAILQ_INIT(&ncps);
	n_nchash = nchash + 1;
	vlp1 = vlp2 = NULL;
	for (i = 0; i < numbucketlocks; i++) {
		blp = (struct rwlock *)&bucketlocks[i];
		rw_wlock(blp);
		for (j = i; j < n_nchash; j += numbucketlocks) {
retry:
			bucket = &nchashtbl[j];
			LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
				cache_assert_bucket_locked(ncp, RA_WLOCKED);
				if (ncp->nc_dvp->v_mount != mp)
					continue;
				error = cache_zap_wlocked_bucket_kl(ncp, blp,
				    &vlp1, &vlp2);
				if (error != 0)
					goto retry;
				TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst);
			}
		}
		rw_wunlock(blp);
		if (vlp1 == NULL && vlp2 == NULL)
			cache_maybe_yield();
	}
	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);

	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 */
int
vfs_cache_lookup(struct vop_lookup_args *ap)
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)
		return (0);
	return (error);
}

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int __read_mostly disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
sys___getcwd(struct thread *td, struct __getcwd_args *uap)
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen,
	    MAXPATHLEN));
}

int
kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen,
    size_t path_max)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	struct vnode *cdir, *rdir;
	int error;

	if (__predict_false(disablecwd))
		return (ENODEV);
	if (__predict_false(buflen < 2))
		return (EINVAL);
	if (buflen > path_max)
		buflen = path_max;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	cdir = fdp->fd_cdir;
	vrefact(cdir);
	rdir = fdp->fd_rdir;
	vrefact(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
	vrele(rdir);
	vrele(cdir);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(bp);
#endif
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

static int __read_mostly disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the name
 * cache (if available)
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	struct vnode *rdir;
	int error;

	if (__predict_false(disablefullpath))
		return (ENODEV);
	if (__predict_false(vn == NULL))
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	rdir = fdp->fd_rdir;
	vrefact(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
	vrele(rdir);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

/*
 * This function is similar to vn_fullpath, but it attempts to look up the
 * pathname relative to the global root mount point.  This is required for the
 * auditing sub-system, as audited pathnames must be absolute, relative to the
 * global root mount point.
 */
int
vn_fullpath_global(struct thread *td, struct vnode *vn,
    char **retbuf, char **freebuf)
{
	char *buf;
	int error;

	if (__predict_false(disablefullpath))
		return (ENODEV);
	if (__predict_false(vn == NULL))
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

static int
vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
{
	struct vnode *dvp;
	struct namecache *ncp;
	struct mtx *vlp;
	int error;

	vlp = VP2VNODELOCK(*vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	}
	if (ncp != NULL) {
		if (*buflen < ncp->nc_nlen) {
			mtx_unlock(vlp);
			vrele(*vp);
			counter_u64_add(numfullpathfail4, 1);
			error = ENOMEM;
			SDT_PROBE3(vfs, namecache, fullpath, return, error,
			    vp, NULL);
			return (error);
		}
		*buflen -= ncp->nc_nlen;
		memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
		SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
		    ncp->nc_name, vp);
		dvp = *vp;
		*vp = ncp->nc_dvp;
		vref(*vp);
		mtx_unlock(vlp);
		vrele(dvp);
		return (0);
	}
	SDT_PROBE1(vfs, namecache, fullpath, miss, vp);

	mtx_unlock(vlp);
	vn_lock(*vp, LK_SHARED | LK_RETRY);
	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
	vput(*vp);
	if (error) {
		counter_u64_add(numfullpathfail2, 1);
		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
		return (error);
	}

	*vp = dvp;
	if (dvp->v_iflag & VI_DOOMED) {
		/* forced unmount */
		vrele(dvp);
		error = ENOENT;
		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
		return (error);
	}
	/*
	 * *vp has its use count incremented still.
	 */

	return (0);
}

/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */

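/*
 * Illustrative worked example: the path is assembled right to left.  For a
 * vnode resolving to "/usr/local", with buflen starting at the end of buf,
 * successive vn_vptocnp() calls and '/' insertions leave:
 *
 *	[ ...............local ]
 *	[ ..........usr/local ]
 *	[ ........./usr/local ]
 *
 * and *retbuf ends up pointing at the leading '/'.
 */
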
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
	int error, slash_prefixed;
#ifdef KDTRACE_HOOKS
	struct vnode *startvp = vp;
#endif
	struct vnode *vp1;

	buflen--;
	buf[buflen] = '\0';
	slash_prefixed = 0;
	error = 0;

	SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
	counter_u64_add(numfullpathcalls, 1);
	vref(vp);
	if (vp->v_type != VDIR) {
		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
		if (error)
			return (error);
		if (buflen == 0) {
			vrele(vp);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	while (vp != rdir && vp != rootvnode) {
		/*
		 * The vp vnode must be already fully constructed,
		 * since it is either found in namecache or obtained
		 * from VOP_VPTOCNP().  We may test for VV_ROOT safely
		 * without obtaining the vnode lock.
		 */
		if ((vp->v_vflag & VV_ROOT) != 0) {
			vn_lock(vp, LK_RETRY | LK_SHARED);

			/*
			 * With the vnode locked, check for races with
			 * unmount, forced or not.  Note that we
			 * already verified that vp is not equal to
			 * the root vnode, which means that
			 * mnt_vnodecovered can be NULL only for the
			 * case of unmount.
			 */
			if ((vp->v_iflag & VI_DOOMED) != 0 ||
			    (vp1 = vp->v_mount->mnt_vnodecovered) == NULL ||
			    vp1->v_mountedhere != vp->v_mount) {
				vput(vp);
				error = ENOENT;
				SDT_PROBE3(vfs, namecache, fullpath, return,
				    error, vp, NULL);
				break;
			}

			vref(vp1);
			vput(vp);
			vp = vp1;
			continue;
		}
		if (vp->v_type != VDIR) {
			vrele(vp);
			counter_u64_add(numfullpathfail1, 1);
			error = ENOTDIR;
			SDT_PROBE3(vfs, namecache, fullpath, return,
			    error, vp, NULL);
			break;
		}
		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
		if (error)
			break;
		if (buflen == 0) {
			vrele(vp);
			error = ENOMEM;
			SDT_PROBE3(vfs, namecache, fullpath, return, error,
			    startvp, NULL);
			break;
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	if (error)
		return (error);
	if (!slash_prefixed) {
		if (buflen == 0) {
			vrele(vp);
			counter_u64_add(numfullpathfail4, 1);
			SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
			    startvp, NULL);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
	}
	counter_u64_add(numfullpathfound, 1);
	vrele(vp);

	SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen);
	*retbuf = buf + buflen;
	return (0);
}

struct vnode *
vn_dir_dd_ino(struct vnode *vp)
{
	struct namecache *ncp;
	struct vnode *ddvp;
	struct mtx *vlp;

	ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
			continue;
		ddvp = ncp->nc_dvp;
		vhold(ddvp);
		mtx_unlock(vlp);
		if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread))
			return (NULL);
		return (ddvp);
	}
	mtx_unlock(vlp);
	return (NULL);
}

int
vn_commname(struct vnode *vp, char *buf, u_int buflen)
{
	struct namecache *ncp;
	struct mtx *vlp;
	int l;

	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	if (ncp == NULL) {
		mtx_unlock(vlp);
		return (ENOENT);
	}
	l = min(ncp->nc_nlen, buflen - 1);
	memcpy(buf, ncp->nc_name, l);
	mtx_unlock(vlp);
	buf[l] = '\0';
	return (0);
}

/*
 * This function updates path string to vnode's full global path
 * and checks the size of the new path string against the pathlen argument.
 *
 * Requires a locked, referenced vnode.
 * Vnode is re-locked on success or ENODEV, otherwise unlocked.
 *
 * If sysctl debug.disablefullpath is set, ENODEV is returned,
 * vnode is left locked and path remains untouched.
 *
 * If vp is a directory, the call to vn_fullpath_global() always succeeds
 * because it falls back to the ".." lookup if the namecache lookup fails.
 */

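/*
 * Illustrative sketch (not compiled): typical use after a lookup, assuming
 * "path" is a MAXPATHLEN-sized buffer holding the string that was just
 * resolved to the locked, referenced vnode "vp".
 */
#if 0
	error = vn_path_to_global_path(td, vp, path, MAXPATHLEN);
	if (error == ENODEV) {
		/* Translation disabled; vp still locked, path unchanged. */
	} else if (error != 0) {
		/* vp was unlocked; path contents are not to be trusted. */
		return (error);
	}
	/* On success vp is locked again and path is absolute. */
#endif
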
int
vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
    u_int pathlen)
{
	struct nameidata nd;
	struct vnode *vp1;
	char *rpath, *fbuf;
	int error;

	ASSERT_VOP_ELOCKED(vp, __func__);

	/* Return ENODEV if sysctl debug.disablefullpath==1 */
	if (__predict_false(disablefullpath))
		return (ENODEV);

	/* Construct global filesystem path from vp. */
	VOP_UNLOCK(vp, 0);
	error = vn_fullpath_global(td, vp, &rpath, &fbuf);

	if (error != 0) {
		vrele(vp);
		return (error);
	}

	if (strlen(rpath) >= pathlen) {
		vrele(vp);
		error = ENAMETOOLONG;
		goto out;
	}

	/*
	 * Re-lookup the vnode by path to detect a possible rename.
	 * As a side effect, the vnode is relocked.
	 * If vnode was renamed, return ENOENT.
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, path, td);
	error = namei(&nd);
	if (error != 0) {
		vrele(vp);
		goto out;
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp1 = nd.ni_vp;
	vrele(vp);
	if (vp1 == vp)
		strcpy(path, rpath);
	else {
		vput(vp1);
		error = ENOENT;
	}

out:
	free(fbuf, M_TEMP);
	return (error);
}

#ifdef DDB
static void
db_print_vpath(struct vnode *vp)
{

	while (vp != NULL) {
		db_printf("%p: ", vp);
		if (vp == rootvnode) {
			db_printf("root\n");
			break;
		}
		if (vp->v_vflag & VV_ROOT) {
			db_printf("<mount point>");
			vp = vp->v_mount->mnt_vnodecovered;
		} else {
			struct namecache *ncp;
			char *ncn;
			int i;

			ncp = TAILQ_FIRST(&vp->v_cache_dst);
			if (ncp != NULL) {
				ncn = ncp->nc_name;
				for (i = 0; i < ncp->nc_nlen; i++)
					db_printf("%c", *ncn++);
				vp = ncp->nc_dvp;
			} else {
				vp = NULL;
			}
		}
		db_printf("\n");
	}
}

DB_SHOW_COMMAND(vpath, db_show_vpath)
{
	struct vnode *vp;

	if (!have_addr) {
		db_printf("usage: show vpath <struct vnode *>\n");
		return;
	}

	vp = (struct vnode *)addr;
	db_print_vpath(vp);
}

#endif