2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1989, 1993, 1995
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
8 * Poul-Henning Kamp of the FreeBSD Project.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
41 #include "opt_ktrace.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/counter.h>
46 #include <sys/filedesc.h>
47 #include <sys/fnv_hash.h>
48 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/fcntl.h>
52 #include <sys/mount.h>
53 #include <sys/namei.h>
55 #include <sys/rwlock.h>
58 #include <sys/syscallsubr.h>
59 #include <sys/sysctl.h>
60 #include <sys/sysproto.h>
61 #include <sys/vnode.h>
63 #include <sys/ktrace.h>
72 SDT_PROVIDER_DECLARE(vfs);
73 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
75 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
77 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
78 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
79 "char *", "struct vnode *");
80 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
81 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
82 "struct vnode *", "char *");
83 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
85 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
86 "struct vnode *", "char *");
87 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
89 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
90 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
91 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
92 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
94 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
96 SDT_PROBE_DEFINE2(vfs, namecache, shrink_negative, done, "struct vnode *",
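/*
 * The SDT probes above can be inspected from userland with dtrace(1).
 * An illustrative one-liner counting namecache lookup misses per process
 * (argument layout follows the probe definitions above) might look like:
 *
 *	dtrace -n 'vfs:namecache:lookup:miss { @[execname] = count(); }'
 */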
100 * This structure describes the elements in the cache of recent
101 * names looked up by namei.
105 LIST_ENTRY(namecache) nc_src; /* source vnode list */
106 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */
107 LIST_ENTRY(namecache) nc_hash; /* hash chain */
108 struct vnode *nc_dvp; /* vnode of parent of name */
110 struct vnode *nu_vp; /* vnode the name refers to */
112 u_char nc_flag; /* flag bits */
113 u_char nc_nlen; /* length of name */
114 char nc_name[0]; /* segment name + nul */
118 * struct namecache_ts repeats struct namecache layout up to the
120 * struct namecache_ts is used in place of struct namecache when time(s) need
121 * to be stored. The nc_dotdottime field is used when a cache entry is mapping
122 * both a non-dotdot directory name plus dotdot for the directory's
123 * parent.
125 * See below for alignment requirement.
127 struct namecache_ts {
128 struct timespec nc_time; /* timespec provided by fs */
129 struct timespec nc_dotdottime; /* dotdot timespec provided by fs */
130 int nc_ticks; /* ticks value when entry was added */
131 struct namecache nc_nc;
135 * At least mips n32 performs 64-bit accesses to timespec as found
136 * in namecache_ts and requires them to be aligned. Since other platforms
137 * may be in the same spot, suffer a little bit and enforce the
138 * alignment for everyone. Note this is a nop for 64-bit platforms.
140 #define CACHE_ZONE_ALIGNMENT UMA_ALIGNOF(time_t)
142 #define nc_vp n_un.nu_vp
145 * Flags in namecache.nc_flag
147 #define NCF_WHITE 0x01
148 #define NCF_ISDOTDOT 0x02
151 #define NCF_DVDROP 0x10
152 #define NCF_NEGATIVE 0x20
153 #define NCF_HOTNEGATIVE 0x40
156 * Name caching works as follows:
158 * Names found by directory scans are retained in a cache
159 * for future reference. It is managed LRU, so frequently
160 * used names will hang around. Cache is indexed by hash value
161 * obtained from (dvp, name) where dvp refers to the directory
164 * If it is a "negative" entry, (i.e. for a name that is known NOT to
165 * exist) the vnode pointer will be NULL.
167 * Upon reaching the last segment of a path, if the reference
168 * is for DELETE, or NOCACHE is set (rewrite), and the
169 * name is located in the cache, it will be dropped.
171 * These locks are used (in the order in which they can be taken):
173 * vnodelock mtx vnode lists and v_cache_dd field protection
174 * bucketlock rwlock for access to given set of hash buckets
175 * neglist mtx negative entry LRU management
177 * Additionally, ncneg_shrink_lock mtx is used to have at most one thread
178 * shrinking the LRU list.
180 * It is legal to take multiple vnodelock and bucketlock locks. The locking
181 * order is lower address first. Both are recursive.
183 * "." lookups are lockless.
185 * ".." and vnode -> name lookups require vnodelock.
187 * name -> vnode lookup requires the relevant bucketlock to be held for reading.
189 * Insertions and removals of entries require involved vnodes and bucketlocks
190 * to be write-locked to prevent other threads from seeing the entry.
192 * Some lookups result in removal of the found entry (e.g. getting rid of a
193 * negative entry with the intent to create a positive one), which poses a
194 * problem when multiple threads reach the same state. Similarly, two different
195 * threads can purge two different vnodes and try to remove the same name.
197 * If the already held vnode lock is lower than the second required lock, we
198 * can just take the other lock. However, in the opposite case, this could
199 * deadlock. This is resolved by trylocking; if that fails, the first lock is
200 * dropped, all locks are taken in order and the state is revalidated.
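/*
 * Illustrative sketch of the trylock-then-relock pattern described above;
 * vlp1/vlp2 are placeholder lock variables, not actual code from this file:
 *
 *	// vlp1 is already held, vlp2 is the second lock that is needed
 *	if (!mtx_trylock(vlp2)) {
 *		mtx_unlock(vlp1);
 *		cache_sort_vnodes(&vlp1, &vlp2);	// lower address first
 *		mtx_lock(vlp1);
 *		mtx_lock(vlp2);
 *		// revalidate: the entry may have been changed or freed
 *	}
 */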
204 * Structures associated with name caching.
206 #define NCHHASH(hash) \
207 (&nchashtbl[(hash) & nchash])
208 static __read_mostly LIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
209 static u_long __read_mostly nchash; /* size of hash table */
210 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
211 "Size of namecache hash table");
212 static u_long __read_mostly ncnegfactor = 5; /* ratio of negative entries */
213 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
214 "Ratio of negative namecache entries");
215 static u_long __exclusive_cache_line numneg; /* number of negative entries allocated */
216 static u_long __exclusive_cache_line numcache;/* number of cache entries allocated */
217 u_int ncsizefactor = 2;
218 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
219 "Size factor for namecache");
220 static u_int __read_mostly ncpurgeminvnodes;
221 SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
222 "Number of vnodes below which purgevfs ignores the request");
223 static u_int __read_mostly ncsize; /* the size as computed on creation or resizing */
225 struct nchstats nchstats; /* cache effectiveness statistics */
227 static struct mtx __exclusive_cache_line ncneg_shrink_lock;
228 static int shrink_list_turn;
232 TAILQ_HEAD(, namecache) nl_list;
233 } __aligned(CACHE_LINE_SIZE);
235 static struct neglist __read_mostly *neglists;
236 static struct neglist ncneg_hot;
237 static u_long numhotneg;
240 #define numneglists (ncneghash + 1)
241 static inline struct neglist *
242 NCP2NEGLIST(struct namecache *ncp)
245 return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
248 #define numbucketlocks (ncbuckethash + 1)
249 static u_int __read_mostly ncbuckethash;
250 static struct rwlock_padalign __read_mostly *bucketlocks;
251 #define HASH2BUCKETLOCK(hash) \
252 ((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))
254 #define numvnodelocks (ncvnodehash + 1)
255 static u_int __read_mostly ncvnodehash;
256 static struct mtx __read_mostly *vnodelocks;
257 static inline struct mtx *
258 VP2VNODELOCK(struct vnode *vp)
261 return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
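/*
 * The shift before masking presumably discards low-order pointer bits that
 * carry little entropy due to allocator alignment; NCP2NEGLIST() above
 * relies on the same trick.
 */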
265 * UMA zones for the VFS cache.
267 * The small cache is used for entries with short names, which are the
268 * most common. The large cache is used for entries which are too big to
269 * fit in the small cache.
271 static uma_zone_t __read_mostly cache_zone_small;
272 static uma_zone_t __read_mostly cache_zone_small_ts;
273 static uma_zone_t __read_mostly cache_zone_large;
274 static uma_zone_t __read_mostly cache_zone_large_ts;
276 #define CACHE_PATH_CUTOFF 35
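/*
 * Names of up to CACHE_PATH_CUTOFF bytes fit in the "small" zones created
 * in nchinit() below (sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1 bytes
 * per entry); longer names, up to NAME_MAX, fall back to the "large" zones.
 */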
278 static struct namecache *
279 cache_alloc(int len, int ts)
281 struct namecache_ts *ncp_ts;
282 struct namecache *ncp;
284 if (__predict_false(ts)) {
285 if (len <= CACHE_PATH_CUTOFF)
286 ncp_ts = uma_zalloc(cache_zone_small_ts, M_WAITOK);
288 ncp_ts = uma_zalloc(cache_zone_large_ts, M_WAITOK);
289 ncp = &ncp_ts->nc_nc;
291 if (len <= CACHE_PATH_CUTOFF)
292 ncp = uma_zalloc(cache_zone_small, M_WAITOK);
294 ncp = uma_zalloc(cache_zone_large, M_WAITOK);
300 cache_free(struct namecache *ncp)
302 struct namecache_ts *ncp_ts;
306 if ((ncp->nc_flag & NCF_DVDROP) != 0)
308 if (__predict_false(ncp->nc_flag & NCF_TS)) {
309 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
310 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
311 uma_zfree(cache_zone_small_ts, ncp_ts);
313 uma_zfree(cache_zone_large_ts, ncp_ts);
315 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
316 uma_zfree(cache_zone_small, ncp);
318 uma_zfree(cache_zone_large, ncp);
323 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
325 struct namecache_ts *ncp_ts;
327 KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
328 (tsp == NULL && ticksp == NULL),
331 if (tsp == NULL && ticksp == NULL)
334 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
336 *tsp = ncp_ts->nc_time;
338 *ticksp = ncp_ts->nc_ticks;
341 static int __read_mostly doingcache = 1; /* 1 => enable the cache */
342 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
343 "VFS namecache enabled");
345 /* Export size information to userland */
346 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
347 sizeof(struct namecache), "sizeof(struct namecache)");
350 * The new name cache statistics
352 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
353 "Name cache statistics");
354 #define STATNODE_ULONG(name, descr) \
355 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
356 #define STATNODE_COUNTER(name, descr) \
357 static counter_u64_t __read_mostly name; \
358 SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr);
359 STATNODE_ULONG(numneg, "Number of negative cache entries");
360 STATNODE_ULONG(numcache, "Number of cache entries");
361 STATNODE_COUNTER(numcachehv, "Number of namecache entries with vnodes held");
362 STATNODE_COUNTER(numcalls, "Number of cache lookups");
363 STATNODE_COUNTER(dothits, "Number of '.' hits");
364 STATNODE_COUNTER(dotdothits, "Number of '..' hits");
365 STATNODE_COUNTER(numchecks, "Number of checks in lookup");
366 STATNODE_COUNTER(nummiss, "Number of cache misses");
367 STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
368 STATNODE_COUNTER(numposzaps,
369 "Number of cache hits (positive) we do not want to cache");
370 STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
371 STATNODE_COUNTER(numnegzaps,
372 "Number of cache hits (negative) we do not want to cache");
373 STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
374 /* These count for kern___getcwd(), too. */
375 STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
376 STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
377 STATNODE_COUNTER(numfullpathfail2,
378 "Number of fullpath search errors (VOP_VPTOCNP failures)");
379 STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
380 STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
381 STATNODE_COUNTER(zap_and_exit_bucket_relock_success,
382 "Number of successful removals after relocking");
383 static long zap_and_exit_bucket_fail; STATNODE_ULONG(zap_and_exit_bucket_fail,
384 "Number of times zap_and_exit failed to lock");
385 static long zap_and_exit_bucket_fail2; STATNODE_ULONG(zap_and_exit_bucket_fail2,
386 "Number of times zap_and_exit failed to lock");
387 static long cache_lock_vnodes_cel_3_failures;
388 STATNODE_ULONG(cache_lock_vnodes_cel_3_failures,
389 "Number of times 3-way vnode locking failed");
390 STATNODE_ULONG(numhotneg, "Number of hot negative entries");
391 STATNODE_COUNTER(numneg_evicted,
392 "Number of negative entries evicted when adding a new entry");
393 STATNODE_COUNTER(shrinking_skipped,
394 "Number of times shrinking was already in progress");
396 static void cache_zap_locked(struct namecache *ncp, bool neg_locked);
397 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
398 char *buf, char **retbuf, u_int buflen);
400 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
402 static int cache_yield;
403 SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
404 "Number of times cache called yield");
406 static void __noinline
407 cache_maybe_yield(void)
410 if (should_yield()) {
412 kern_yield(PRI_USER);
417 cache_assert_vlp_locked(struct mtx *vlp)
421 mtx_assert(vlp, MA_OWNED);
425 cache_assert_vnode_locked(struct vnode *vp)
429 vlp = VP2VNODELOCK(vp);
430 cache_assert_vlp_locked(vlp);
434 cache_get_hash(char *name, u_char len, struct vnode *dvp)
438 hash = fnv_32_buf(name, len, FNV1_32_INIT);
439 hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
443 static inline struct rwlock *
444 NCP2BUCKETLOCK(struct namecache *ncp)
448 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
449 return (HASH2BUCKETLOCK(hash));
454 cache_assert_bucket_locked(struct namecache *ncp, int mode)
458 blp = NCP2BUCKETLOCK(ncp);
459 rw_assert(blp, mode);
462 #define cache_assert_bucket_locked(x, y) do { } while (0)
465 #define cache_sort_vnodes(x, y) _cache_sort_vnodes((void **)(x), (void **)(y))
467 _cache_sort_vnodes(void **p1, void **p2)
471 MPASS(*p1 != NULL || *p2 != NULL);
481 cache_lock_all_buckets(void)
485 for (i = 0; i < numbucketlocks; i++)
486 rw_wlock(&bucketlocks[i]);
490 cache_unlock_all_buckets(void)
494 for (i = 0; i < numbucketlocks; i++)
495 rw_wunlock(&bucketlocks[i]);
499 cache_lock_all_vnodes(void)
503 for (i = 0; i < numvnodelocks; i++)
504 mtx_lock(&vnodelocks[i]);
508 cache_unlock_all_vnodes(void)
512 for (i = 0; i < numvnodelocks; i++)
513 mtx_unlock(&vnodelocks[i]);
517 cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
520 cache_sort_vnodes(&vlp1, &vlp2);
523 if (!mtx_trylock(vlp1))
526 if (!mtx_trylock(vlp2)) {
536 cache_lock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
539 MPASS(vlp1 != NULL || vlp2 != NULL);
549 cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
552 MPASS(vlp1 != NULL || vlp2 != NULL);
561 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
563 struct nchstats snap;
565 if (req->oldptr == NULL)
566 return (SYSCTL_OUT(req, 0, sizeof(snap)));
569 snap.ncs_goodhits = counter_u64_fetch(numposhits);
570 snap.ncs_neghits = counter_u64_fetch(numneghits);
571 snap.ncs_badhits = counter_u64_fetch(numposzaps) +
572 counter_u64_fetch(numnegzaps);
573 snap.ncs_miss = counter_u64_fetch(nummisszap) +
574 counter_u64_fetch(nummiss);
576 return (SYSCTL_OUT(req, &snap, sizeof(snap)));
578 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
579 CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
580 "VFS cache effectiveness statistics");
584 * Grab an atomic snapshot of the name cache hash chain lengths
586 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL,
590 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
592 struct nchashhead *ncpp;
593 struct namecache *ncp;
594 int i, error, n_nchash, *cntbuf;
597 n_nchash = nchash + 1; /* nchash is max index, not count */
598 if (req->oldptr == NULL)
599 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
600 cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
601 cache_lock_all_buckets();
602 if (n_nchash != nchash + 1) {
603 cache_unlock_all_buckets();
604 free(cntbuf, M_TEMP);
607 /* Scan hash tables counting entries */
608 for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
609 LIST_FOREACH(ncp, ncpp, nc_hash)
611 cache_unlock_all_buckets();
612 for (error = 0, i = 0; i < n_nchash; i++)
613 if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
615 free(cntbuf, M_TEMP);
618 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
619 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
620 "nchash chain lengths");
623 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
626 struct nchashhead *ncpp;
627 struct namecache *ncp;
629 int count, maxlength, used, pct;
632 return SYSCTL_OUT(req, 0, 4 * sizeof(int));
634 cache_lock_all_buckets();
635 n_nchash = nchash + 1; /* nchash is max index, not count */
639 /* Scan hash tables for applicable entries */
640 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
642 LIST_FOREACH(ncp, ncpp, nc_hash) {
647 if (maxlength < count)
650 n_nchash = nchash + 1;
651 cache_unlock_all_buckets();
652 pct = (used * 100) / (n_nchash / 100);
653 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
656 error = SYSCTL_OUT(req, &used, sizeof(used));
659 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
662 error = SYSCTL_OUT(req, &pct, sizeof(pct));
667 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
668 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
669 "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
673 * Negative entries management
675 * A variation of the LRU scheme is used. New entries are hashed into one of
676 * numneglists cold lists. Entries get promoted to the hot list on first hit.
678 * The shrinker will demote hot list head and evict from the cold list in a
679 * round-robin manner.
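/*
 * Eviction is driven from cache_enter_time() below, which calls
 * cache_negative_zap_one() once numneg * ncnegfactor exceeds the total
 * number of cache entries.
 */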
682 cache_negative_hit(struct namecache *ncp)
684 struct neglist *neglist;
686 MPASS(ncp->nc_flag & NCF_NEGATIVE);
687 if (ncp->nc_flag & NCF_HOTNEGATIVE)
689 neglist = NCP2NEGLIST(ncp);
690 mtx_lock(&ncneg_hot.nl_lock);
691 mtx_lock(&neglist->nl_lock);
692 if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
694 TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
695 TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
696 ncp->nc_flag |= NCF_HOTNEGATIVE;
698 mtx_unlock(&neglist->nl_lock);
699 mtx_unlock(&ncneg_hot.nl_lock);
703 cache_negative_insert(struct namecache *ncp, bool neg_locked)
705 struct neglist *neglist;
707 MPASS(ncp->nc_flag & NCF_NEGATIVE);
708 cache_assert_bucket_locked(ncp, RA_WLOCKED);
709 neglist = NCP2NEGLIST(ncp);
711 mtx_lock(&neglist->nl_lock);
713 mtx_assert(&neglist->nl_lock, MA_OWNED);
715 TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
717 mtx_unlock(&neglist->nl_lock);
718 atomic_add_rel_long(&numneg, 1);
722 cache_negative_remove(struct namecache *ncp, bool neg_locked)
724 struct neglist *neglist;
725 bool hot_locked = false;
726 bool list_locked = false;
728 MPASS(ncp->nc_flag & NCF_NEGATIVE);
729 cache_assert_bucket_locked(ncp, RA_WLOCKED);
730 neglist = NCP2NEGLIST(ncp);
732 if (ncp->nc_flag & NCF_HOTNEGATIVE) {
734 mtx_lock(&ncneg_hot.nl_lock);
735 if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
737 mtx_lock(&neglist->nl_lock);
741 mtx_lock(&neglist->nl_lock);
744 if (ncp->nc_flag & NCF_HOTNEGATIVE) {
745 mtx_assert(&ncneg_hot.nl_lock, MA_OWNED);
746 TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
749 mtx_assert(&neglist->nl_lock, MA_OWNED);
750 TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
753 mtx_unlock(&neglist->nl_lock);
755 mtx_unlock(&ncneg_hot.nl_lock);
756 atomic_subtract_rel_long(&numneg, 1);
760 cache_negative_shrink_select(int start, struct namecache **ncpp,
761 struct neglist **neglistpp)
763 struct neglist *neglist;
764 struct namecache *ncp;
770 for (i = start; i < numneglists; i++) {
771 neglist = &neglists[i];
772 if (TAILQ_FIRST(&neglist->nl_list) == NULL)
774 mtx_lock(&neglist->nl_lock);
775 ncp = TAILQ_FIRST(&neglist->nl_list);
778 mtx_unlock(&neglist->nl_lock);
781 *neglistpp = neglist;
786 cache_negative_zap_one(void)
788 struct namecache *ncp, *ncp2;
789 struct neglist *neglist;
793 if (mtx_owner(&ncneg_shrink_lock) != NULL ||
794 !mtx_trylock(&ncneg_shrink_lock)) {
795 counter_u64_add(shrinking_skipped, 1);
799 mtx_lock(&ncneg_hot.nl_lock);
800 ncp = TAILQ_FIRST(&ncneg_hot.nl_list);
802 neglist = NCP2NEGLIST(ncp);
803 mtx_lock(&neglist->nl_lock);
804 TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
805 TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
806 ncp->nc_flag &= ~NCF_HOTNEGATIVE;
808 mtx_unlock(&neglist->nl_lock);
810 mtx_unlock(&ncneg_hot.nl_lock);
812 cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
814 if (shrink_list_turn == numneglists)
815 shrink_list_turn = 0;
816 if (ncp == NULL && shrink_list_turn == 0)
817 cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
818 mtx_unlock(&ncneg_shrink_lock);
822 MPASS(ncp->nc_flag & NCF_NEGATIVE);
823 dvlp = VP2VNODELOCK(ncp->nc_dvp);
824 blp = NCP2BUCKETLOCK(ncp);
825 mtx_unlock(&neglist->nl_lock);
828 mtx_lock(&neglist->nl_lock);
829 ncp2 = TAILQ_FIRST(&neglist->nl_list);
830 if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) ||
831 blp != NCP2BUCKETLOCK(ncp2) || !(ncp2->nc_flag & NCF_NEGATIVE)) {
834 SDT_PROBE2(vfs, namecache, shrink_negative, done, ncp->nc_dvp,
837 cache_zap_locked(ncp, true);
838 counter_u64_add(numneg_evicted, 1);
840 mtx_unlock(&neglist->nl_lock);
847 * cache_zap_locked():
849 * Removes a namecache entry from the cache, whether it contains an actual
850 * pointer to a vnode or is just a negative cache entry.
853 cache_zap_locked(struct namecache *ncp, bool neg_locked)
856 if (!(ncp->nc_flag & NCF_NEGATIVE))
857 cache_assert_vnode_locked(ncp->nc_vp);
858 cache_assert_vnode_locked(ncp->nc_dvp);
859 cache_assert_bucket_locked(ncp, RA_WLOCKED);
861 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp,
862 (ncp->nc_flag & NCF_NEGATIVE) ? NULL : ncp->nc_vp);
863 LIST_REMOVE(ncp, nc_hash);
864 if (!(ncp->nc_flag & NCF_NEGATIVE)) {
865 SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
866 ncp->nc_name, ncp->nc_vp);
867 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
868 if (ncp == ncp->nc_vp->v_cache_dd)
869 ncp->nc_vp->v_cache_dd = NULL;
871 SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp,
873 cache_negative_remove(ncp, neg_locked);
875 if (ncp->nc_flag & NCF_ISDOTDOT) {
876 if (ncp == ncp->nc_dvp->v_cache_dd)
877 ncp->nc_dvp->v_cache_dd = NULL;
879 LIST_REMOVE(ncp, nc_src);
880 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
881 ncp->nc_flag |= NCF_DVDROP;
882 counter_u64_add(numcachehv, -1);
885 atomic_subtract_rel_long(&numcache, 1);
889 cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
893 MPASS(ncp->nc_dvp == vp);
894 MPASS(ncp->nc_flag & NCF_NEGATIVE);
895 cache_assert_vnode_locked(vp);
897 blp = NCP2BUCKETLOCK(ncp);
899 cache_zap_locked(ncp, false);
904 cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
907 struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
910 MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
911 cache_assert_vnode_locked(vp);
913 if (ncp->nc_flag & NCF_NEGATIVE) {
918 cache_zap_negative_locked_vnode_kl(ncp, vp);
922 pvlp = VP2VNODELOCK(vp);
923 blp = NCP2BUCKETLOCK(ncp);
924 vlp1 = VP2VNODELOCK(ncp->nc_dvp);
925 vlp2 = VP2VNODELOCK(ncp->nc_vp);
927 if (*vlpp == vlp1 || *vlpp == vlp2) {
935 cache_sort_vnodes(&vlp1, &vlp2);
940 if (!mtx_trylock(vlp1))
946 cache_zap_locked(ncp, false);
948 if (to_unlock != NULL)
949 mtx_unlock(to_unlock);
956 MPASS(*vlpp == NULL);
961 static int __noinline
962 cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
964 struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
968 MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
969 cache_assert_vnode_locked(vp);
971 pvlp = VP2VNODELOCK(vp);
972 if (ncp->nc_flag & NCF_NEGATIVE) {
973 cache_zap_negative_locked_vnode_kl(ncp, vp);
977 blp = NCP2BUCKETLOCK(ncp);
978 vlp1 = VP2VNODELOCK(ncp->nc_dvp);
979 vlp2 = VP2VNODELOCK(ncp->nc_vp);
980 cache_sort_vnodes(&vlp1, &vlp2);
985 if (!mtx_trylock(vlp1)) {
992 cache_zap_locked(ncp, false);
994 mtx_unlock(to_unlock);
1001 * If trylocking failed we can get here. We know enough to take all needed locks
1002 * in the right order and re-lookup the entry.
1005 cache_zap_unlocked_bucket(struct namecache *ncp, struct componentname *cnp,
1006 struct vnode *dvp, struct mtx *dvlp, struct mtx *vlp, uint32_t hash,
1009 struct namecache *rncp;
1011 cache_assert_bucket_locked(ncp, RA_UNLOCKED);
1013 cache_sort_vnodes(&dvlp, &vlp);
1014 cache_lock_vnodes(dvlp, vlp);
1016 LIST_FOREACH(rncp, (NCHHASH(hash)), nc_hash) {
1017 if (rncp == ncp && rncp->nc_dvp == dvp &&
1018 rncp->nc_nlen == cnp->cn_namelen &&
1019 !bcmp(rncp->nc_name, cnp->cn_nameptr, rncp->nc_nlen))
1023 cache_zap_locked(rncp, false);
1025 cache_unlock_vnodes(dvlp, vlp);
1026 counter_u64_add(zap_and_exit_bucket_relock_success, 1);
1031 cache_unlock_vnodes(dvlp, vlp);
1035 static int __noinline
1036 cache_zap_wlocked_bucket(struct namecache *ncp, struct componentname *cnp,
1037 uint32_t hash, struct rwlock *blp)
1039 struct mtx *dvlp, *vlp;
1042 cache_assert_bucket_locked(ncp, RA_WLOCKED);
1044 dvlp = VP2VNODELOCK(ncp->nc_dvp);
1046 if (!(ncp->nc_flag & NCF_NEGATIVE))
1047 vlp = VP2VNODELOCK(ncp->nc_vp);
1048 if (cache_trylock_vnodes(dvlp, vlp) == 0) {
1049 cache_zap_locked(ncp, false);
1051 cache_unlock_vnodes(dvlp, vlp);
1057 return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp));
1060 static int __noinline
1061 cache_zap_rlocked_bucket(struct namecache *ncp, struct componentname *cnp,
1062 uint32_t hash, struct rwlock *blp)
1064 struct mtx *dvlp, *vlp;
1067 cache_assert_bucket_locked(ncp, RA_RLOCKED);
1069 dvlp = VP2VNODELOCK(ncp->nc_dvp);
1071 if (!(ncp->nc_flag & NCF_NEGATIVE))
1072 vlp = VP2VNODELOCK(ncp->nc_vp);
1073 if (cache_trylock_vnodes(dvlp, vlp) == 0) {
1076 cache_zap_locked(ncp, false);
1078 cache_unlock_vnodes(dvlp, vlp);
1084 return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp));
1088 cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp,
1089 struct mtx **vlpp1, struct mtx **vlpp2)
1091 struct mtx *dvlp, *vlp;
1093 cache_assert_bucket_locked(ncp, RA_WLOCKED);
1095 dvlp = VP2VNODELOCK(ncp->nc_dvp);
1097 if (!(ncp->nc_flag & NCF_NEGATIVE))
1098 vlp = VP2VNODELOCK(ncp->nc_vp);
1099 cache_sort_vnodes(&dvlp, &vlp);
1101 if (*vlpp1 == dvlp && *vlpp2 == vlp) {
1102 cache_zap_locked(ncp, false);
1103 cache_unlock_vnodes(dvlp, vlp);
1116 if (cache_trylock_vnodes(dvlp, vlp) == 0) {
1117 cache_zap_locked(ncp, false);
1118 cache_unlock_vnodes(dvlp, vlp);
1133 cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp)
1143 static int __noinline
1144 cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1145 struct timespec *tsp, int *ticksp)
1150 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
1151 dvp, cnp->cn_nameptr);
1152 counter_u64_add(dothits, 1);
1153 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
1160 * When we look up "." we can still be asked to lock it
1163 ltype = cnp->cn_lkflags & LK_TYPE_MASK;
1164 if (ltype != VOP_ISLOCKED(*vpp)) {
1165 if (ltype == LK_EXCLUSIVE) {
1166 vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
1167 if ((*vpp)->v_iflag & VI_DOOMED) {
1168 /* forced unmount */
1174 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
1179 static __noinline int
1180 cache_lookup_nomakeentry(struct vnode *dvp, struct vnode **vpp,
1181 struct componentname *cnp, struct timespec *tsp, int *ticksp)
1183 struct namecache *ncp;
1185 struct mtx *dvlp, *dvlp2;
1189 if (cnp->cn_namelen == 2 &&
1190 cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
1191 counter_u64_add(dotdothits, 1);
1192 dvlp = VP2VNODELOCK(dvp);
1196 ncp = dvp->v_cache_dd;
1198 SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
1205 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
1206 if (ncp->nc_dvp != dvp)
1207 panic("dvp %p v_cache_dd %p\n", dvp, ncp);
1208 if (!cache_zap_locked_vnode_kl2(ncp,
1211 MPASS(dvp->v_cache_dd == NULL);
1217 dvp->v_cache_dd = NULL;
1225 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
1226 blp = HASH2BUCKETLOCK(hash);
1228 if (LIST_EMPTY(NCHHASH(hash)))
1233 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1234 counter_u64_add(numchecks, 1);
1235 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
1236 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
1240 /* We failed to find an entry */
1246 error = cache_zap_wlocked_bucket(ncp, cnp, hash, blp);
1247 if (__predict_false(error != 0)) {
1248 zap_and_exit_bucket_fail++;
1249 cache_maybe_yield();
1252 counter_u64_add(numposzaps, 1);
1256 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, NULL);
1257 counter_u64_add(nummisszap, 1);
1262 * Lookup a name in the name cache
1266 * - dvp: Parent directory in which to search.
1267 * - vpp: Return argument. Will contain desired vnode on cache hit.
1268 * - cnp: Parameters of the name search. The most interesting bits of
1269 * the cn_flags field have the following meanings:
1270 * - MAKEENTRY: If clear, free an entry from the cache rather than look
1271 * for it.
1272 * - ISDOTDOT: Must be set if and only if cn_nameptr == ".."
1273 * - tsp: Return storage for cache timestamp. On a successful (positive
1274 * or negative) lookup, tsp will be filled with any timespec that
1275 * was stored when this cache entry was created. However, it will
1276 * be clear for "." entries.
1277 * - ticks: Return storage for alternate cache timestamp. On a successful
1278 * (positive or negative) lookup, it will contain the ticks value
1279 * that was current when the cache entry was created, unless cnp
1284 * - -1: A positive cache hit. vpp will contain the desired vnode.
1285 * - ENOENT: A negative cache hit, or dvp was recycled out from under us due
1286 * to a forced unmount. vpp will not be modified. If the entry
1287 * is a whiteout, then the ISWHITEOUT flag will be set in
1288 * cnp->cn_flags.
1289 * - 0: A cache miss. vpp will not be modified.
1293 * On a cache hit, vpp will be returned locked and ref'd. If we're looking up
1294 * .., dvp is unlocked. If we're looking up . an extra ref is taken, but the
1295 * lock is not recursively acquired.
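/*
 * Illustrative caller pattern, modeled on vfs_cache_lookup() below:
 *
 *	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
 *	if (error == 0)			// miss: consult the filesystem
 *		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
 *	if (error == -1)		// positive hit, *vpp locked and ref'd
 *		return (0);
 *	return (error);			// e.g. ENOENT for a negative hit
 */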
1298 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1299 struct timespec *tsp, int *ticksp)
1301 struct namecache_ts *ncp_ts;
1302 struct namecache *ncp;
1308 if (__predict_false(!doingcache)) {
1309 cnp->cn_flags &= ~MAKEENTRY;
1313 counter_u64_add(numcalls, 1);
1315 if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'))
1316 return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp));
1318 if ((cnp->cn_flags & MAKEENTRY) == 0)
1319 return (cache_lookup_nomakeentry(dvp, vpp, cnp, tsp, ticksp));
1325 if (cnp->cn_namelen == 2 &&
1326 cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
1327 counter_u64_add(dotdothits, 1);
1328 dvlp = VP2VNODELOCK(dvp);
1330 ncp = dvp->v_cache_dd;
1332 SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
1337 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
1338 if (ncp->nc_flag & NCF_NEGATIVE)
1344 /* Return failure if negative entry was found. */
1346 goto negative_success;
1347 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
1348 dvp, cnp->cn_nameptr, *vpp);
1349 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
1351 cache_out_ts(ncp, tsp, ticksp);
1352 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
1353 NCF_DTS && tsp != NULL) {
1354 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
1355 *tsp = ncp_ts->nc_dotdottime;
1360 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
1361 blp = HASH2BUCKETLOCK(hash);
1364 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1365 counter_u64_add(numchecks, 1);
1366 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
1367 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
1371 /* We failed to find an entry */
1372 if (__predict_false(ncp == NULL)) {
1374 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
1376 counter_u64_add(nummiss, 1);
1380 if (ncp->nc_flag & NCF_NEGATIVE)
1381 goto negative_success;
1383 /* We found a "positive" match, return the vnode */
1384 counter_u64_add(numposhits, 1);
1386 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
1387 dvp, cnp->cn_nameptr, *vpp, ncp);
1388 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
1390 cache_out_ts(ncp, tsp, ticksp);
1393 * On success we return a locked and ref'd vnode as per the lookup
1397 ltype = 0; /* silence gcc warning */
1398 if (cnp->cn_flags & ISDOTDOT) {
1399 ltype = VOP_ISLOCKED(dvp);
1403 cache_lookup_unlock(blp, dvlp);
1404 error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread);
1405 if (cnp->cn_flags & ISDOTDOT) {
1406 vn_lock(dvp, ltype | LK_RETRY);
1407 if (dvp->v_iflag & VI_DOOMED) {
1418 if ((cnp->cn_flags & ISLASTCN) &&
1419 (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
1420 ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
1425 /* We found a negative match, and want to create it, so purge */
1426 if (cnp->cn_nameiop == CREATE) {
1427 counter_u64_add(numnegzaps, 1);
1431 counter_u64_add(numneghits, 1);
1432 cache_negative_hit(ncp);
1433 if (ncp->nc_flag & NCF_WHITE)
1434 cnp->cn_flags |= ISWHITEOUT;
1435 SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
1437 cache_out_ts(ncp, tsp, ticksp);
1438 cache_lookup_unlock(blp, dvlp);
1443 error = cache_zap_rlocked_bucket(ncp, cnp, hash, blp);
1445 error = cache_zap_locked_vnode(ncp, dvp);
1446 if (__predict_false(error != 0)) {
1447 zap_and_exit_bucket_fail2++;
1448 cache_maybe_yield();
1455 struct celockstate {
1457 struct rwlock *blp[2];
1459 CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3));
1460 CTASSERT((nitems(((struct celockstate *)0)->blp) == 2));
1463 cache_celockstate_init(struct celockstate *cel)
1466 bzero(cel, sizeof(*cel));
1470 cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp,
1473 struct mtx *vlp1, *vlp2;
1475 MPASS(cel->vlp[0] == NULL);
1476 MPASS(cel->vlp[1] == NULL);
1477 MPASS(cel->vlp[2] == NULL);
1479 MPASS(vp != NULL || dvp != NULL);
1481 vlp1 = VP2VNODELOCK(vp);
1482 vlp2 = VP2VNODELOCK(dvp);
1483 cache_sort_vnodes(&vlp1, &vlp2);
1494 cache_unlock_vnodes_cel(struct celockstate *cel)
1497 MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL);
1499 if (cel->vlp[0] != NULL)
1500 mtx_unlock(cel->vlp[0]);
1501 if (cel->vlp[1] != NULL)
1502 mtx_unlock(cel->vlp[1]);
1503 if (cel->vlp[2] != NULL)
1504 mtx_unlock(cel->vlp[2]);
1508 cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
1513 cache_assert_vlp_locked(cel->vlp[0]);
1514 cache_assert_vlp_locked(cel->vlp[1]);
1515 MPASS(cel->vlp[2] == NULL);
1518 vlp = VP2VNODELOCK(vp);
1521 if (vlp >= cel->vlp[1]) {
1524 if (mtx_trylock(vlp))
1526 cache_lock_vnodes_cel_3_failures++;
1527 cache_unlock_vnodes_cel(cel);
1528 if (vlp < cel->vlp[0]) {
1530 mtx_lock(cel->vlp[0]);
1531 mtx_lock(cel->vlp[1]);
1533 if (cel->vlp[0] != NULL)
1534 mtx_lock(cel->vlp[0]);
1536 mtx_lock(cel->vlp[1]);
1546 cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1,
1547 struct rwlock *blp2)
1550 MPASS(cel->blp[0] == NULL);
1551 MPASS(cel->blp[1] == NULL);
1553 cache_sort_vnodes(&blp1, &blp2);
1564 cache_unlock_buckets_cel(struct celockstate *cel)
1567 if (cel->blp[0] != NULL)
1568 rw_wunlock(cel->blp[0]);
1569 rw_wunlock(cel->blp[1]);
1573 * Lock part of the cache affected by the insertion.
1575 * This means vnodelocks for dvp, vp and the relevant bucketlock.
1576 * However, insertion can result in removal of an old entry. In this
1577 * case we have an additional vnode and bucketlock pair to lock. If the
1578 * entry is negative, no vnode lock is needed for it.
1580 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while
1581 * preserving the locking order (smaller address first).
1584 cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
1587 struct namecache *ncp;
1588 struct rwlock *blps[2];
1590 blps[0] = HASH2BUCKETLOCK(hash);
1593 cache_lock_vnodes_cel(cel, dvp, vp);
1594 if (vp == NULL || vp->v_type != VDIR)
1596 ncp = vp->v_cache_dd;
1599 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1601 MPASS(ncp->nc_dvp == vp);
1602 blps[1] = NCP2BUCKETLOCK(ncp);
1603 if (ncp->nc_flag & NCF_NEGATIVE)
1605 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
1608 * All vnodes got re-locked. Re-validate the state and if
1609 * nothing changed we are done. Otherwise restart.
1611 if (ncp == vp->v_cache_dd &&
1612 (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
1613 blps[1] == NCP2BUCKETLOCK(ncp) &&
1614 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
1616 cache_unlock_vnodes_cel(cel);
1621 cache_lock_buckets_cel(cel, blps[0], blps[1]);
1625 cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
1628 struct namecache *ncp;
1629 struct rwlock *blps[2];
1631 blps[0] = HASH2BUCKETLOCK(hash);
1634 cache_lock_vnodes_cel(cel, dvp, vp);
1635 ncp = dvp->v_cache_dd;
1638 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1640 MPASS(ncp->nc_dvp == dvp);
1641 blps[1] = NCP2BUCKETLOCK(ncp);
1642 if (ncp->nc_flag & NCF_NEGATIVE)
1644 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
1646 if (ncp == dvp->v_cache_dd &&
1647 (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
1648 blps[1] == NCP2BUCKETLOCK(ncp) &&
1649 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
1651 cache_unlock_vnodes_cel(cel);
1656 cache_lock_buckets_cel(cel, blps[0], blps[1]);
1660 cache_enter_unlock(struct celockstate *cel)
1663 cache_unlock_buckets_cel(cel);
1664 cache_unlock_vnodes_cel(cel);
1667 static void __noinline
1668 cache_enter_dotdot_prep(struct vnode *dvp, struct vnode *vp,
1669 struct componentname *cnp)
1671 struct celockstate cel;
1672 struct namecache *ncp;
1676 if (dvp->v_cache_dd == NULL)
1678 len = cnp->cn_namelen;
1679 cache_celockstate_init(&cel);
1680 hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
1681 cache_enter_lock_dd(&cel, dvp, vp, hash);
1682 ncp = dvp->v_cache_dd;
1683 if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT)) {
1684 KASSERT(ncp->nc_dvp == dvp, ("wrong isdotdot parent"));
1685 cache_zap_locked(ncp, false);
1689 dvp->v_cache_dd = NULL;
1690 cache_enter_unlock(&cel);
1695 * Add an entry to the cache.
1698 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
1699 struct timespec *tsp, struct timespec *dtsp)
1701 struct celockstate cel;
1702 struct namecache *ncp, *n2, *ndd;
1703 struct namecache_ts *ncp_ts, *n2_ts;
1704 struct nchashhead *ncpp;
1710 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
1711 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
1712 ("cache_enter: Adding a doomed vnode"));
1713 VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
1714 ("cache_enter: Doomed vnode used as src"));
1716 if (__predict_false(!doingcache))
1720 if (__predict_false(cnp->cn_nameptr[0] == '.')) {
1721 if (cnp->cn_namelen == 1)
1723 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
1724 cache_enter_dotdot_prep(dvp, vp, cnp);
1725 flag = NCF_ISDOTDOT;
1730 * Avoid blowout in namecache entries.
1732 lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
1733 if (__predict_false(lnumcache >= ncsize)) {
1734 atomic_add_long(&numcache, -1);
1738 cache_celockstate_init(&cel);
1743 * Calculate the hash key and setup as much of the new
1744 * namecache entry as possible before acquiring the lock.
1746 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
1747 ncp->nc_flag = flag;
1750 ncp->nc_flag |= NCF_NEGATIVE;
1753 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
1754 ncp_ts->nc_time = *tsp;
1755 ncp_ts->nc_ticks = ticks;
1756 ncp_ts->nc_nc.nc_flag |= NCF_TS;
1758 ncp_ts->nc_dotdottime = *dtsp;
1759 ncp_ts->nc_nc.nc_flag |= NCF_DTS;
1762 len = ncp->nc_nlen = cnp->cn_namelen;
1763 hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
1764 strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
1765 cache_enter_lock(&cel, dvp, vp, hash);
1768 * See if this vnode or negative entry is already in the cache
1769 * with this name. This can happen with concurrent lookups of
1770 * the same path name.
1772 ncpp = NCHHASH(hash);
1773 LIST_FOREACH(n2, ncpp, nc_hash) {
1774 if (n2->nc_dvp == dvp &&
1775 n2->nc_nlen == cnp->cn_namelen &&
1776 !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
1778 KASSERT((n2->nc_flag & NCF_TS) != 0,
1780 n2_ts = __containerof(n2, struct namecache_ts, nc_nc);
1781 n2_ts->nc_time = ncp_ts->nc_time;
1782 n2_ts->nc_ticks = ncp_ts->nc_ticks;
1784 n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime;
1785 if (ncp->nc_flag & NCF_NEGATIVE)
1786 mtx_lock(&ncneg_hot.nl_lock);
1787 n2_ts->nc_nc.nc_flag |= NCF_DTS;
1788 if (ncp->nc_flag & NCF_NEGATIVE)
1789 mtx_unlock(&ncneg_hot.nl_lock);
1792 goto out_unlock_free;
1796 if (flag == NCF_ISDOTDOT) {
1798 * See if we are trying to add .. entry, but some other lookup
1799 * has populated v_cache_dd pointer already.
1801 if (dvp->v_cache_dd != NULL)
1802 goto out_unlock_free;
1803 KASSERT(vp == NULL || vp->v_type == VDIR,
1804 ("wrong vnode type %p", vp));
1805 dvp->v_cache_dd = ncp;
1809 if (vp->v_type == VDIR) {
1810 if (flag != NCF_ISDOTDOT) {
1812 * For this case, the cache entry maps both the
1813 * directory name in it and the name ".." for the
1814 * directory's parent.
1816 if ((ndd = vp->v_cache_dd) != NULL) {
1817 if ((ndd->nc_flag & NCF_ISDOTDOT) != 0)
1818 cache_zap_locked(ndd, false);
1822 vp->v_cache_dd = ncp;
1825 vp->v_cache_dd = NULL;
1829 if (flag != NCF_ISDOTDOT) {
1830 if (LIST_EMPTY(&dvp->v_cache_src)) {
1832 counter_u64_add(numcachehv, 1);
1834 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
1838 * Insert the new namecache entry into the appropriate chain
1839 * within the cache entries table.
1841 LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
1844 * If the entry is "negative", we place it into the
1845 * "negative" cache queue, otherwise, we place it into the
1846 * destination vnode's cache entries queue.
1849 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
1850 SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name,
1853 if (cnp->cn_flags & ISWHITEOUT)
1854 ncp->nc_flag |= NCF_WHITE;
1855 cache_negative_insert(ncp, false);
1856 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp,
1859 cache_enter_unlock(&cel);
1860 if (numneg * ncnegfactor > lnumcache)
1861 cache_negative_zap_one();
1865 cache_enter_unlock(&cel);
1866 atomic_add_long(&numcache, -1);
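/*
 * cache_roundup_2() returns the smallest power of two strictly greater than
 * its argument, e.g. cache_roundup_2(9) == 16 and cache_roundup_2(16) == 32.
 */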
1872 cache_roundup_2(u_int val)
1876 for (res = 1; res <= val; res <<= 1)
1883 * Name cache initialization, from vfs_init() when we are booting
1886 nchinit(void *dummy __unused)
1890 cache_zone_small = uma_zcreate("S VFS Cache",
1891 sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1,
1892 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT,
1894 cache_zone_small_ts = uma_zcreate("STS VFS Cache",
1895 sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1,
1896 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT,
1898 cache_zone_large = uma_zcreate("L VFS Cache",
1899 sizeof(struct namecache) + NAME_MAX + 1,
1900 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT,
1902 cache_zone_large_ts = uma_zcreate("LTS VFS Cache",
1903 sizeof(struct namecache_ts) + NAME_MAX + 1,
1904 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT,
1907 ncsize = desiredvnodes * ncsizefactor;
1908 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
1909 ncbuckethash = cache_roundup_2(mp_ncpus * mp_ncpus) - 1;
1910 if (ncbuckethash < 7) /* arbitrarily chosen to avoid having one lock */
1912 if (ncbuckethash > nchash)
1913 ncbuckethash = nchash;
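/*
 * With the sizing above the number of bucket locks grows with the square of
 * the CPU count, e.g. a 4-CPU machine gets cache_roundup_2(16) == 32 bucket
 * locks, clamped from below (to avoid having a single lock) and from above
 * by the number of hash chains.
 */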
1914 bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
1916 for (i = 0; i < numbucketlocks; i++)
1917 rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
1918 ncvnodehash = ncbuckethash;
1919 vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
1921 for (i = 0; i < numvnodelocks; i++)
1922 mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
1923 ncpurgeminvnodes = numbucketlocks * 2;
1925 neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE,
1927 for (i = 0; i < numneglists; i++) {
1928 mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF);
1929 TAILQ_INIT(&neglists[i].nl_list);
1931 mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF);
1932 TAILQ_INIT(&ncneg_hot.nl_list);
1934 mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);
1936 numcachehv = counter_u64_alloc(M_WAITOK);
1937 numcalls = counter_u64_alloc(M_WAITOK);
1938 dothits = counter_u64_alloc(M_WAITOK);
1939 dotdothits = counter_u64_alloc(M_WAITOK);
1940 numchecks = counter_u64_alloc(M_WAITOK);
1941 nummiss = counter_u64_alloc(M_WAITOK);
1942 nummisszap = counter_u64_alloc(M_WAITOK);
1943 numposzaps = counter_u64_alloc(M_WAITOK);
1944 numposhits = counter_u64_alloc(M_WAITOK);
1945 numnegzaps = counter_u64_alloc(M_WAITOK);
1946 numneghits = counter_u64_alloc(M_WAITOK);
1947 numfullpathcalls = counter_u64_alloc(M_WAITOK);
1948 numfullpathfail1 = counter_u64_alloc(M_WAITOK);
1949 numfullpathfail2 = counter_u64_alloc(M_WAITOK);
1950 numfullpathfail4 = counter_u64_alloc(M_WAITOK);
1951 numfullpathfound = counter_u64_alloc(M_WAITOK);
1952 zap_and_exit_bucket_relock_success = counter_u64_alloc(M_WAITOK);
1953 numneg_evicted = counter_u64_alloc(M_WAITOK);
1954 shrinking_skipped = counter_u64_alloc(M_WAITOK);
1956 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
1959 cache_changesize(int newmaxvnodes)
1961 struct nchashhead *new_nchashtbl, *old_nchashtbl;
1962 u_long new_nchash, old_nchash;
1963 struct namecache *ncp;
1968 newncsize = newmaxvnodes * ncsizefactor;
1969 newmaxvnodes = cache_roundup_2(newmaxvnodes * 2);
1970 if (newmaxvnodes < numbucketlocks)
1971 newmaxvnodes = numbucketlocks;
1973 new_nchashtbl = hashinit(newmaxvnodes, M_VFSCACHE, &new_nchash);
1974 /* If same hash table size, nothing to do */
1975 if (nchash == new_nchash) {
1976 free(new_nchashtbl, M_VFSCACHE);
1980 * Move everything from the old hash table to the new table.
1981 * None of the namecache entries in the table can be removed from under us
1982 * because doing so requires the bucket locks, all of which we hold.
1984 cache_lock_all_vnodes();
1985 cache_lock_all_buckets();
1986 old_nchashtbl = nchashtbl;
1987 old_nchash = nchash;
1988 nchashtbl = new_nchashtbl;
1989 nchash = new_nchash;
1990 for (i = 0; i <= old_nchash; i++) {
1991 while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) {
1992 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen,
1994 LIST_REMOVE(ncp, nc_hash);
1995 LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
1999 cache_unlock_all_buckets();
2000 cache_unlock_all_vnodes();
2001 free(old_nchashtbl, M_VFSCACHE);
2005 * Invalidate all entries from and to a particular vnode.
2008 cache_purge(struct vnode *vp)
2010 TAILQ_HEAD(, namecache) ncps;
2011 struct namecache *ncp, *nnp;
2012 struct mtx *vlp, *vlp2;
2014 CTR1(KTR_VFS, "cache_purge(%p)", vp);
2015 SDT_PROBE1(vfs, namecache, purge, done, vp);
2016 if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
2017 vp->v_cache_dd == NULL)
2020 vlp = VP2VNODELOCK(vp);
2024 while (!LIST_EMPTY(&vp->v_cache_src)) {
2025 ncp = LIST_FIRST(&vp->v_cache_src);
2026 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
2028 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
2030 while (!TAILQ_EMPTY(&vp->v_cache_dst)) {
2031 ncp = TAILQ_FIRST(&vp->v_cache_dst);
2032 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
2034 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
2036 ncp = vp->v_cache_dd;
2038 KASSERT(ncp->nc_flag & NCF_ISDOTDOT,
2039 ("lost dotdot link"));
2040 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
2042 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
2044 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
2048 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
2054 * Invalidate all negative entries for a particular directory vnode.
2057 cache_purge_negative(struct vnode *vp)
2059 TAILQ_HEAD(, namecache) ncps;
2060 struct namecache *ncp, *nnp;
2063 CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
2064 SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
2065 if (LIST_EMPTY(&vp->v_cache_src))
2068 vlp = VP2VNODELOCK(vp);
2070 LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
2071 if (!(ncp->nc_flag & NCF_NEGATIVE))
2073 cache_zap_negative_locked_vnode_kl(ncp, vp);
2074 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
2077 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
2083 * Flush all entries referencing a particular filesystem.
2086 cache_purgevfs(struct mount *mp, bool force)
2088 TAILQ_HEAD(, namecache) ncps;
2089 struct mtx *vlp1, *vlp2;
2091 struct nchashhead *bucket;
2092 struct namecache *ncp, *nnp;
2093 u_long i, j, n_nchash;
2096 /* Scan hash tables for applicable entries */
2097 SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
2098 if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes)
2101 n_nchash = nchash + 1;
2103 for (i = 0; i < numbucketlocks; i++) {
2104 blp = (struct rwlock *)&bucketlocks[i];
2106 for (j = i; j < n_nchash; j += numbucketlocks) {
2108 bucket = &nchashtbl[j];
2109 LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
2110 cache_assert_bucket_locked(ncp, RA_WLOCKED);
2111 if (ncp->nc_dvp->v_mount != mp)
2113 error = cache_zap_wlocked_bucket_kl(ncp, blp,
2117 TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst);
2121 if (vlp1 == NULL && vlp2 == NULL)
2122 cache_maybe_yield();
2129 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
2135 * Perform canonical checks and a cache lookup, and pass the request on to
2136 * the filesystem through VOP_CACHEDLOOKUP only if needed.
2140 vfs_cache_lookup(struct vop_lookup_args *ap)
2144 struct vnode **vpp = ap->a_vpp;
2145 struct componentname *cnp = ap->a_cnp;
2146 int flags = cnp->cn_flags;
2151 if (dvp->v_type != VDIR)
2154 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
2155 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
2158 error = vn_dir_check_exec(dvp, cnp);
2162 error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
2164 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
2171 * XXX All of these sysctls would probably be more productive dead.
2173 static int __read_mostly disablecwd;
2174 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
2175 "Disable the getcwd syscall");
2177 /* Implementation of the getcwd syscall. */
2179 sys___getcwd(struct thread *td, struct __getcwd_args *uap)
2182 return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen,
2187 kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen,
2191 struct filedesc *fdp;
2192 struct vnode *cdir, *rdir;
2195 if (__predict_false(disablecwd))
2197 if (__predict_false(buflen < 2))
2199 if (buflen > path_max)
2202 tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
2203 fdp = td->td_proc->p_fd;
2204 FILEDESC_SLOCK(fdp);
2205 cdir = fdp->fd_cdir;
2207 rdir = fdp->fd_rdir;
2209 FILEDESC_SUNLOCK(fdp);
2210 error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
2215 if (bufseg == UIO_SYSSPACE)
2216 bcopy(bp, buf, strlen(bp) + 1);
2218 error = copyout(bp, buf, strlen(bp) + 1);
2220 if (KTRPOINT(curthread, KTR_NAMEI))
2224 free(tmpbuf, M_TEMP);
2229 * Thus begins the fullpath magic.
2232 static int __read_mostly disablefullpath;
2233 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
2234 "Disable the vn_fullpath function");
2237 * Retrieve the full filesystem path that corresponds to a vnode from the name
2238 * cache (if available).
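/*
 * Illustrative usage; on success *retbuf points into the buffer returned via
 * *freebuf, which the caller releases with free(9) using M_TEMP:
 *
 *	char *fullpath, *freepath = NULL;
 *
 *	error = vn_fullpath(curthread, vp, &fullpath, &freepath);
 *	if (error == 0)
 *		printf("%s\n", fullpath);
 *	if (freepath != NULL)
 *		free(freepath, M_TEMP);
 */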
2241 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
2244 struct filedesc *fdp;
2248 if (__predict_false(disablefullpath))
2250 if (__predict_false(vn == NULL))
2253 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
2254 fdp = td->td_proc->p_fd;
2255 FILEDESC_SLOCK(fdp);
2256 rdir = fdp->fd_rdir;
2258 FILEDESC_SUNLOCK(fdp);
2259 error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
2270 * This function is similar to vn_fullpath, but it attempts to look up the
2271 * pathname relative to the global root mount point. This is required for the
2272 * auditing sub-system, as audited pathnames must be absolute, relative to the
2273 * global root mount point.
2276 vn_fullpath_global(struct thread *td, struct vnode *vn,
2277 char **retbuf, char **freebuf)
2282 if (__predict_false(disablefullpath))
2284 if (__predict_false(vn == NULL))
2286 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
2287 error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
2296 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
2299 struct namecache *ncp;
2303 vlp = VP2VNODELOCK(*vp);
2305 TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
2306 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
2310 if (*buflen < ncp->nc_nlen) {
2313 counter_u64_add(numfullpathfail4, 1);
2315 SDT_PROBE3(vfs, namecache, fullpath, return, error,
2319 *buflen -= ncp->nc_nlen;
2320 memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
2321 SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
2330 SDT_PROBE1(vfs, namecache, fullpath, miss, vp);
2333 vn_lock(*vp, LK_SHARED | LK_RETRY);
2334 error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
2337 counter_u64_add(numfullpathfail2, 1);
2338 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
2343 if (dvp->v_iflag & VI_DOOMED) {
2344 /* forced unmount */
2347 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
2351 * *vp has its use count incremented still.
2358 * The magic behind kern___getcwd() and vn_fullpath().
2361 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
2362 char *buf, char **retbuf, u_int buflen)
2364 int error, slash_prefixed;
2365 #ifdef KDTRACE_HOOKS
2366 struct vnode *startvp = vp;
2375 SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
2376 counter_u64_add(numfullpathcalls, 1);
2378 if (vp->v_type != VDIR) {
2379 error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
2386 buf[--buflen] = '/';
2389 while (vp != rdir && vp != rootvnode) {
2391 * The vp vnode must be already fully constructed,
2392 * since it is either found in namecache or obtained
2393 * from VOP_VPTOCNP(). We may test for VV_ROOT safely
2394 * without obtaining the vnode lock.
2396 if ((vp->v_vflag & VV_ROOT) != 0) {
2397 vn_lock(vp, LK_RETRY | LK_SHARED);
2400 * With the vnode locked, check for races with
2401 * unmount, forced or not. Note that we
2402 * already verified that vp is not equal to
2403 * the root vnode, which means that
2404 * mnt_vnodecovered can be NULL only for the
2407 if ((vp->v_iflag & VI_DOOMED) != 0 ||
2408 (vp1 = vp->v_mount->mnt_vnodecovered) == NULL ||
2409 vp1->v_mountedhere != vp->v_mount) {
2412 SDT_PROBE3(vfs, namecache, fullpath, return,
2422 if (vp->v_type != VDIR) {
2424 counter_u64_add(numfullpathfail1, 1);
2426 SDT_PROBE3(vfs, namecache, fullpath, return,
2430 error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
2436 SDT_PROBE3(vfs, namecache, fullpath, return, error,
2440 buf[--buflen] = '/';
2445 if (!slash_prefixed) {
2448 counter_u64_add(numfullpathfail4, 1);
2449 SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
2453 buf[--buflen] = '/';
2455 counter_u64_add(numfullpathfound, 1);
2458 SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen);
2459 *retbuf = buf + buflen;
2464 vn_dir_dd_ino(struct vnode *vp)
2466 struct namecache *ncp;
2470 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
2471 vlp = VP2VNODELOCK(vp);
2473 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
2474 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
2479 if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread))
2488 vn_commname(struct vnode *vp, char *buf, u_int buflen)
2490 struct namecache *ncp;
2494 vlp = VP2VNODELOCK(vp);
2496 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
2497 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
2503 l = min(ncp->nc_nlen, buflen - 1);
2504 memcpy(buf, ncp->nc_name, l);
2510 /* ABI compat shims for old kernel modules. */
2513 void cache_enter(struct vnode *dvp, struct vnode *vp,
2514 struct componentname *cnp);
2517 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2520 cache_enter_time(dvp, vp, cnp, NULL, NULL);
2524 * This function updates the path string to the vnode's full global path
2525 * and checks the size of the new path string against the pathlen argument.
2527 * Requires a locked, referenced vnode.
2528 * The vnode is re-locked on success or ENODEV, otherwise it is unlocked.
2530 * If the sysctl debug.disablefullpath is set, ENODEV is returned, the
2531 * vnode is left locked and the path remains untouched.
2533 * If vp is a directory, the call to vn_fullpath_global() always succeeds
2534 * because it falls back to the ".." lookup if the namecache lookup fails.
2537 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
2540 struct nameidata nd;
2545 ASSERT_VOP_ELOCKED(vp, __func__);
2547 /* Return ENODEV if sysctl debug.disablefullpath==1 */
2548 if (__predict_false(disablefullpath))
2551 /* Construct global filesystem path from vp. */
2553 error = vn_fullpath_global(td, vp, &rpath, &fbuf);
2560 if (strlen(rpath) >= pathlen) {
2562 error = ENAMETOOLONG;
2567 * Re-lookup the vnode by path to detect a possible rename.
2568 * As a side effect, the vnode is relocked.
2569 * If vnode was renamed, return ENOENT.
2571 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
2572 UIO_SYSSPACE, path, td);
2578 NDFREE(&nd, NDF_ONLY_PNBUF);
2582 strcpy(path, rpath);
2595 db_print_vpath(struct vnode *vp)
2598 while (vp != NULL) {
2599 db_printf("%p: ", vp);
2600 if (vp == rootvnode) {
2604 if (vp->v_vflag & VV_ROOT) {
2605 db_printf("<mount point>");
2606 vp = vp->v_mount->mnt_vnodecovered;
2608 struct namecache *ncp;
2612 ncp = TAILQ_FIRST(&vp->v_cache_dst);
2615 for (i = 0; i < ncp->nc_nlen; i++)
2616 db_printf("%c", *ncn++);
2629 DB_SHOW_COMMAND(vpath, db_show_vpath)
2634 db_printf("usage: show vpath <struct vnode *>\n");
2638 vp = (struct vnode *)addr;