2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1989, 1993, 1995
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
8 * Poul-Henning Kamp of the FreeBSD Project.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
41 #include "opt_ktrace.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/capsicum.h>
46 #include <sys/counter.h>
47 #include <sys/filedesc.h>
48 #include <sys/fnv_hash.h>
49 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/fcntl.h>
55 #include <sys/mount.h>
56 #include <sys/namei.h>
58 #include <sys/rwlock.h>
63 #include <sys/syscallsubr.h>
64 #include <sys/sysctl.h>
65 #include <sys/sysproto.h>
66 #include <sys/vnode.h>
69 #include <sys/ktrace.h>
74 #include <security/audit/audit.h>
75 #include <security/mac/mac_framework.h>
83 SDT_PROVIDER_DECLARE(vfs);
84 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
86 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
88 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
89 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
90 "char *", "struct vnode *");
91 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
92 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
93 "struct vnode *", "char *");
94 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
96 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
97 "struct vnode *", "char *");
98 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
100 SDT_PROBE_DEFINE2(vfs, namecache, removecnp, hit, "struct vnode *",
101 "struct componentname *");
102 SDT_PROBE_DEFINE2(vfs, namecache, removecnp, miss, "struct vnode *",
103 "struct componentname *");
104 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
105 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
106 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
107 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
109 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
111 SDT_PROBE_DEFINE2(vfs, namecache, shrink_negative, done, "struct vnode *",
114 SDT_PROBE_DEFINE3(vfs, fplookup, lookup, done, "struct nameidata", "int", "bool");
115 SDT_PROBE_DECLARE(vfs, namei, lookup, entry);
116 SDT_PROBE_DECLARE(vfs, namei, lookup, return);
119 * This structure describes the elements in the cache of recent
120 * names looked up by namei.
125 _Static_assert(sizeof(struct negstate) <= sizeof(struct vnode *),
126 "the state must fit in a union with a pointer without growing it");
129 LIST_ENTRY(namecache) nc_src; /* source vnode list */
130 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */
131 CK_SLIST_ENTRY(namecache) nc_hash;/* hash chain */
132 struct vnode *nc_dvp; /* vnode of parent of name */
134 struct vnode *nu_vp; /* vnode the name refers to */
135 struct negstate nu_neg;/* negative entry state */
137 u_char nc_flag; /* flag bits */
138 u_char nc_nlen; /* length of name */
139 char nc_name[0]; /* segment name + nul */
143 * struct namecache_ts repeats struct namecache layout up to the
145 * struct namecache_ts is used in place of struct namecache when time(s) need
146 * to be stored. The nc_dotdottime field is used when a cache entry is mapping
147 * both a non-dotdot directory name plus dotdot for the directory's parent.
150 * See below for alignment requirement.
152 struct namecache_ts {
153 struct timespec nc_time; /* timespec provided by fs */
154 struct timespec nc_dotdottime; /* dotdot timespec provided by fs */
155 int nc_ticks; /* ticks value when entry was added */
156 struct namecache nc_nc;
160 * At least mips n32 performs 64-bit accesses to timespec as found
161 * in namecache_ts and requires them to be aligned.  Since other platforms
162 * may have the same requirement, suffer a little bit and enforce the
163 * alignment for everyone.  Note this is a nop for 64-bit platforms.
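 *
 * For example, on a 32-bit platform with a 64-bit time_t, UMA_ALIGNOF(time_t)
 * below makes the zones hand out 8-byte aligned items, so the timespec fields
 * embedded in struct namecache_ts can be accessed safely; on 64-bit platforms
 * the natural allocation alignment already provides this.
 */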
165 #define CACHE_ZONE_ALIGNMENT UMA_ALIGNOF(time_t)
166 #define CACHE_PATH_CUTOFF 39
168 #define CACHE_ZONE_SMALL_SIZE (sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1)
169 #define CACHE_ZONE_SMALL_TS_SIZE (sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1)
170 #define CACHE_ZONE_LARGE_SIZE (sizeof(struct namecache) + NAME_MAX + 1)
171 #define CACHE_ZONE_LARGE_TS_SIZE (sizeof(struct namecache_ts) + NAME_MAX + 1)
173 _Static_assert((CACHE_ZONE_SMALL_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
174 _Static_assert((CACHE_ZONE_SMALL_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
175 _Static_assert((CACHE_ZONE_LARGE_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
176 _Static_assert((CACHE_ZONE_LARGE_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
178 #define nc_vp n_un.nu_vp
179 #define nc_neg n_un.nu_neg
182 * Flags in namecache.nc_flag
184 #define NCF_WHITE 0x01
185 #define NCF_ISDOTDOT 0x02
188 #define NCF_DVDROP 0x10
189 #define NCF_NEGATIVE 0x20
190 #define NCF_INVALID 0x40
194 * Flags in negstate.neg_flag
199 * Mark an entry as invalid.
201 * This is called before it starts getting deconstructed.
204 cache_ncp_invalidate(struct namecache *ncp)
207 KASSERT((ncp->nc_flag & NCF_INVALID) == 0,
208 ("%s: entry %p already invalid", __func__, ncp));
209 atomic_store_char(&ncp->nc_flag, ncp->nc_flag | NCF_INVALID);
210 atomic_thread_fence_rel();
214 * Check whether the entry can be safely used.
216 * All places which elide locks are supposed to call this after they are
217 * done with reading from an entry.
220 cache_ncp_canuse(struct namecache *ncp)
223 atomic_thread_fence_acq();
224 return ((atomic_load_char(&ncp->nc_flag) & (NCF_INVALID | NCF_WIP)) == 0);
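#if 0
/*
 * A minimal sketch of the lockless reader side of the protocol above
 * (illustrative only; the real consumer is cache_lookup() below).  The
 * vfs_smr_enter()/vfs_smr_exit() pair is assumed to delimit the SMR read
 * section that keeps the entry's memory alive, and a real caller would
 * acquire a reference (vget_prep_smr()) before leaving the section.
 */
static struct vnode *
cache_peek_entry_sketch(struct namecache *ncp)
{
	struct vnode *vp;

	vfs_smr_enter();
	vp = ncp->nc_vp;		/* speculative read of the target */
	if (!cache_ncp_canuse(ncp))	/* zapped or still NCF_WIP? */
		vp = NULL;		/* caller falls back to a locked lookup */
	vfs_smr_exit();
	return (vp);
}
#endif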
228 * Name caching works as follows:
230 * Names found by directory scans are retained in a cache
231 * for future reference. It is managed LRU, so frequently
232 * used names will hang around. Cache is indexed by hash value
233 * obtained from (dvp, name) where dvp refers to the directory containing the name.
236 * If it is a "negative" entry (i.e. for a name that is known NOT to
237 * exist), the vnode pointer will be NULL.
239 * Upon reaching the last segment of a path, if the reference
240 * is for DELETE, or NOCACHE is set (rewrite), and the
241 * name is located in the cache, it will be dropped.
243 * These locks are used (in the order in which they can be taken):
245 * vnodelock mtx vnode lists and v_cache_dd field protection
246 * bucketlock rwlock for access to given set of hash buckets
247 * neglist mtx negative entry LRU management
249 * Additionally, ncneg_shrink_lock mtx is used to have at most one thread
250 * shrinking the LRU list.
252 * It is legal to take multiple vnodelock and bucketlock locks. The locking
253 * order is lower address first. Both are recursive.
255 * "." lookups are lockless.
257 * ".." and vnode -> name lookups require vnodelock.
259 * name -> vnode lookup requires the relevant bucketlock to be held for reading.
261 * Insertions and removals of entries require involved vnodes and bucketlocks
262 * to be write-locked to prevent other threads from seeing the entry.
264 * Some lookups result in removal of the found entry (e.g. getting rid of a
265 * negative entry with the intent to create a positive one), which poses a
266 * problem when multiple threads reach the same state.  Similarly, two different
267 * threads can purge two different vnodes and try to remove the same name.
269 * If the already held vnode lock is lower than the second required lock, we
270 * can just take the other lock. However, in the opposite case, this could
271 * deadlock.  As such, this is resolved by trylocking and, if that fails, unlocking
272 * the first node, locking everything in order and revalidating the state.
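 *
 * An illustrative sketch of that dance, using the helpers defined further
 * down in this file (cache_sort_vnodes(), cache_lock_vnodes()); the
 * revalidation step itself is whatever state check the caller needs to redo.
 */
#if 0
static void
cache_relock_sketch(struct mtx *held, struct mtx *needed)
{

	if (mtx_trylock(needed))
		return;
	/* Trylock failed: drop what we hold and take both in address order. */
	mtx_unlock(held);
	cache_sort_vnodes(&held, &needed);
	cache_lock_vnodes(held, needed);
	/* The caller must now revalidate anything it inspected earlier. */
}
#endif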
278 * Structures associated with name caching.
280 #define NCHHASH(hash) \
281 (&nchashtbl[(hash) & nchash])
282 static __read_mostly CK_SLIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
283 static u_long __read_mostly nchash; /* size of hash table */
284 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
285 "Size of namecache hash table");
286 static u_long __read_mostly ncnegfactor = 5; /* ratio of negative entries */
287 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
288 "Ratio of negative namecache entries");
289 static u_long __exclusive_cache_line numneg; /* number of negative entries allocated */
290 static u_long __exclusive_cache_line numcache;/* number of cache entries allocated */
291 u_int ncsizefactor = 2;
292 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
293 "Size factor for namecache");
294 static u_int __read_mostly ncpurgeminvnodes;
295 SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
296 "Number of vnodes below which purgevfs ignores the request");
297 static u_int __read_mostly ncsize; /* the size as computed on creation or resizing */
299 struct nchstats nchstats; /* cache effectiveness statistics */
301 static struct mtx __exclusive_cache_line ncneg_shrink_lock;
305 TAILQ_HEAD(, namecache) nl_list;
306 } __aligned(CACHE_LINE_SIZE);
308 static struct neglist __read_mostly *neglists;
309 static struct neglist ncneg_hot;
310 static u_long numhotneg;
313 #define numneglists (ncneghash + 1)
314 static inline struct neglist *
315 NCP2NEGLIST(struct namecache *ncp)
318 return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
321 static inline struct negstate *
322 NCP2NEGSTATE(struct namecache *ncp)
325 MPASS(ncp->nc_flag & NCF_NEGATIVE);
326 return (&ncp->nc_neg);
329 #define numbucketlocks (ncbuckethash + 1)
330 static u_int __read_mostly ncbuckethash;
331 static struct rwlock_padalign __read_mostly *bucketlocks;
332 #define HASH2BUCKETLOCK(hash) \
333 ((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))
335 #define numvnodelocks (ncvnodehash + 1)
336 static u_int __read_mostly ncvnodehash;
337 static struct mtx __read_mostly *vnodelocks;
338 static inline struct mtx *
339 VP2VNODELOCK(struct vnode *vp)
342 return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
346 * UMA zones for the VFS cache.
348 * The small cache is used for entries with short names, which are the
349 * most common. The large cache is used for entries which are too big to
350 * fit in the small cache.
352 static uma_zone_t __read_mostly cache_zone_small;
353 static uma_zone_t __read_mostly cache_zone_small_ts;
354 static uma_zone_t __read_mostly cache_zone_large;
355 static uma_zone_t __read_mostly cache_zone_large_ts;
357 static struct namecache *
358 cache_alloc(int len, int ts)
360 struct namecache_ts *ncp_ts;
361 struct namecache *ncp;
363 if (__predict_false(ts)) {
364 if (len <= CACHE_PATH_CUTOFF)
365 ncp_ts = uma_zalloc_smr(cache_zone_small_ts, M_WAITOK);
367 ncp_ts = uma_zalloc_smr(cache_zone_large_ts, M_WAITOK);
368 ncp = &ncp_ts->nc_nc;
370 if (len <= CACHE_PATH_CUTOFF)
371 ncp = uma_zalloc_smr(cache_zone_small, M_WAITOK);
373 ncp = uma_zalloc_smr(cache_zone_large, M_WAITOK);
379 cache_free(struct namecache *ncp)
381 struct namecache_ts *ncp_ts;
385 if ((ncp->nc_flag & NCF_DVDROP) != 0)
387 if (__predict_false(ncp->nc_flag & NCF_TS)) {
388 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
389 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
390 uma_zfree_smr(cache_zone_small_ts, ncp_ts);
392 uma_zfree_smr(cache_zone_large_ts, ncp_ts);
394 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
395 uma_zfree_smr(cache_zone_small, ncp);
397 uma_zfree_smr(cache_zone_large, ncp);
402 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
404 struct namecache_ts *ncp_ts;
406 KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
407 (tsp == NULL && ticksp == NULL),
410 if (tsp == NULL && ticksp == NULL)
413 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
415 *tsp = ncp_ts->nc_time;
417 *ticksp = ncp_ts->nc_ticks;
421 static int __read_mostly doingcache = 1; /* 1 => enable the cache */
422 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
423 "VFS namecache enabled");
426 /* Export size information to userland */
427 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
428 sizeof(struct namecache), "sizeof(struct namecache)");
431 * The new name cache statistics
433 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
434 "Name cache statistics");
435 #define STATNODE_ULONG(name, descr) \
436 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
437 #define STATNODE_COUNTER(name, descr) \
438 static COUNTER_U64_DEFINE_EARLY(name); \
439 SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, \
441 STATNODE_ULONG(numneg, "Number of negative cache entries");
442 STATNODE_ULONG(numcache, "Number of cache entries");
443 STATNODE_COUNTER(numcachehv, "Number of namecache entries with vnodes held");
444 STATNODE_COUNTER(numdrops, "Number of dropped entries due to reaching the limit");
445 STATNODE_COUNTER(dothits, "Number of '.' hits");
446 STATNODE_COUNTER(dotdothits, "Number of '..' hits");
447 STATNODE_COUNTER(nummiss, "Number of cache misses");
448 STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
449 STATNODE_COUNTER(numposzaps,
450 "Number of cache hits (positive) we do not want to cache");
451 STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
452 STATNODE_COUNTER(numnegzaps,
453 "Number of cache hits (negative) we do not want to cache");
454 STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
455 /* These count for vn_getcwd(), too. */
456 STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
457 STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
458 STATNODE_COUNTER(numfullpathfail2,
459 "Number of fullpath search errors (VOP_VPTOCNP failures)");
460 STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
461 STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
462 STATNODE_COUNTER(zap_and_exit_bucket_relock_success,
463 "Number of successful removals after relocking");
464 static long zap_and_exit_bucket_fail; STATNODE_ULONG(zap_and_exit_bucket_fail,
465 "Number of times zap_and_exit failed to lock");
466 static long zap_and_exit_bucket_fail2; STATNODE_ULONG(zap_and_exit_bucket_fail2,
467 "Number of times zap_and_exit failed to lock");
468 static long cache_lock_vnodes_cel_3_failures;
469 STATNODE_ULONG(cache_lock_vnodes_cel_3_failures,
470 "Number of times 3-way vnode locking failed");
471 STATNODE_ULONG(numhotneg, "Number of hot negative entries");
472 STATNODE_COUNTER(numneg_evicted,
473 "Number of negative entries evicted when adding a new entry");
474 STATNODE_COUNTER(shrinking_skipped,
475 "Number of times shrinking was already in progress");
477 static void cache_zap_locked(struct namecache *ncp);
478 static int vn_fullpath_hardlink(struct thread *td, struct nameidata *ndp, char **retbuf,
479 char **freebuf, size_t *buflen);
480 static int vn_fullpath_any(struct thread *td, struct vnode *vp, struct vnode *rdir,
481 char *buf, char **retbuf, size_t *buflen);
482 static int vn_fullpath_dir(struct thread *td, struct vnode *vp, struct vnode *rdir,
483 char *buf, char **retbuf, size_t *len, bool slash_prefixed, size_t addend);
485 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
487 static int cache_yield;
488 SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
489 "Number of times cache called yield");
491 static void __noinline
492 cache_maybe_yield(void)
495 if (should_yield()) {
497 kern_yield(PRI_USER);
502 cache_assert_vlp_locked(struct mtx *vlp)
506 mtx_assert(vlp, MA_OWNED);
510 cache_assert_vnode_locked(struct vnode *vp)
514 vlp = VP2VNODELOCK(vp);
515 cache_assert_vlp_locked(vlp);
519 * TODO: With the value stored we can do better than computing the hash based
520 * on the address. The choice of FNV should also be revisited.
523 cache_prehash(struct vnode *vp)
526 vp->v_nchash = fnv_32_buf(&vp, sizeof(vp), FNV1_32_INIT);
530 cache_get_hash(char *name, u_char len, struct vnode *dvp)
533 return (fnv_32_buf(name, len, dvp->v_nchash));
536 static inline struct nchashhead *
537 NCP2BUCKET(struct namecache *ncp)
541 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
542 return (NCHHASH(hash));
545 static inline struct rwlock *
546 NCP2BUCKETLOCK(struct namecache *ncp)
550 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
551 return (HASH2BUCKETLOCK(hash));
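/*
 * Putting the helpers above together: an illustrative (unused) sketch of how
 * a (directory vnode, name) pair maps to its hash chain and to the rwlock
 * protecting that chain.  Compare with cache_remove_cnp() and cache_lookup()
 * below, which do exactly this inline.
 */
#if 0
static void
cache_name_to_bucket_sketch(struct vnode *dvp, struct componentname *cnp,
    struct nchashhead **bucketp, struct rwlock **blpp)
{
	uint32_t hash;

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	*bucketp = NCHHASH(hash);
	*blpp = HASH2BUCKETLOCK(hash);
}
#endif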
556 cache_assert_bucket_locked(struct namecache *ncp, int mode)
560 blp = NCP2BUCKETLOCK(ncp);
561 rw_assert(blp, mode);
564 #define cache_assert_bucket_locked(x, y) do { } while (0)
567 #define cache_sort_vnodes(x, y) _cache_sort_vnodes((void **)(x), (void **)(y))
569 _cache_sort_vnodes(void **p1, void **p2)
573 MPASS(*p1 != NULL || *p2 != NULL);
583 cache_lock_all_buckets(void)
587 for (i = 0; i < numbucketlocks; i++)
588 rw_wlock(&bucketlocks[i]);
592 cache_unlock_all_buckets(void)
596 for (i = 0; i < numbucketlocks; i++)
597 rw_wunlock(&bucketlocks[i]);
601 cache_lock_all_vnodes(void)
605 for (i = 0; i < numvnodelocks; i++)
606 mtx_lock(&vnodelocks[i]);
610 cache_unlock_all_vnodes(void)
614 for (i = 0; i < numvnodelocks; i++)
615 mtx_unlock(&vnodelocks[i]);
619 cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
622 cache_sort_vnodes(&vlp1, &vlp2);
625 if (!mtx_trylock(vlp1))
628 if (!mtx_trylock(vlp2)) {
638 cache_lock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
641 MPASS(vlp1 != NULL || vlp2 != NULL);
651 cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
654 MPASS(vlp1 != NULL || vlp2 != NULL);
663 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
665 struct nchstats snap;
667 if (req->oldptr == NULL)
668 return (SYSCTL_OUT(req, 0, sizeof(snap)));
671 snap.ncs_goodhits = counter_u64_fetch(numposhits);
672 snap.ncs_neghits = counter_u64_fetch(numneghits);
673 snap.ncs_badhits = counter_u64_fetch(numposzaps) +
674 counter_u64_fetch(numnegzaps);
675 snap.ncs_miss = counter_u64_fetch(nummisszap) +
676 counter_u64_fetch(nummiss);
678 return (SYSCTL_OUT(req, &snap, sizeof(snap)));
680 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
681 CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
682 "VFS cache effectiveness statistics");
686 * Grab an atomic snapshot of the name cache hash chain lengths
688 static SYSCTL_NODE(_debug, OID_AUTO, hashstat,
689 CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
693 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
695 struct nchashhead *ncpp;
696 struct namecache *ncp;
697 int i, error, n_nchash, *cntbuf;
700 n_nchash = nchash + 1; /* nchash is max index, not count */
701 if (req->oldptr == NULL)
702 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
703 cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
704 cache_lock_all_buckets();
705 if (n_nchash != nchash + 1) {
706 cache_unlock_all_buckets();
707 free(cntbuf, M_TEMP);
710 /* Scan hash tables counting entries */
711 for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
712 CK_SLIST_FOREACH(ncp, ncpp, nc_hash)
714 cache_unlock_all_buckets();
715 for (error = 0, i = 0; i < n_nchash; i++)
716 if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
718 free(cntbuf, M_TEMP);
721 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
722 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
723 "nchash chain lengths");
726 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
729 struct nchashhead *ncpp;
730 struct namecache *ncp;
732 int count, maxlength, used, pct;
735 return SYSCTL_OUT(req, 0, 4 * sizeof(int));
737 cache_lock_all_buckets();
738 n_nchash = nchash + 1; /* nchash is max index, not count */
742 /* Scan hash tables for applicable entries */
743 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
745 CK_SLIST_FOREACH(ncp, ncpp, nc_hash) {
750 if (maxlength < count)
753 n_nchash = nchash + 1;
754 cache_unlock_all_buckets();
755 pct = (used * 100) / (n_nchash / 100);
756 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
759 error = SYSCTL_OUT(req, &used, sizeof(used));
762 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
765 error = SYSCTL_OUT(req, &pct, sizeof(pct));
770 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
771 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
772 "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
776 * Negative entries management
778 * A variation of the LRU scheme is used.  New entries are hashed into one of
779 * numneglists cold lists. Entries get promoted to the hot list on first hit.
781 * The shrinker will demote the hot list head and evict from a cold list in a
782 * round-robin manner.
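 *
 * In terms of the helpers below: cache_negative_init() marks the entry,
 * cache_negative_insert() appends it to the cold list picked by NCP2NEGLIST(),
 * cache_negative_hit() promotes it to the shared ncneg_hot list on a repeat
 * hit, and cache_negative_zap_one() (invoked from cache_enter_time() once
 * numneg * ncnegfactor exceeds the total entry count) demotes the hot list
 * head and evicts the head of one of the cold lists.
 */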
785 cache_negative_init(struct namecache *ncp)
787 struct negstate *negstate;
789 ncp->nc_flag |= NCF_NEGATIVE;
790 negstate = NCP2NEGSTATE(ncp);
791 negstate->neg_flag = 0;
795 cache_negative_hit(struct namecache *ncp)
797 struct neglist *neglist;
798 struct negstate *negstate;
800 negstate = NCP2NEGSTATE(ncp);
801 if ((negstate->neg_flag & NEG_HOT) != 0)
803 neglist = NCP2NEGLIST(ncp);
804 mtx_lock(&ncneg_hot.nl_lock);
805 mtx_lock(&neglist->nl_lock);
806 if ((negstate->neg_flag & NEG_HOT) == 0) {
808 TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
809 TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
810 negstate->neg_flag |= NEG_HOT;
812 mtx_unlock(&neglist->nl_lock);
813 mtx_unlock(&ncneg_hot.nl_lock);
817 cache_negative_insert(struct namecache *ncp)
819 struct neglist *neglist;
821 MPASS(ncp->nc_flag & NCF_NEGATIVE);
822 cache_assert_bucket_locked(ncp, RA_WLOCKED);
823 neglist = NCP2NEGLIST(ncp);
824 mtx_lock(&neglist->nl_lock);
825 TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
826 mtx_unlock(&neglist->nl_lock);
827 atomic_add_rel_long(&numneg, 1);
831 cache_negative_remove(struct namecache *ncp)
833 struct neglist *neglist;
834 struct negstate *negstate;
835 bool hot_locked = false;
836 bool list_locked = false;
838 cache_assert_bucket_locked(ncp, RA_WLOCKED);
839 neglist = NCP2NEGLIST(ncp);
840 negstate = NCP2NEGSTATE(ncp);
841 if ((negstate->neg_flag & NEG_HOT) != 0) {
843 mtx_lock(&ncneg_hot.nl_lock);
844 if ((negstate->neg_flag & NEG_HOT) == 0) {
846 mtx_lock(&neglist->nl_lock);
850 mtx_lock(&neglist->nl_lock);
852 * We may be racing against promotion in lockless lookup.
854 if ((negstate->neg_flag & NEG_HOT) != 0) {
855 mtx_unlock(&neglist->nl_lock);
857 mtx_lock(&ncneg_hot.nl_lock);
858 mtx_lock(&neglist->nl_lock);
861 if ((negstate->neg_flag & NEG_HOT) != 0) {
862 mtx_assert(&ncneg_hot.nl_lock, MA_OWNED);
863 TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
866 mtx_assert(&neglist->nl_lock, MA_OWNED);
867 TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
870 mtx_unlock(&neglist->nl_lock);
872 mtx_unlock(&ncneg_hot.nl_lock);
873 atomic_subtract_rel_long(&numneg, 1);
877 cache_negative_shrink_select(struct namecache **ncpp,
878 struct neglist **neglistpp)
880 struct neglist *neglist;
881 struct namecache *ncp;
887 for (i = 0; i < numneglists; i++) {
888 neglist = &neglists[(cycle + i) % numneglists];
889 if (TAILQ_FIRST(&neglist->nl_list) == NULL)
891 mtx_lock(&neglist->nl_lock);
892 ncp = TAILQ_FIRST(&neglist->nl_list);
895 mtx_unlock(&neglist->nl_lock);
898 *neglistpp = neglist;
904 cache_negative_zap_one(void)
906 struct namecache *ncp, *ncp2;
907 struct neglist *neglist;
908 struct negstate *negstate;
912 if (mtx_owner(&ncneg_shrink_lock) != NULL ||
913 !mtx_trylock(&ncneg_shrink_lock)) {
914 counter_u64_add(shrinking_skipped, 1);
918 mtx_lock(&ncneg_hot.nl_lock);
919 ncp = TAILQ_FIRST(&ncneg_hot.nl_list);
921 neglist = NCP2NEGLIST(ncp);
922 negstate = NCP2NEGSTATE(ncp);
923 mtx_lock(&neglist->nl_lock);
924 MPASS((negstate->neg_flag & NEG_HOT) != 0);
925 TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
926 TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
927 negstate->neg_flag &= ~NEG_HOT;
929 mtx_unlock(&neglist->nl_lock);
931 mtx_unlock(&ncneg_hot.nl_lock);
933 cache_negative_shrink_select(&ncp, &neglist);
935 mtx_unlock(&ncneg_shrink_lock);
939 MPASS(ncp->nc_flag & NCF_NEGATIVE);
940 dvlp = VP2VNODELOCK(ncp->nc_dvp);
941 blp = NCP2BUCKETLOCK(ncp);
942 mtx_unlock(&neglist->nl_lock);
946 * Enter SMR to safely check the negative list.
947 * Even if the found pointer matches, the entry may now be reallocated
948 * and used by a different vnode.
951 ncp2 = TAILQ_FIRST(&neglist->nl_list);
952 if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) ||
953 blp != NCP2BUCKETLOCK(ncp2)) {
958 SDT_PROBE2(vfs, namecache, shrink_negative, done, ncp->nc_dvp,
960 cache_zap_locked(ncp);
961 counter_u64_add(numneg_evicted, 1);
969 * cache_zap_locked():
971 * Removes a namecache entry from cache, whether it contains an actual
972 * pointer to a vnode or is just a negative cache entry.
975 cache_zap_locked(struct namecache *ncp)
977 struct nchashhead *ncpp;
979 if (!(ncp->nc_flag & NCF_NEGATIVE))
980 cache_assert_vnode_locked(ncp->nc_vp);
981 cache_assert_vnode_locked(ncp->nc_dvp);
982 cache_assert_bucket_locked(ncp, RA_WLOCKED);
984 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp,
985 (ncp->nc_flag & NCF_NEGATIVE) ? NULL : ncp->nc_vp);
987 cache_ncp_invalidate(ncp);
989 ncpp = NCP2BUCKET(ncp);
990 CK_SLIST_REMOVE(ncpp, ncp, namecache, nc_hash);
991 if (!(ncp->nc_flag & NCF_NEGATIVE)) {
992 SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
993 ncp->nc_name, ncp->nc_vp);
994 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
995 if (ncp == ncp->nc_vp->v_cache_dd) {
996 vn_seqc_write_begin_unheld(ncp->nc_vp);
997 ncp->nc_vp->v_cache_dd = NULL;
998 vn_seqc_write_end(ncp->nc_vp);
1001 SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp,
1003 cache_negative_remove(ncp);
1005 if (ncp->nc_flag & NCF_ISDOTDOT) {
1006 if (ncp == ncp->nc_dvp->v_cache_dd) {
1007 vn_seqc_write_begin_unheld(ncp->nc_dvp);
1008 ncp->nc_dvp->v_cache_dd = NULL;
1009 vn_seqc_write_end(ncp->nc_dvp);
1012 LIST_REMOVE(ncp, nc_src);
1013 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
1014 ncp->nc_flag |= NCF_DVDROP;
1015 counter_u64_add(numcachehv, -1);
1018 atomic_subtract_rel_long(&numcache, 1);
1022 cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
1026 MPASS(ncp->nc_dvp == vp);
1027 MPASS(ncp->nc_flag & NCF_NEGATIVE);
1028 cache_assert_vnode_locked(vp);
1030 blp = NCP2BUCKETLOCK(ncp);
1032 cache_zap_locked(ncp);
1037 cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
1040 struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
1043 MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
1044 cache_assert_vnode_locked(vp);
1046 if (ncp->nc_flag & NCF_NEGATIVE) {
1047 if (*vlpp != NULL) {
1051 cache_zap_negative_locked_vnode_kl(ncp, vp);
1055 pvlp = VP2VNODELOCK(vp);
1056 blp = NCP2BUCKETLOCK(ncp);
1057 vlp1 = VP2VNODELOCK(ncp->nc_dvp);
1058 vlp2 = VP2VNODELOCK(ncp->nc_vp);
1060 if (*vlpp == vlp1 || *vlpp == vlp2) {
1064 if (*vlpp != NULL) {
1068 cache_sort_vnodes(&vlp1, &vlp2);
1073 if (!mtx_trylock(vlp1))
1079 cache_zap_locked(ncp);
1081 if (to_unlock != NULL)
1082 mtx_unlock(to_unlock);
1089 MPASS(*vlpp == NULL);
1094 static int __noinline
1095 cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
1097 struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
1101 MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
1102 cache_assert_vnode_locked(vp);
1104 pvlp = VP2VNODELOCK(vp);
1105 if (ncp->nc_flag & NCF_NEGATIVE) {
1106 cache_zap_negative_locked_vnode_kl(ncp, vp);
1110 blp = NCP2BUCKETLOCK(ncp);
1111 vlp1 = VP2VNODELOCK(ncp->nc_dvp);
1112 vlp2 = VP2VNODELOCK(ncp->nc_vp);
1113 cache_sort_vnodes(&vlp1, &vlp2);
1118 if (!mtx_trylock(vlp1)) {
1125 cache_zap_locked(ncp);
1127 mtx_unlock(to_unlock);
1134 * If trylocking failed we can get here. We know enough to take all needed locks
1135 * in the right order and re-lookup the entry.
1138 cache_zap_unlocked_bucket(struct namecache *ncp, struct componentname *cnp,
1139 struct vnode *dvp, struct mtx *dvlp, struct mtx *vlp, uint32_t hash,
1142 struct namecache *rncp;
1144 cache_assert_bucket_locked(ncp, RA_UNLOCKED);
1146 cache_sort_vnodes(&dvlp, &vlp);
1147 cache_lock_vnodes(dvlp, vlp);
1149 CK_SLIST_FOREACH(rncp, (NCHHASH(hash)), nc_hash) {
1150 if (rncp == ncp && rncp->nc_dvp == dvp &&
1151 rncp->nc_nlen == cnp->cn_namelen &&
1152 !bcmp(rncp->nc_name, cnp->cn_nameptr, rncp->nc_nlen))
1156 cache_zap_locked(rncp);
1158 cache_unlock_vnodes(dvlp, vlp);
1159 counter_u64_add(zap_and_exit_bucket_relock_success, 1);
1164 cache_unlock_vnodes(dvlp, vlp);
1168 static int __noinline
1169 cache_zap_wlocked_bucket(struct namecache *ncp, struct componentname *cnp,
1170 uint32_t hash, struct rwlock *blp)
1172 struct mtx *dvlp, *vlp;
1175 cache_assert_bucket_locked(ncp, RA_WLOCKED);
1177 dvlp = VP2VNODELOCK(ncp->nc_dvp);
1179 if (!(ncp->nc_flag & NCF_NEGATIVE))
1180 vlp = VP2VNODELOCK(ncp->nc_vp);
1181 if (cache_trylock_vnodes(dvlp, vlp) == 0) {
1182 cache_zap_locked(ncp);
1184 cache_unlock_vnodes(dvlp, vlp);
1190 return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp));
1193 static int __noinline
1194 cache_zap_rlocked_bucket(struct namecache *ncp, struct componentname *cnp,
1195 uint32_t hash, struct rwlock *blp)
1197 struct mtx *dvlp, *vlp;
1200 cache_assert_bucket_locked(ncp, RA_RLOCKED);
1202 dvlp = VP2VNODELOCK(ncp->nc_dvp);
1204 if (!(ncp->nc_flag & NCF_NEGATIVE))
1205 vlp = VP2VNODELOCK(ncp->nc_vp);
1206 if (cache_trylock_vnodes(dvlp, vlp) == 0) {
1209 cache_zap_locked(ncp);
1211 cache_unlock_vnodes(dvlp, vlp);
1217 return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp));
1221 cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp,
1222 struct mtx **vlpp1, struct mtx **vlpp2)
1224 struct mtx *dvlp, *vlp;
1226 cache_assert_bucket_locked(ncp, RA_WLOCKED);
1228 dvlp = VP2VNODELOCK(ncp->nc_dvp);
1230 if (!(ncp->nc_flag & NCF_NEGATIVE))
1231 vlp = VP2VNODELOCK(ncp->nc_vp);
1232 cache_sort_vnodes(&dvlp, &vlp);
1234 if (*vlpp1 == dvlp && *vlpp2 == vlp) {
1235 cache_zap_locked(ncp);
1236 cache_unlock_vnodes(dvlp, vlp);
1249 if (cache_trylock_vnodes(dvlp, vlp) == 0) {
1250 cache_zap_locked(ncp);
1251 cache_unlock_vnodes(dvlp, vlp);
1266 cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp)
1276 static int __noinline
1277 cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1278 struct timespec *tsp, int *ticksp)
1283 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
1284 dvp, cnp->cn_nameptr);
1285 counter_u64_add(dothits, 1);
1286 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
1293 * When we lookup "." we still can be asked to lock it
1296 ltype = cnp->cn_lkflags & LK_TYPE_MASK;
1297 if (ltype != VOP_ISLOCKED(*vpp)) {
1298 if (ltype == LK_EXCLUSIVE) {
1299 vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
1300 if (VN_IS_DOOMED((*vpp))) {
1301 /* forced unmount */
1307 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
1312 static __noinline int
1313 cache_remove_cnp(struct vnode *dvp, struct componentname *cnp)
1315 struct namecache *ncp;
1317 struct mtx *dvlp, *dvlp2;
1321 if (cnp->cn_namelen == 2 &&
1322 cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
1323 dvlp = VP2VNODELOCK(dvp);
1327 ncp = dvp->v_cache_dd;
1332 SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp);
1335 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
1336 if (ncp->nc_dvp != dvp)
1337 panic("dvp %p v_cache_dd %p\n", dvp, ncp);
1338 if (!cache_zap_locked_vnode_kl2(ncp,
1341 MPASS(dvp->v_cache_dd == NULL);
1347 vn_seqc_write_begin(dvp);
1348 dvp->v_cache_dd = NULL;
1349 vn_seqc_write_end(dvp);
1354 SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp);
1358 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
1359 blp = HASH2BUCKETLOCK(hash);
1361 if (CK_SLIST_EMPTY(NCHHASH(hash)))
1366 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1367 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
1368 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
1372 /* We failed to find an entry */
1378 error = cache_zap_wlocked_bucket(ncp, cnp, hash, blp);
1379 if (__predict_false(error != 0)) {
1380 zap_and_exit_bucket_fail++;
1381 cache_maybe_yield();
1384 counter_u64_add(numposzaps, 1);
1386 SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp);
1389 SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp);
1390 counter_u64_add(nummisszap, 1);
1395 * Lookup a name in the name cache
1399 * - dvp: Parent directory in which to search.
1400 * - vpp: Return argument. Will contain desired vnode on cache hit.
1401 * - cnp: Parameters of the name search. The most interesting bits of
1402 * the cn_flags field have the following meanings:
1403 * - MAKEENTRY: If clear, free an entry from the cache rather than look it up.
1405 * - ISDOTDOT: Must be set if and only if cn_nameptr == ".."
1406 * - tsp: Return storage for cache timestamp. On a successful (positive
1407 * or negative) lookup, tsp will be filled with any timespec that
1408 * was stored when this cache entry was created. However, it will
1409 * be clear for "." entries.
1410 * - ticks: Return storage for alternate cache timestamp. On a successful
1411 * (positive or negative) lookup, it will contain the ticks value
1412 * that was current when the cache entry was created, unless cnp
1417 * - -1: A positive cache hit. vpp will contain the desired vnode.
1418 * - ENOENT: A negative cache hit, or dvp was recycled out from under us due
1419 * to a forced unmount. vpp will not be modified. If the entry
1420 * is a whiteout, then the ISWHITEOUT flag will be set in cnp->cn_flags.
1422 * - 0: A cache miss. vpp will not be modified.
1426 * On a cache hit, vpp will be returned locked and ref'd. If we're looking up
1427 * "..", dvp is unlocked.  If we're looking up ".", an extra ref is taken, but the
1428 * lock is not recursively acquired.
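 *
 * A typical caller, sketched here for illustration (compare vfs_cache_lookup()
 * near the end of this file):
 *
 *	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
 *	if (error == 0)
 *		... miss: consult the filesystem via VOP_CACHEDLOOKUP() ...
 *	else if (error == -1)
 *		... positive hit: *vpp is referenced and locked as requested ...
 *	else
 *		... ENOENT (negative hit or doomed dvp) or another error ...
 */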
1431 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1432 struct timespec *tsp, int *ticksp)
1434 struct namecache_ts *ncp_ts;
1435 struct namecache *ncp;
1436 struct negstate *negstate;
1442 bool try_smr, doing_smr, whiteout;
1445 if (__predict_false(!doingcache)) {
1446 cnp->cn_flags &= ~MAKEENTRY;
1451 if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'))
1452 return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp));
1454 if ((cnp->cn_flags & MAKEENTRY) == 0) {
1455 cache_remove_cnp(dvp, cnp);
1460 if (cnp->cn_nameiop == CREATE)
1467 if (cnp->cn_namelen == 2 &&
1468 cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
1469 counter_u64_add(dotdothits, 1);
1470 dvlp = VP2VNODELOCK(dvp);
1472 ncp = dvp->v_cache_dd;
1474 SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
1479 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
1480 if (ncp->nc_flag & NCF_NEGATIVE)
1486 /* Return failure if negative entry was found. */
1488 goto negative_success;
1489 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
1490 dvp, cnp->cn_nameptr, *vpp);
1491 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
1493 cache_out_ts(ncp, tsp, ticksp);
1494 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
1495 NCF_DTS && tsp != NULL) {
1496 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
1497 *tsp = ncp_ts->nc_dotdottime;
1502 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
1509 blp = HASH2BUCKETLOCK(hash);
1513 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1514 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
1515 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
1519 /* We failed to find an entry */
1520 if (__predict_false(ncp == NULL)) {
1525 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
1527 counter_u64_add(nummiss, 1);
1531 if (ncp->nc_flag & NCF_NEGATIVE)
1532 goto negative_success;
1534 /* We found a "positive" match, return the vnode */
1535 counter_u64_add(numposhits, 1);
1537 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
1538 dvp, cnp->cn_nameptr, *vpp, ncp);
1539 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
1541 cache_out_ts(ncp, tsp, ticksp);
1544 * On success we return a locked and ref'd vnode as per the lookup
1548 ltype = 0; /* silence gcc warning */
1549 if (cnp->cn_flags & ISDOTDOT) {
1550 ltype = VOP_ISLOCKED(dvp);
1554 if (!cache_ncp_canuse(ncp)) {
1559 vs = vget_prep_smr(*vpp);
1561 if (__predict_false(vs == VGET_NONE)) {
1566 vs = vget_prep(*vpp);
1567 cache_lookup_unlock(blp, dvlp);
1569 error = vget_finish(*vpp, cnp->cn_lkflags, vs);
1570 if (cnp->cn_flags & ISDOTDOT) {
1571 vn_lock(dvp, ltype | LK_RETRY);
1572 if (VN_IS_DOOMED(dvp)) {
1583 if ((cnp->cn_flags & ISLASTCN) &&
1584 (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
1585 ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
1590 /* We found a negative match, and want to create it, so purge */
1591 if (cnp->cn_nameiop == CREATE) {
1593 counter_u64_add(numnegzaps, 1);
1597 SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp, ncp->nc_name);
1598 cache_out_ts(ncp, tsp, ticksp);
1599 counter_u64_add(numneghits, 1);
1600 whiteout = (ncp->nc_flag & NCF_WHITE);
1604 * We need to take locks to promote an entry.
1606 negstate = NCP2NEGSTATE(ncp);
1607 if ((negstate->neg_flag & NEG_HOT) == 0 ||
1608 !cache_ncp_canuse(ncp)) {
1615 cache_negative_hit(ncp);
1616 cache_lookup_unlock(blp, dvlp);
1619 cnp->cn_flags |= ISWHITEOUT;
1625 error = cache_zap_rlocked_bucket(ncp, cnp, hash, blp);
1627 error = cache_zap_locked_vnode(ncp, dvp);
1628 if (__predict_false(error != 0)) {
1629 zap_and_exit_bucket_fail2++;
1630 cache_maybe_yield();
1637 struct celockstate {
1639 struct rwlock *blp[2];
1641 CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3));
1642 CTASSERT((nitems(((struct celockstate *)0)->blp) == 2));
1645 cache_celockstate_init(struct celockstate *cel)
1648 bzero(cel, sizeof(*cel));
1652 cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp,
1655 struct mtx *vlp1, *vlp2;
1657 MPASS(cel->vlp[0] == NULL);
1658 MPASS(cel->vlp[1] == NULL);
1659 MPASS(cel->vlp[2] == NULL);
1661 MPASS(vp != NULL || dvp != NULL);
1663 vlp1 = VP2VNODELOCK(vp);
1664 vlp2 = VP2VNODELOCK(dvp);
1665 cache_sort_vnodes(&vlp1, &vlp2);
1676 cache_unlock_vnodes_cel(struct celockstate *cel)
1679 MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL);
1681 if (cel->vlp[0] != NULL)
1682 mtx_unlock(cel->vlp[0]);
1683 if (cel->vlp[1] != NULL)
1684 mtx_unlock(cel->vlp[1]);
1685 if (cel->vlp[2] != NULL)
1686 mtx_unlock(cel->vlp[2]);
1690 cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
1695 cache_assert_vlp_locked(cel->vlp[0]);
1696 cache_assert_vlp_locked(cel->vlp[1]);
1697 MPASS(cel->vlp[2] == NULL);
1700 vlp = VP2VNODELOCK(vp);
1703 if (vlp >= cel->vlp[1]) {
1706 if (mtx_trylock(vlp))
1708 cache_lock_vnodes_cel_3_failures++;
1709 cache_unlock_vnodes_cel(cel);
1710 if (vlp < cel->vlp[0]) {
1712 mtx_lock(cel->vlp[0]);
1713 mtx_lock(cel->vlp[1]);
1715 if (cel->vlp[0] != NULL)
1716 mtx_lock(cel->vlp[0]);
1718 mtx_lock(cel->vlp[1]);
1728 cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1,
1729 struct rwlock *blp2)
1732 MPASS(cel->blp[0] == NULL);
1733 MPASS(cel->blp[1] == NULL);
1735 cache_sort_vnodes(&blp1, &blp2);
1746 cache_unlock_buckets_cel(struct celockstate *cel)
1749 if (cel->blp[0] != NULL)
1750 rw_wunlock(cel->blp[0]);
1751 rw_wunlock(cel->blp[1]);
1755 * Lock part of the cache affected by the insertion.
1757 * This means vnodelocks for dvp, vp and the relevant bucketlock.
1758 * However, insertion can result in removal of an old entry. In this
1759 * case we have an additional vnode and bucketlock pair to lock. If the
1760 * entry is negative, ncelock is locked instead of the vnode.
1762 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while
1763 * preserving the locking order (smaller address first).
1766 cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
1769 struct namecache *ncp;
1770 struct rwlock *blps[2];
1772 blps[0] = HASH2BUCKETLOCK(hash);
1775 cache_lock_vnodes_cel(cel, dvp, vp);
1776 if (vp == NULL || vp->v_type != VDIR)
1778 ncp = vp->v_cache_dd;
1781 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1783 MPASS(ncp->nc_dvp == vp);
1784 blps[1] = NCP2BUCKETLOCK(ncp);
1785 if (ncp->nc_flag & NCF_NEGATIVE)
1787 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
1790 * All vnodes got re-locked. Re-validate the state and if
1791 * nothing changed we are done. Otherwise restart.
1793 if (ncp == vp->v_cache_dd &&
1794 (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
1795 blps[1] == NCP2BUCKETLOCK(ncp) &&
1796 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
1798 cache_unlock_vnodes_cel(cel);
1803 cache_lock_buckets_cel(cel, blps[0], blps[1]);
1807 cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
1810 struct namecache *ncp;
1811 struct rwlock *blps[2];
1813 blps[0] = HASH2BUCKETLOCK(hash);
1816 cache_lock_vnodes_cel(cel, dvp, vp);
1817 ncp = dvp->v_cache_dd;
1820 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
1822 MPASS(ncp->nc_dvp == dvp);
1823 blps[1] = NCP2BUCKETLOCK(ncp);
1824 if (ncp->nc_flag & NCF_NEGATIVE)
1826 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
1828 if (ncp == dvp->v_cache_dd &&
1829 (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
1830 blps[1] == NCP2BUCKETLOCK(ncp) &&
1831 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
1833 cache_unlock_vnodes_cel(cel);
1838 cache_lock_buckets_cel(cel, blps[0], blps[1]);
1842 cache_enter_unlock(struct celockstate *cel)
1845 cache_unlock_buckets_cel(cel);
1846 cache_unlock_vnodes_cel(cel);
1849 static void __noinline
1850 cache_enter_dotdot_prep(struct vnode *dvp, struct vnode *vp,
1851 struct componentname *cnp)
1853 struct celockstate cel;
1854 struct namecache *ncp;
1858 if (dvp->v_cache_dd == NULL)
1860 len = cnp->cn_namelen;
1861 cache_celockstate_init(&cel);
1862 hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
1863 cache_enter_lock_dd(&cel, dvp, vp, hash);
1864 vn_seqc_write_begin(dvp);
1865 ncp = dvp->v_cache_dd;
1866 if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT)) {
1867 KASSERT(ncp->nc_dvp == dvp, ("wrong isdotdot parent"));
1868 cache_zap_locked(ncp);
1872 dvp->v_cache_dd = NULL;
1873 vn_seqc_write_end(dvp);
1874 cache_enter_unlock(&cel);
1879 * Add an entry to the cache.
1882 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
1883 struct timespec *tsp, struct timespec *dtsp)
1885 struct celockstate cel;
1886 struct namecache *ncp, *n2, *ndd;
1887 struct namecache_ts *ncp_ts, *n2_ts;
1888 struct nchashhead *ncpp;
1894 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
1895 VNASSERT(vp == NULL || !VN_IS_DOOMED(vp), vp,
1896 ("cache_enter: Adding a doomed vnode"));
1897 VNASSERT(dvp == NULL || !VN_IS_DOOMED(dvp), dvp,
1898 ("cache_enter: Doomed vnode used as src"));
1901 if (__predict_false(!doingcache))
1906 if (__predict_false(cnp->cn_nameptr[0] == '.')) {
1907 if (cnp->cn_namelen == 1)
1909 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
1910 cache_enter_dotdot_prep(dvp, vp, cnp);
1911 flag = NCF_ISDOTDOT;
1916 * Avoid blowout in namecache entries.
1918 lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
1919 if (__predict_false(lnumcache >= ncsize)) {
1920 atomic_add_long(&numcache, -1);
1921 counter_u64_add(numdrops, 1);
1925 cache_celockstate_init(&cel);
1930 * Calculate the hash key and setup as much of the new
1931 * namecache entry as possible before acquiring the lock.
1933 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
1934 ncp->nc_flag = flag | NCF_WIP;
1937 cache_negative_init(ncp);
1940 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
1941 ncp_ts->nc_time = *tsp;
1942 ncp_ts->nc_ticks = ticks;
1943 ncp_ts->nc_nc.nc_flag |= NCF_TS;
1945 ncp_ts->nc_dotdottime = *dtsp;
1946 ncp_ts->nc_nc.nc_flag |= NCF_DTS;
1949 len = ncp->nc_nlen = cnp->cn_namelen;
1950 hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
1951 memcpy(ncp->nc_name, cnp->cn_nameptr, len);
1952 ncp->nc_name[len] = '\0';
1953 cache_enter_lock(&cel, dvp, vp, hash);
1956 * See if this vnode or negative entry is already in the cache
1957 * with this name. This can happen with concurrent lookups of
1958 * the same path name.
1960 ncpp = NCHHASH(hash);
1961 CK_SLIST_FOREACH(n2, ncpp, nc_hash) {
1962 if (n2->nc_dvp == dvp &&
1963 n2->nc_nlen == cnp->cn_namelen &&
1964 !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
1965 MPASS(cache_ncp_canuse(n2));
1966 if ((n2->nc_flag & NCF_NEGATIVE) != 0)
1968 ("%s: found entry pointing to a different vnode (%p != %p)",
1969 __func__, NULL, vp));
1971 KASSERT(n2->nc_vp == vp,
1972 ("%s: found entry pointing to a different vnode (%p != %p)",
1973 __func__, n2->nc_vp, vp));
1975 KASSERT((n2->nc_flag & NCF_TS) != 0,
1977 n2_ts = __containerof(n2, struct namecache_ts, nc_nc);
1978 n2_ts->nc_time = ncp_ts->nc_time;
1979 n2_ts->nc_ticks = ncp_ts->nc_ticks;
1981 n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime;
1982 n2_ts->nc_nc.nc_flag |= NCF_DTS;
1985 goto out_unlock_free;
1989 if (flag == NCF_ISDOTDOT) {
1991 * See if we are trying to add a .. entry, but some other lookup
1992 * has already populated the v_cache_dd pointer.
1994 if (dvp->v_cache_dd != NULL)
1995 goto out_unlock_free;
1996 KASSERT(vp == NULL || vp->v_type == VDIR,
1997 ("wrong vnode type %p", vp));
1998 vn_seqc_write_begin(dvp);
1999 dvp->v_cache_dd = ncp;
2000 vn_seqc_write_end(dvp);
2004 if (vp->v_type == VDIR) {
2005 if (flag != NCF_ISDOTDOT) {
2007 * For this case, the cache entry maps both the
2008 * directory name in it and the name ".." for the
2009 * directory's parent.
2011 vn_seqc_write_begin(vp);
2012 if ((ndd = vp->v_cache_dd) != NULL) {
2013 if ((ndd->nc_flag & NCF_ISDOTDOT) != 0)
2014 cache_zap_locked(ndd);
2018 vp->v_cache_dd = ncp;
2019 vn_seqc_write_end(vp);
2022 if (vp->v_cache_dd != NULL) {
2023 vn_seqc_write_begin(vp);
2024 vp->v_cache_dd = NULL;
2025 vn_seqc_write_end(vp);
2030 if (flag != NCF_ISDOTDOT) {
2031 if (LIST_EMPTY(&dvp->v_cache_src)) {
2033 counter_u64_add(numcachehv, 1);
2035 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
2039 * If the entry is "negative", we place it into the
2040 * "negative" cache queue, otherwise, we place it into the
2041 * destination vnode's cache entries queue.
2044 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
2045 SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name,
2048 if (cnp->cn_flags & ISWHITEOUT)
2049 ncp->nc_flag |= NCF_WHITE;
2050 cache_negative_insert(ncp);
2051 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp,
2056 * Insert the new namecache entry into the appropriate chain
2057 * within the cache entries table.
2059 CK_SLIST_INSERT_HEAD(ncpp, ncp, nc_hash);
2061 atomic_thread_fence_rel();
2063 * Mark the entry as fully constructed.
2064 * It is immutable past this point until its removal.
2066 atomic_store_char(&ncp->nc_flag, ncp->nc_flag & ~NCF_WIP);
2068 cache_enter_unlock(&cel);
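/*
 * With the default ncnegfactor of 5, this evicts one negative entry
 * whenever negative entries make up more than roughly 1/5 of the cache.
 */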
2069 if (numneg * ncnegfactor > lnumcache)
2070 cache_negative_zap_one();
2074 cache_enter_unlock(&cel);
2075 atomic_add_long(&numcache, -1);
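#if 0
/*
 * Illustrative sketch of how a filesystem typically feeds this function after
 * resolving a lookup itself; cache_enter() is the convenience wrapper around
 * cache_enter_time() with NULL timestamps (see sys/vnode.h).  The surrounding
 * variables (error, dvp, vpp, cnp) belong to a hypothetical lookup routine.
 */
	if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
		cache_enter(dvp, *vpp, cnp);	/* positive entry */
	else if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
		cache_enter(dvp, NULL, cnp);	/* negative entry */
#endif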
2081 cache_roundup_2(u_int val)
2085 for (res = 1; res <= val; res <<= 1)
2091 static struct nchashhead *
2092 nchinittbl(u_long elements, u_long *hashmask)
2094 struct nchashhead *hashtbl;
2097 hashsize = cache_roundup_2(elements) / 2;
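/*
 * Example: elements == 1000 gives cache_roundup_2() == 1024, so
 * hashsize == 512 and the resulting mask is 0x1ff.
 */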
2099 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), M_VFSCACHE, M_WAITOK);
2100 for (i = 0; i < hashsize; i++)
2101 CK_SLIST_INIT(&hashtbl[i]);
2102 *hashmask = hashsize - 1;
2107 ncfreetbl(struct nchashhead *hashtbl)
2110 free(hashtbl, M_VFSCACHE);
2114 * Name cache initialization, from vfs_init() when we are booting
2117 nchinit(void *dummy __unused)
2121 cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL_SIZE,
2122 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT);
2123 cache_zone_small_ts = uma_zcreate("STS VFS Cache", CACHE_ZONE_SMALL_TS_SIZE,
2124 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT);
2125 cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE_SIZE,
2126 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT);
2127 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", CACHE_ZONE_LARGE_TS_SIZE,
2128 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT);
2130 VFS_SMR_ZONE_SET(cache_zone_small);
2131 VFS_SMR_ZONE_SET(cache_zone_small_ts);
2132 VFS_SMR_ZONE_SET(cache_zone_large);
2133 VFS_SMR_ZONE_SET(cache_zone_large_ts);
2135 ncsize = desiredvnodes * ncsizefactor;
2136 nchashtbl = nchinittbl(desiredvnodes * 2, &nchash);
2137 ncbuckethash = cache_roundup_2(mp_ncpus * mp_ncpus) - 1;
2138 if (ncbuckethash < 7) /* arbitrarily chosen to avoid having one lock */
2140 if (ncbuckethash > nchash)
2141 ncbuckethash = nchash;
2142 bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
2144 for (i = 0; i < numbucketlocks; i++)
2145 rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
2146 ncvnodehash = ncbuckethash;
2147 vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
2149 for (i = 0; i < numvnodelocks; i++)
2150 mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
2151 ncpurgeminvnodes = numbucketlocks * 2;
2153 neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE,
2155 for (i = 0; i < numneglists; i++) {
2156 mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF);
2157 TAILQ_INIT(&neglists[i].nl_list);
2159 mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF);
2160 TAILQ_INIT(&ncneg_hot.nl_list);
2162 mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);
2164 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
2167 cache_vnode_init(struct vnode *vp)
2170 LIST_INIT(&vp->v_cache_src);
2171 TAILQ_INIT(&vp->v_cache_dst);
2172 vp->v_cache_dd = NULL;
2177 cache_changesize(u_long newmaxvnodes)
2179 struct nchashhead *new_nchashtbl, *old_nchashtbl;
2180 u_long new_nchash, old_nchash;
2181 struct namecache *ncp;
2186 newncsize = newmaxvnodes * ncsizefactor;
2187 newmaxvnodes = cache_roundup_2(newmaxvnodes * 2);
2188 if (newmaxvnodes < numbucketlocks)
2189 newmaxvnodes = numbucketlocks;
2191 new_nchashtbl = nchinittbl(newmaxvnodes, &new_nchash);
2192 /* If same hash table size, nothing to do */
2193 if (nchash == new_nchash) {
2194 ncfreetbl(new_nchashtbl);
2198 * Move everything from the old hash table to the new table.
2199 * None of the namecache entries can be removed while we do this, because
2200 * doing so requires taking them off the hash table, whose locks we hold.
2202 cache_lock_all_vnodes();
2203 cache_lock_all_buckets();
2204 old_nchashtbl = nchashtbl;
2205 old_nchash = nchash;
2206 nchashtbl = new_nchashtbl;
2207 nchash = new_nchash;
2208 for (i = 0; i <= old_nchash; i++) {
2209 while ((ncp = CK_SLIST_FIRST(&old_nchashtbl[i])) != NULL) {
2210 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen,
2212 CK_SLIST_REMOVE(&old_nchashtbl[i], ncp, namecache, nc_hash);
2213 CK_SLIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
2217 cache_unlock_all_buckets();
2218 cache_unlock_all_vnodes();
2219 ncfreetbl(old_nchashtbl);
2223 * Invalidate all entries from and to a particular vnode.
2226 cache_purge_impl(struct vnode *vp)
2228 TAILQ_HEAD(, namecache) ncps;
2229 struct namecache *ncp, *nnp;
2230 struct mtx *vlp, *vlp2;
2233 vlp = VP2VNODELOCK(vp);
2235 mtx_assert(vlp, MA_OWNED);
2237 while (!LIST_EMPTY(&vp->v_cache_src)) {
2238 ncp = LIST_FIRST(&vp->v_cache_src);
2239 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
2241 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
2243 while (!TAILQ_EMPTY(&vp->v_cache_dst)) {
2244 ncp = TAILQ_FIRST(&vp->v_cache_dst);
2245 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
2247 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
2249 ncp = vp->v_cache_dd;
2251 KASSERT(ncp->nc_flag & NCF_ISDOTDOT,
2252 ("lost dotdot link"));
2253 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
2255 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
2257 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
2261 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
2267 cache_purge(struct vnode *vp)
2271 SDT_PROBE1(vfs, namecache, purge, done, vp);
2272 if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
2273 vp->v_cache_dd == NULL)
2275 vlp = VP2VNODELOCK(vp);
2277 cache_purge_impl(vp);
2281 * Only to be used by vgone.
2284 cache_purge_vgone(struct vnode *vp)
2288 VNPASS(VN_IS_DOOMED(vp), vp);
2289 vlp = VP2VNODELOCK(vp);
2290 if (!(LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
2291 vp->v_cache_dd == NULL)) {
2293 cache_purge_impl(vp);
2294 mtx_assert(vlp, MA_NOTOWNED);
2299 * All the NULL pointer state we found above may be transient.
2300 * Serialize against a possible thread doing cache_purge.
2302 mtx_wait_unlocked(vlp);
2303 if (!(LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
2304 vp->v_cache_dd == NULL)) {
2306 cache_purge_impl(vp);
2307 mtx_assert(vlp, MA_NOTOWNED);
2314 * Invalidate all negative entries for a particular directory vnode.
2317 cache_purge_negative(struct vnode *vp)
2319 TAILQ_HEAD(, namecache) ncps;
2320 struct namecache *ncp, *nnp;
2323 CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
2324 SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
2325 if (LIST_EMPTY(&vp->v_cache_src))
2328 vlp = VP2VNODELOCK(vp);
2330 LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
2331 if (!(ncp->nc_flag & NCF_NEGATIVE))
2333 cache_zap_negative_locked_vnode_kl(ncp, vp);
2334 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
2337 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
2343 cache_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
2344 struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
2347 ASSERT_VOP_IN_SEQC(fdvp);
2348 ASSERT_VOP_IN_SEQC(fvp);
2349 ASSERT_VOP_IN_SEQC(tdvp);
2351 ASSERT_VOP_IN_SEQC(tvp);
2356 KASSERT(!cache_remove_cnp(tdvp, tcnp),
2357 ("%s: lingering negative entry", __func__));
2359 cache_remove_cnp(tdvp, tcnp);
2364 * Flush all entries referencing a particular filesystem.
2367 cache_purgevfs(struct mount *mp, bool force)
2369 TAILQ_HEAD(, namecache) ncps;
2370 struct mtx *vlp1, *vlp2;
2372 struct nchashhead *bucket;
2373 struct namecache *ncp, *nnp;
2374 u_long i, j, n_nchash;
2377 /* Scan hash tables for applicable entries */
2378 SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
2379 if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes)
2382 n_nchash = nchash + 1;
2384 for (i = 0; i < numbucketlocks; i++) {
2385 blp = (struct rwlock *)&bucketlocks[i];
2387 for (j = i; j < n_nchash; j += numbucketlocks) {
2389 bucket = &nchashtbl[j];
2390 CK_SLIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
2391 cache_assert_bucket_locked(ncp, RA_WLOCKED);
2392 if (ncp->nc_dvp->v_mount != mp)
2394 error = cache_zap_wlocked_bucket_kl(ncp, blp,
2398 TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst);
2402 if (vlp1 == NULL && vlp2 == NULL)
2403 cache_maybe_yield();
2410 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
2416 * Perform canonical checks and a cache lookup, and pass on to the filesystem
2417 * through the vop_cachedlookup only if needed.
2421 vfs_cache_lookup(struct vop_lookup_args *ap)
2425 struct vnode **vpp = ap->a_vpp;
2426 struct componentname *cnp = ap->a_cnp;
2427 int flags = cnp->cn_flags;
2432 if (dvp->v_type != VDIR)
2435 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
2436 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
2439 error = vn_dir_check_exec(dvp, cnp);
2443 error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
2445 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
2451 /* Implementation of the getcwd syscall. */
2453 sys___getcwd(struct thread *td, struct __getcwd_args *uap)
2459 buflen = uap->buflen;
2460 if (__predict_false(buflen < 2))
2462 if (buflen > MAXPATHLEN)
2463 buflen = MAXPATHLEN;
2465 buf = uma_zalloc(namei_zone, M_WAITOK);
2466 error = vn_getcwd(td, buf, &retbuf, &buflen);
2468 error = copyout(retbuf, uap->buf, buflen);
2469 uma_zfree(namei_zone, buf);
2474 vn_getcwd(struct thread *td, char *buf, char **retbuf, size_t *buflen)
2480 error = vn_fullpath_any(td, pwd->pwd_cdir, pwd->pwd_rdir, buf, retbuf, buflen);
2484 if (KTRPOINT(curthread, KTR_NAMEI) && error == 0)
2491 kern___realpathat(struct thread *td, int fd, const char *path, char *buf,
2492 size_t size, int flags, enum uio_seg pathseg)
2494 struct nameidata nd;
2495 char *retbuf, *freebuf;
2500 NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | SAVENAME | WANTPARENT | AUDITVNODE1,
2501 pathseg, path, fd, &cap_fstat_rights, td);
2502 if ((error = namei(&nd)) != 0)
2504 error = vn_fullpath_hardlink(td, &nd, &retbuf, &freebuf, &size);
2506 error = copyout(retbuf, buf, size);
2507 free(freebuf, M_TEMP);
2514 sys___realpathat(struct thread *td, struct __realpathat_args *uap)
2517 return (kern___realpathat(td, uap->fd, uap->path, uap->buf, uap->size,
2518 uap->flags, UIO_USERSPACE));
2522 * Retrieve the full filesystem path that corresponds to a vnode from the name
2523 * cache (if available).
2526 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
2533 if (__predict_false(vn == NULL))
2536 buflen = MAXPATHLEN;
2537 buf = malloc(buflen, M_TEMP, M_WAITOK);
2539 error = vn_fullpath_any(td, vn, pwd->pwd_rdir, buf, retbuf, &buflen);
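/*
 * A sketch of the usual calling convention: the resolved path is returned via
 * retbuf and the backing allocation via freebuf, which the caller frees with
 * M_TEMP.  "vp" stands for any vnode the caller already holds; the lookup is
 * best effort and the fallback string is kept on failure.
 */
#if 0
	char *fullpath, *freepath;

	fullpath = "unknown";
	freepath = NULL;
	vn_fullpath(curthread, vp, &fullpath, &freepath);
	printf("vnode %p resolves to %s\n", vp, fullpath);
	if (freepath != NULL)
		free(freepath, M_TEMP);
#endif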
2550 * This function is similar to vn_fullpath, but it attempts to lookup the
2551 * pathname relative to the global root mount point. This is required for the
2552 * auditing sub-system, as audited pathnames must be absolute, relative to the
2553 * global root mount point.
2556 vn_fullpath_global(struct thread *td, struct vnode *vn,
2557 char **retbuf, char **freebuf)
2563 if (__predict_false(vn == NULL))
2565 buflen = MAXPATHLEN;
2566 buf = malloc(buflen, M_TEMP, M_WAITOK);
2567 error = vn_fullpath_any(td, vn, rootvnode, buf, retbuf, &buflen);
2576 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, size_t *buflen)
2579 struct namecache *ncp;
2583 vlp = VP2VNODELOCK(*vp);
2585 TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
2586 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
2590 if (*buflen < ncp->nc_nlen) {
2593 counter_u64_add(numfullpathfail4, 1);
2595 SDT_PROBE3(vfs, namecache, fullpath, return, error,
2599 *buflen -= ncp->nc_nlen;
2600 memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
2601 SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
2610 SDT_PROBE1(vfs, namecache, fullpath, miss, vp);
2613 vn_lock(*vp, LK_SHARED | LK_RETRY);
2614 error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
2617 counter_u64_add(numfullpathfail2, 1);
2618 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
2623 if (VN_IS_DOOMED(dvp)) {
2624 /* forced unmount */
2627 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
2631 * *vp has its use count incremented still.
2638 * Resolve a directory to a pathname.
2640 * The name of the directory can always be found in the namecache or fetched
2641 * from the filesystem. There is also guaranteed to be only one parent, meaning
2642 * we can just follow vnodes up until we find the root.
2644 * The vnode must be referenced.
2647 vn_fullpath_dir(struct thread *td, struct vnode *vp, struct vnode *rdir,
2648 char *buf, char **retbuf, size_t *len, bool slash_prefixed, size_t addend)
2650 #ifdef KDTRACE_HOOKS
2651 struct vnode *startvp = vp;
2657 VNPASS(vp->v_type == VDIR || VN_IS_DOOMED(vp), vp);
2658 VNPASS(vp->v_usecount > 0, vp);
2662 if (!slash_prefixed) {
2670 SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
2671 counter_u64_add(numfullpathcalls, 1);
2672 while (vp != rdir && vp != rootvnode) {
2674 * The vp vnode must be already fully constructed,
2675 * since it is either found in namecache or obtained
2676 * from VOP_VPTOCNP(). We may test for VV_ROOT safely
2677 * without obtaining the vnode lock.
2679 if ((vp->v_vflag & VV_ROOT) != 0) {
2680 vn_lock(vp, LK_RETRY | LK_SHARED);
2683 * With the vnode locked, check for races with
2684 * unmount, forced or not. Note that we
2685 * already verified that vp is not equal to
2686 * the root vnode, which means that
2687 * mnt_vnodecovered can be NULL only for the
2690 if (VN_IS_DOOMED(vp) ||
2691 (vp1 = vp->v_mount->mnt_vnodecovered) == NULL ||
2692 vp1->v_mountedhere != vp->v_mount) {
2695 SDT_PROBE3(vfs, namecache, fullpath, return,
2705 if (vp->v_type != VDIR) {
2707 counter_u64_add(numfullpathfail1, 1);
2709 SDT_PROBE3(vfs, namecache, fullpath, return,
2713 error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
2719 SDT_PROBE3(vfs, namecache, fullpath, return, error,
2723 buf[--buflen] = '/';
2724 slash_prefixed = true;
2728 if (!slash_prefixed) {
2731 counter_u64_add(numfullpathfail4, 1);
2732 SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
2736 buf[--buflen] = '/';
2738 counter_u64_add(numfullpathfound, 1);
2741 *retbuf = buf + buflen;
2742 SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, *retbuf);
2749 * Resolve an arbitrary vnode to a pathname.
2752 * - hardlinks are not tracked, thus if the vnode is not a directory this can
2753 * resolve to a different path than the one used to find it
2754 * - namecache is not mandatory, meaning names are not guaranteed to be added
2755 * (in which case resolving fails)
2758 vn_fullpath_any(struct thread *td, struct vnode *vp, struct vnode *rdir,
2759 char *buf, char **retbuf, size_t *buflen)
2762 bool slash_prefixed;
2768 orig_buflen = *buflen;
2771 slash_prefixed = false;
2772 if (vp->v_type != VDIR) {
2774 buf[*buflen] = '\0';
2775 error = vn_vptocnp(&vp, td->td_ucred, buf, buflen);
2784 slash_prefixed = true;
2787 return (vn_fullpath_dir(td, vp, rdir, buf, retbuf, buflen, slash_prefixed,
2788 orig_buflen - *buflen));
2792 * Resolve an arbitrary vnode to a pathname (taking care of hardlinks).
2794 * Since the namecache does not track hardlinks, the caller is expected to first
2795 * look up the target vnode with SAVENAME | WANTPARENT flags passed to namei.
2797 * Then we have 2 cases:
2798 * - if the found vnode is a directory, the path can be constructed just by
2799 * following names up the chain
2800 * - otherwise we populate the buffer with the saved name and start resolving
2804 vn_fullpath_hardlink(struct thread *td, struct nameidata *ndp, char **retbuf,
2805 char **freebuf, size_t *buflen)
2809 struct componentname *cnp;
2813 bool slash_prefixed;
2817 if (*buflen > MAXPATHLEN)
2818 *buflen = MAXPATHLEN;
2820 slash_prefixed = false;
2822 buf = malloc(*buflen, M_TEMP, M_WAITOK);
2827 if (vp->v_type != VDIR) {
2829 addend = cnp->cn_namelen + 2;
2830 if (*buflen < addend) {
2835 tmpbuf = buf + *buflen;
2837 memcpy(&tmpbuf[1], cnp->cn_nameptr, cnp->cn_namelen);
2838 tmpbuf[addend - 1] = '\0';
2839 slash_prefixed = true;
2844 error = vn_fullpath_dir(td, vp, pwd->pwd_rdir, buf, retbuf, buflen,
2845 slash_prefixed, addend);
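/*
 * Find a parent directory of vp by scanning the namecache entries which name
 * vp.  On success the parent is returned referenced and shared-locked; NULL is
 * returned if there is no usable entry or the lock cannot be taken without
 * sleeping.
 */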
2860 vn_dir_dd_ino(struct vnode *vp)
2862 struct namecache *ncp;
2867 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
2868 vlp = VP2VNODELOCK(vp);
2870 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
2871 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
2874 vs = vget_prep(ddvp);
2876 if (vget_finish(ddvp, LK_SHARED | LK_NOWAIT, vs))
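/*
 * Best-effort: copy one of the names by which vp is known in the namecache
 * into buf, truncated to buflen and NUL terminated.
 */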
2885 vn_commname(struct vnode *vp, char *buf, u_int buflen)
2887 struct namecache *ncp;
2891 vlp = VP2VNODELOCK(vp);
2893 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
2894 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
2900 l = min(ncp->nc_nlen, buflen - 1);
2901 memcpy(buf, ncp->nc_name, l);
2908 * This function updates the path string to the vnode's full global path
2909 * and checks the size of the new path string against the pathlen argument.
2911 * Requires a locked, referenced vnode.
2912 * Vnode is re-locked on success or ENODEV, otherwise unlocked.
2914 * If vp is a directory, the call to vn_fullpath_global() always succeeds
2915 * because it falls back to the ".." lookup if the namecache lookup fails.
2918 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
2921 struct nameidata nd;
2926 ASSERT_VOP_ELOCKED(vp, __func__);
2928 /* Construct global filesystem path from vp. */
2930 error = vn_fullpath_global(td, vp, &rpath, &fbuf);
2937 if (strlen(rpath) >= pathlen) {
2939 error = ENAMETOOLONG;
2944 * Re-lookup the vnode by path to detect a possible rename.
2945 * As a side effect, the vnode is relocked.
2946 * If vnode was renamed, return ENOENT.
2948 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
2949 UIO_SYSSPACE, path, td);
2955 NDFREE(&nd, NDF_ONLY_PNBUF);
2959 strcpy(path, rpath);
2972 db_print_vpath(struct vnode *vp)
2975 while (vp != NULL) {
2976 db_printf("%p: ", vp);
2977 if (vp == rootvnode) {
2981 if (vp->v_vflag & VV_ROOT) {
2982 db_printf("<mount point>");
2983 vp = vp->v_mount->mnt_vnodecovered;
2985 struct namecache *ncp;
2989 ncp = TAILQ_FIRST(&vp->v_cache_dst);
2992 for (i = 0; i < ncp->nc_nlen; i++)
2993 db_printf("%c", *ncn++);
3006 DB_SHOW_COMMAND(vpath, db_show_vpath)
3011 db_printf("usage: show vpath <struct vnode *>\n");
3015 vp = (struct vnode *)addr;
3021 static bool __read_frequently cache_fast_lookup = true;
3022 SYSCTL_BOOL(_vfs, OID_AUTO, cache_fast_lookup, CTLFLAG_RW,
3023 &cache_fast_lookup, 0, "Enable the lockless (fast path) name lookup");
3025 #define CACHE_FPL_FAILED -2020
3028 cache_fpl_cleanup_cnp(struct componentname *cnp)
3031 uma_zfree(namei_zone, cnp->cn_pnbuf);
3033 cnp->cn_pnbuf = NULL;
3034 cnp->cn_nameptr = NULL;
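/*
 * The path is absolute: consume the leading slashes and start the lookup from
 * the root directory.
 */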
3039 cache_fpl_handle_root(struct nameidata *ndp, struct vnode **dpp)
3041 struct componentname *cnp;
3044 while (*(cnp->cn_nameptr) == '/') {
3049 *dpp = ndp->ni_rootdir;
3053 * Components of nameidata (or objects it can point to) which may
3054 * need restoring in case fast path lookup fails.
3056 struct nameidata_saved {
3064 struct nameidata *ndp;
3065 struct componentname *cnp;
3071 struct nameidata_saved snd;
3073 enum cache_fpl_status status:8;
3078 cache_fpl_checkpoint(struct cache_fpl *fpl, struct nameidata_saved *snd)
3081 snd->cn_flags = fpl->ndp->ni_cnd.cn_flags;
3082 snd->cn_namelen = fpl->ndp->ni_cnd.cn_namelen;
3083 snd->cn_nameptr = fpl->ndp->ni_cnd.cn_nameptr;
3084 snd->ni_pathlen = fpl->ndp->ni_pathlen;
3088 cache_fpl_restore(struct cache_fpl *fpl, struct nameidata_saved *snd)
3091 fpl->ndp->ni_cnd.cn_flags = snd->cn_flags;
3092 fpl->ndp->ni_cnd.cn_namelen = snd->cn_namelen;
3093 fpl->ndp->ni_cnd.cn_nameptr = snd->cn_nameptr;
3094 fpl->ndp->ni_pathlen = snd->ni_pathlen;
3098 #define cache_fpl_smr_assert_entered(fpl) ({ \
3099 struct cache_fpl *_fpl = (fpl); \
3100 MPASS(_fpl->in_smr == true); \
3101 VFS_SMR_ASSERT_ENTERED(); \
3103 #define cache_fpl_smr_assert_not_entered(fpl) ({ \
3104 struct cache_fpl *_fpl = (fpl); \
3105 MPASS(_fpl->in_smr == false); \
3106 VFS_SMR_ASSERT_NOT_ENTERED(); \
3109 #define cache_fpl_smr_assert_entered(fpl) do { } while (0)
3110 #define cache_fpl_smr_assert_not_entered(fpl) do { } while (0)
3113 #define cache_fpl_smr_enter_initial(fpl) ({ \
3114 struct cache_fpl *_fpl = (fpl); \
3116 _fpl->in_smr = true; \
3119 #define cache_fpl_smr_enter(fpl) ({ \
3120 struct cache_fpl *_fpl = (fpl); \
3121 MPASS(_fpl->in_smr == false); \
3123 _fpl->in_smr = true; \
3126 #define cache_fpl_smr_exit(fpl) ({ \
3127 struct cache_fpl *_fpl = (fpl); \
3128 MPASS(_fpl->in_smr == true); \
3130 _fpl->in_smr = false; \
3134 cache_fpl_aborted_impl(struct cache_fpl *fpl, int line)
3137 if (fpl->status != CACHE_FPL_STATUS_UNSET) {
3138 KASSERT(fpl->status == CACHE_FPL_STATUS_PARTIAL,
3139 ("%s: converting to abort from %d at %d, set at %d\n",
3140 __func__, fpl->status, line, fpl->line));
3142 fpl->status = CACHE_FPL_STATUS_ABORTED;
3144 return (CACHE_FPL_FAILED);
3147 #define cache_fpl_aborted(x) cache_fpl_aborted_impl((x), __LINE__)
3150 cache_fpl_partial_impl(struct cache_fpl *fpl, int line)
3153 KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET,
3154 ("%s: setting to partial at %d, but already set to %d at %d\n",
3155 __func__, line, fpl->status, fpl->line));
3156 cache_fpl_smr_assert_entered(fpl);
3157 fpl->status = CACHE_FPL_STATUS_PARTIAL;
3159 return (CACHE_FPL_FAILED);
3162 #define cache_fpl_partial(x) cache_fpl_partial_impl((x), __LINE__)
3165 cache_fpl_handled_impl(struct cache_fpl *fpl, int error, int line)
3168 KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET,
3169 ("%s: setting to handled at %d, but already set to %d at %d\n",
3170 __func__, line, fpl->status, fpl->line));
3171 cache_fpl_smr_assert_not_entered(fpl);
3172 MPASS(error != CACHE_FPL_FAILED);
3173 fpl->status = CACHE_FPL_STATUS_HANDLED;
3178 #define cache_fpl_handled(x, e) cache_fpl_handled_impl((x), (e), __LINE__)
3180 #define CACHE_FPL_SUPPORTED_CN_FLAGS \
3181 (LOCKLEAF | LOCKPARENT | WANTPARENT | NOCACHE | FOLLOW | LOCKSHARED | SAVENAME | \
3182 SAVESTART | WILLBEDIR | ISOPEN | NOMACCHECK | AUDITVNODE1 | AUDITVNODE2 | NOCAPCHECK)
3184 #define CACHE_FPL_INTERNAL_CN_FLAGS \
3185 (ISDOTDOT | MAKEENTRY | ISLASTCN)
3187 _Static_assert((CACHE_FPL_SUPPORTED_CN_FLAGS & CACHE_FPL_INTERNAL_CN_FLAGS) == 0,
3188 "supported and internal flags overlap");
3191 cache_fpl_islastcn(struct nameidata *ndp)
3194 return (*ndp->ni_next == 0);
3198 cache_fpl_isdotdot(struct componentname *cnp)
3201 if (cnp->cn_namelen == 2 &&
3202 cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.')
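/*
 * Decide whether this lookup may be attempted locklessly at all: bail if the
 * fast path is disabled, MAC lookup hooks are active, unsupported cn_flags are
 * present, or the lookup is relative to a directory descriptor, audited or
 * done in capability mode.
 */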
3208 cache_can_fplookup(struct cache_fpl *fpl)
3210 struct nameidata *ndp;
3211 struct componentname *cnp;
3216 td = cnp->cn_thread;
3218 if (!cache_fast_lookup) {
3219 cache_fpl_aborted(fpl);
3223 if (mac_vnode_check_lookup_enabled()) {
3224 cache_fpl_aborted(fpl);
3228 if ((cnp->cn_flags & ~CACHE_FPL_SUPPORTED_CN_FLAGS) != 0) {
3229 cache_fpl_aborted(fpl);
3232 if (ndp->ni_dirfd != AT_FDCWD) {
3233 cache_fpl_aborted(fpl);
3236 if (IN_CAPABILITY_MODE(td)) {
3237 cache_fpl_aborted(fpl);
3240 if (AUDITING_TD(td)) {
3241 cache_fpl_aborted(fpl);
3244 if (ndp->ni_startdir != NULL) {
3245 cache_fpl_aborted(fpl);
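/*
 * Symlinks are not supported by the fast path; any other vnode type is.
 */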
3252 cache_fplookup_vnode_supported(struct vnode *vp)
3255 return (vp->v_type != VLNK);
3259 * Move a negative entry to the hot list.
3261 * We have to take locks, but they may be contended and in the worst
3262 * case we may need to go off CPU. We don't want to spin within the
3263 * smr section and we cannot block inside it. Instead we are going to
3264 * look up the entry again.
3266 static int __noinline
3267 cache_fplookup_negative_promote(struct cache_fpl *fpl, struct namecache *oncp,
3270 struct componentname *cnp;
3271 struct namecache *ncp;
3272 struct neglist *neglist;
3273 struct negstate *negstate;
3280 if (!vhold_smr(dvp))
3281 return (cache_fpl_aborted(fpl));
3283 neglist = NCP2NEGLIST(oncp);
3284 cache_fpl_smr_exit(fpl);
3286 mtx_lock(&ncneg_hot.nl_lock);
3287 mtx_lock(&neglist->nl_lock);
3289 * For hash iteration.
3291 cache_fpl_smr_enter(fpl);
3294 * Avoid all surprises by only succeeding if we got the same entry and
3295 * bailing completely otherwise.
3297 * In particular at this point there can be a new ncp which matches the
3298 * search but hashes to a different neglist.
3300 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
3306 * No match to begin with.
3308 if (__predict_false(ncp == NULL)) {
3313 * The newly found entry may be something different...
3315 if (!(ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
3316 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))) {
3321 * ... and not even negative.
3323 nc_flag = atomic_load_char(&ncp->nc_flag);
3324 if ((nc_flag & NCF_NEGATIVE) == 0) {
3328 if (__predict_false(!cache_ncp_canuse(ncp))) {
3332 negstate = NCP2NEGSTATE(ncp);
3333 if ((negstate->neg_flag & NEG_HOT) == 0) {
3335 TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
3336 TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
3337 negstate->neg_flag |= NEG_HOT;
3340 SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp, ncp->nc_name);
3341 counter_u64_add(numneghits, 1);
3342 cache_fpl_smr_exit(fpl);
3343 mtx_unlock(&neglist->nl_lock);
3344 mtx_unlock(&ncneg_hot.nl_lock);
3346 return (cache_fpl_handled(fpl, ENOENT));
3348 cache_fpl_smr_exit(fpl);
3349 mtx_unlock(&neglist->nl_lock);
3350 mtx_unlock(&ncneg_hot.nl_lock);
3352 return (cache_fpl_aborted(fpl));
3356 * The target vnode is not supported, prepare for the slow path to take over.
3358 static int __noinline
3359 cache_fplookup_partial_setup(struct cache_fpl *fpl)
3361 struct nameidata *ndp;
3362 struct componentname *cnp;
3371 dvp_seqc = fpl->dvp_seqc;
3373 dvs = vget_prep_smr(dvp);
3374 if (__predict_false(dvs == VGET_NONE)) {
3375 cache_fpl_smr_exit(fpl);
3376 return (cache_fpl_aborted(fpl));
3379 cache_fpl_smr_exit(fpl);
3381 vget_finish_ref(dvp, dvs);
3382 if (!vn_seqc_consistent(dvp, dvp_seqc)) {
3384 return (cache_fpl_aborted(fpl));
3387 pwd = pwd_hold(curthread);
3388 if (fpl->pwd != pwd) {
3391 return (cache_fpl_aborted(fpl));
3394 cache_fpl_restore(fpl, &fpl->snd);
3396 ndp->ni_startdir = dvp;
3397 cnp->cn_flags |= MAKEENTRY;
3398 if (cache_fpl_islastcn(ndp))
3399 cnp->cn_flags |= ISLASTCN;
3400 if (cache_fpl_isdotdot(cnp))
3401 cnp->cn_flags |= ISDOTDOT;
3407 cache_fplookup_final_child(struct cache_fpl *fpl, enum vgetstate tvs)
3409 struct componentname *cnp;
3416 tvp_seqc = fpl->tvp_seqc;
3418 if ((cnp->cn_flags & LOCKLEAF) != 0) {
3419 lkflags = LK_SHARED;
3420 if ((cnp->cn_flags & LOCKSHARED) == 0)
3421 lkflags = LK_EXCLUSIVE;
3422 error = vget_finish(tvp, lkflags, tvs);
3423 if (__predict_false(error != 0)) {
3424 return (cache_fpl_aborted(fpl));
3427 vget_finish_ref(tvp, tvs);
3430 if (!vn_seqc_consistent(tvp, tvp_seqc)) {
3431 if ((cnp->cn_flags & LOCKLEAF) != 0)
3435 return (cache_fpl_aborted(fpl));
3438 return (cache_fpl_handled(fpl, 0));
3442 * They want to possibly modify the state of the namecache.
3444 * Don't try to match the API contract, just leave.
3445 * TODO: this leaves scalability on the table
3448 cache_fplookup_final_modifying(struct cache_fpl *fpl)
3450 struct componentname *cnp;
3453 MPASS(cnp->cn_nameiop != LOOKUP);
3454 return (cache_fpl_partial(fpl));
3457 static int __noinline
3458 cache_fplookup_final_withparent(struct cache_fpl *fpl)
3460 struct componentname *cnp;
3461 enum vgetstate dvs, tvs;
3462 struct vnode *dvp, *tvp;
3463 seqc_t dvp_seqc, tvp_seqc;
3468 dvp_seqc = fpl->dvp_seqc;
3470 tvp_seqc = fpl->tvp_seqc;
3472 MPASS((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0);
3475 * This is less efficient than it can be for simplicity.
3477 dvs = vget_prep_smr(dvp);
3478 if (__predict_false(dvs == VGET_NONE)) {
3479 return (cache_fpl_aborted(fpl));
3481 tvs = vget_prep_smr(tvp);
3482 if (__predict_false(tvs == VGET_NONE)) {
3483 cache_fpl_smr_exit(fpl);
3484 vget_abort(dvp, dvs);
3485 return (cache_fpl_aborted(fpl));
3488 cache_fpl_smr_exit(fpl);
3490 if ((cnp->cn_flags & LOCKPARENT) != 0) {
3491 error = vget_finish(dvp, LK_EXCLUSIVE, dvs);
3492 if (__predict_false(error != 0)) {
3493 vget_abort(tvp, tvs);
3494 return (cache_fpl_aborted(fpl));
3497 vget_finish_ref(dvp, dvs);
3500 if (!vn_seqc_consistent(dvp, dvp_seqc)) {
3501 vget_abort(tvp, tvs);
3502 if ((cnp->cn_flags & LOCKPARENT) != 0)
3506 return (cache_fpl_aborted(fpl));
3509 error = cache_fplookup_final_child(fpl, tvs);
3510 if (__predict_false(error != 0)) {
3511 MPASS(fpl->status == CACHE_FPL_STATUS_ABORTED);
3512 if ((cnp->cn_flags & LOCKPARENT) != 0)
3519 MPASS(fpl->status == CACHE_FPL_STATUS_HANDLED);
3524 cache_fplookup_final(struct cache_fpl *fpl)
3526 struct componentname *cnp;
3528 struct vnode *dvp, *tvp;
3529 seqc_t dvp_seqc, tvp_seqc;
3533 dvp_seqc = fpl->dvp_seqc;
3535 tvp_seqc = fpl->tvp_seqc;
3537 VNPASS(cache_fplookup_vnode_supported(dvp), dvp);
3539 if (cnp->cn_nameiop != LOOKUP) {
3540 return (cache_fplookup_final_modifying(fpl));
3543 if ((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0)
3544 return (cache_fplookup_final_withparent(fpl));
3546 tvs = vget_prep_smr(tvp);
3547 if (__predict_false(tvs == VGET_NONE)) {
3548 return (cache_fpl_partial(fpl));
3551 if (!vn_seqc_consistent(dvp, dvp_seqc)) {
3552 cache_fpl_smr_exit(fpl);
3553 vget_abort(tvp, tvs);
3554 return (cache_fpl_aborted(fpl));
3557 cache_fpl_smr_exit(fpl);
3558 return (cache_fplookup_final_child(fpl, tvs));
3561 static int __noinline
3562 cache_fplookup_dot(struct cache_fpl *fpl)
3569 fpl->tvp_seqc = vn_seqc_read_any(dvp);
3570 if (seqc_in_modify(fpl->tvp_seqc)) {
3571 return (cache_fpl_aborted(fpl));
3574 counter_u64_add(dothits, 1);
3575 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", dvp);
3580 static int __noinline
3581 cache_fplookup_dotdot(struct cache_fpl *fpl)
3583 struct nameidata *ndp;
3584 struct componentname *cnp;
3585 struct namecache *ncp;
3595 * XXX this is racy the same way regular lookup is
3597 for (pr = cnp->cn_cred->cr_prison; pr != NULL;
3599 if (dvp == pr->pr_root)
3602 if (dvp == ndp->ni_rootdir ||
3603 dvp == ndp->ni_topdir ||
3607 fpl->tvp_seqc = vn_seqc_read_any(dvp);
3608 if (seqc_in_modify(fpl->tvp_seqc)) {
3609 return (cache_fpl_aborted(fpl));
3614 if ((dvp->v_vflag & VV_ROOT) != 0) {
3617 * The opposite of climb mount is needed here.
3619 return (cache_fpl_aborted(fpl));
3622 ncp = atomic_load_ptr(&dvp->v_cache_dd);
3624 return (cache_fpl_aborted(fpl));
3627 nc_flag = atomic_load_char(&ncp->nc_flag);
3628 if ((nc_flag & NCF_ISDOTDOT) != 0) {
3629 if ((nc_flag & NCF_NEGATIVE) != 0)
3630 return (cache_fpl_aborted(fpl));
3631 fpl->tvp = ncp->nc_vp;
3633 fpl->tvp = ncp->nc_dvp;
3636 if (__predict_false(!cache_ncp_canuse(ncp))) {
3637 return (cache_fpl_aborted(fpl));
3640 fpl->tvp_seqc = vn_seqc_read_any(fpl->tvp);
3641 if (seqc_in_modify(fpl->tvp_seqc)) {
3642 return (cache_fpl_partial(fpl));
3645 counter_u64_add(dotdothits, 1);
3650 cache_fplookup_next(struct cache_fpl *fpl)
3652 struct componentname *cnp;
3653 struct namecache *ncp;
3654 struct negstate *negstate;
3655 struct vnode *dvp, *tvp;
3663 if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.')) {
3664 return (cache_fplookup_dot(fpl));
3667 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
3669 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
3670 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
3671 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
3676 * If there is no entry we have to punt to the slow path to perform
3677 * actual lookup. Should there be nothing with this name a negative
3678 * entry will be created.
3680 if (__predict_false(ncp == NULL)) {
3681 return (cache_fpl_partial(fpl));
3684 tvp = atomic_load_ptr(&ncp->nc_vp);
3685 nc_flag = atomic_load_char(&ncp->nc_flag);
3686 if ((nc_flag & NCF_NEGATIVE) != 0) {
3688 * If they want to create an entry we need to replace this one.
3690 if (__predict_false(fpl->cnp->cn_nameiop != LOOKUP)) {
3691 return (cache_fpl_partial(fpl));
3693 negstate = NCP2NEGSTATE(ncp);
3694 neg_hot = ((negstate->neg_flag & NEG_HOT) != 0);
3695 if (__predict_false(!cache_ncp_canuse(ncp))) {
3696 return (cache_fpl_partial(fpl));
3698 if (__predict_false((nc_flag & NCF_WHITE) != 0)) {
3699 return (cache_fpl_partial(fpl));
3702 return (cache_fplookup_negative_promote(fpl, ncp, hash));
3704 SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
3706 counter_u64_add(numneghits, 1);
3707 cache_fpl_smr_exit(fpl);
3708 return (cache_fpl_handled(fpl, ENOENT));
3711 if (__predict_false(!cache_ncp_canuse(ncp))) {
3712 return (cache_fpl_partial(fpl));
3716 fpl->tvp_seqc = vn_seqc_read_any(tvp);
3717 if (seqc_in_modify(fpl->tvp_seqc)) {
3718 return (cache_fpl_partial(fpl));
3721 if (!cache_fplookup_vnode_supported(tvp)) {
3722 return (cache_fpl_partial(fpl));
3725 counter_u64_add(numposhits, 1);
3726 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, tvp);
3731 cache_fplookup_mp_supported(struct mount *mp)
3736 if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) == 0)
3742 * Walk up the mount stack (if any).
3744 * Correctness is provided in the following ways:
3745 * - all vnodes are protected from freeing with SMR
3746 * - struct mount objects are type stable making them always safe to access
3747 * - stability of the particular mount is provided by busying it
3748 * - relationship between the vnode which is mounted on and the mount is
3749 * verified with the vnode sequence counter after busying
3750 * - association between the root vnode of the mount and the mount is protected by busying the mount
3753 * From that point on we can read the sequence counter of the root vnode
3754 * and get the next mount on the stack (if any) using the same protection.
3756 * By the end of a successful walk we are guaranteed the reached state was
3757 * indeed present at least at some point which matches the regular lookup.
3759 static int __noinline
3760 cache_fplookup_climb_mount(struct cache_fpl *fpl)
3762 struct mount *mp, *prev_mp;
3767 vp_seqc = fpl->tvp_seqc;
3769 VNPASS(vp->v_type == VDIR || vp->v_type == VBAD, vp);
3770 mp = atomic_load_ptr(&vp->v_mountedhere);
3776 if (!vfs_op_thread_enter_crit(mp)) {
3777 if (prev_mp != NULL)
3778 vfs_op_thread_exit_crit(prev_mp);
3779 return (cache_fpl_partial(fpl));
3781 if (prev_mp != NULL)
3782 vfs_op_thread_exit_crit(prev_mp);
3783 if (!vn_seqc_consistent(vp, vp_seqc)) {
3784 vfs_op_thread_exit_crit(mp);
3785 return (cache_fpl_partial(fpl));
3787 if (!cache_fplookup_mp_supported(mp)) {
3788 vfs_op_thread_exit_crit(mp);
3789 return (cache_fpl_partial(fpl));
3791 vp = atomic_load_ptr(&mp->mnt_rootvnode);
3792 if (vp == NULL || VN_IS_DOOMED(vp)) {
3793 vfs_op_thread_exit_crit(mp);
3794 return (cache_fpl_partial(fpl));
3796 vp_seqc = vn_seqc_read_any(vp);
3797 if (seqc_in_modify(vp_seqc)) {
3798 vfs_op_thread_exit_crit(mp);
3799 return (cache_fpl_partial(fpl));
3802 mp = atomic_load_ptr(&vp->v_mountedhere);
3807 vfs_op_thread_exit_crit(prev_mp);
3809 fpl->tvp_seqc = vp_seqc;
3814 cache_fplookup_need_climb_mount(struct cache_fpl *fpl)
3822 * Hack: while this is a union, the pointer tends to be NULL, so save on a branch by reading it unconditionally.
3825 mp = atomic_load_ptr(&vp->v_mountedhere);
3828 if (vp->v_type == VDIR)
3836 * The code is mostly copy-pasted from regular lookup, see lookup().
3837 * The structure is maintained along with comments for easier maintenance.
3838 * Deduplicating the code will become feasible after fast path lookup
3839 * becomes more feature-complete.
3842 cache_fplookup_parse(struct cache_fpl *fpl)
3844 struct nameidata *ndp;
3845 struct componentname *cnp;
3852 * Search a new directory.
3854 * The last component of the filename is left accessible via
3855 * cnp->cn_nameptr for callers that need the name. Callers needing
3856 * the name set the SAVENAME flag. When done, they assume
3857 * responsibility for freeing the pathname buffer.
3859 for (cp = cnp->cn_nameptr; *cp != 0 && *cp != '/'; cp++)
3861 cnp->cn_namelen = cp - cnp->cn_nameptr;
3862 if (__predict_false(cnp->cn_namelen > NAME_MAX)) {
3863 cache_fpl_smr_exit(fpl);
3864 return (cache_fpl_handled(fpl, ENAMETOOLONG));
3866 ndp->ni_pathlen -= cnp->cn_namelen;
3867 KASSERT(ndp->ni_pathlen <= PATH_MAX,
3868 ("%s: ni_pathlen underflow to %zd\n", __func__, ndp->ni_pathlen));
3872 * Replace multiple slashes by a single slash and trailing slashes
3873 * by a null. This must be done before VOP_LOOKUP() because some
3874 * fs's don't know about trailing slashes. Remember if there were
3875 * trailing slashes to handle symlinks, existing non-directories
3876 * and non-existing files that won't be directories specially later.
3878 while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) {
3884 * Regular lookup performs the following:
3885 * *ndp->ni_next = '\0';
3886 * cnp->cn_flags |= TRAILINGSLASH;
3888 * Which is problematic since it modifies data read
3889 * from userspace. Then if fast path lookup was to
3890 * abort we would have to either restore it or convey
3891 * the flag. Since this is a corner case just ignore
3892 * it for simplicity.
3894 return (cache_fpl_partial(fpl));
3900 * Check for degenerate name (e.g. / or "")
3901 * which is a way of talking about a directory,
3902 * e.g. like "/." or ".".
3905 * Another corner case handled by the regular lookup
3907 if (__predict_false(cnp->cn_nameptr[0] == '\0')) {
3908 return (cache_fpl_partial(fpl));
3914 cache_fplookup_parse_advance(struct cache_fpl *fpl)
3916 struct nameidata *ndp;
3917 struct componentname *cnp;
3922 cnp->cn_nameptr = ndp->ni_next;
3923 while (*cnp->cn_nameptr == '/') {
3929 static int __noinline
3930 cache_fplookup_failed_vexec(struct cache_fpl *fpl, int error)
3936 * Can happen when racing against vgone.
3939 cache_fpl_partial(fpl);
3943 * See the API contract for VOP_FPLOOKUP_VEXEC.
3945 if (!vn_seqc_consistent(fpl->dvp, fpl->dvp_seqc)) {
3946 error = cache_fpl_aborted(fpl);
3948 cache_fpl_smr_exit(fpl);
3949 cache_fpl_handled(fpl, error);
3957 cache_fplookup_impl(struct vnode *dvp, struct cache_fpl *fpl)
3959 struct nameidata *ndp;
3960 struct componentname *cnp;
3964 error = CACHE_FPL_FAILED;
3968 cache_fpl_checkpoint(fpl, &fpl->snd);
3971 fpl->dvp_seqc = vn_seqc_read_any(fpl->dvp);
3972 if (seqc_in_modify(fpl->dvp_seqc)) {
3973 cache_fpl_aborted(fpl);
3976 mp = atomic_load_ptr(&fpl->dvp->v_mount);
3977 if (!cache_fplookup_mp_supported(mp)) {
3978 cache_fpl_aborted(fpl);
3982 VNPASS(cache_fplookup_vnode_supported(fpl->dvp), fpl->dvp);
3985 error = cache_fplookup_parse(fpl);
3986 if (__predict_false(error != 0)) {
3990 VNPASS(cache_fplookup_vnode_supported(fpl->dvp), fpl->dvp);
3992 error = VOP_FPLOOKUP_VEXEC(fpl->dvp, cnp->cn_cred);
3993 if (__predict_false(error != 0)) {
3994 error = cache_fplookup_failed_vexec(fpl, error);
3998 if (__predict_false(cache_fpl_isdotdot(cnp))) {
3999 error = cache_fplookup_dotdot(fpl);
4000 if (__predict_false(error != 0)) {
4004 error = cache_fplookup_next(fpl);
4005 if (__predict_false(error != 0)) {
4009 VNPASS(!seqc_in_modify(fpl->tvp_seqc), fpl->tvp);
4011 if (cache_fplookup_need_climb_mount(fpl)) {
4012 error = cache_fplookup_climb_mount(fpl);
4013 if (__predict_false(error != 0)) {
4019 VNPASS(!seqc_in_modify(fpl->tvp_seqc), fpl->tvp);
4021 if (cache_fpl_islastcn(ndp)) {
4022 error = cache_fplookup_final(fpl);
4026 if (!vn_seqc_consistent(fpl->dvp, fpl->dvp_seqc)) {
4027 error = cache_fpl_aborted(fpl);
4031 fpl->dvp = fpl->tvp;
4032 fpl->dvp_seqc = fpl->tvp_seqc;
4034 cache_fplookup_parse_advance(fpl);
4035 cache_fpl_checkpoint(fpl, &fpl->snd);
4038 switch (fpl->status) {
4039 case CACHE_FPL_STATUS_UNSET:
4040 __assert_unreachable();
4042 case CACHE_FPL_STATUS_PARTIAL:
4043 cache_fpl_smr_assert_entered(fpl);
4044 return (cache_fplookup_partial_setup(fpl));
4045 case CACHE_FPL_STATUS_ABORTED:
4047 cache_fpl_smr_exit(fpl);
4048 return (CACHE_FPL_FAILED);
4049 case CACHE_FPL_STATUS_HANDLED:
4050 MPASS(error != CACHE_FPL_FAILED);
4051 cache_fpl_smr_assert_not_entered(fpl);
4052 if (__predict_false(error != 0)) {
4055 cache_fpl_cleanup_cnp(cnp);
4058 ndp->ni_dvp = fpl->dvp;
4059 ndp->ni_vp = fpl->tvp;
4060 if (cnp->cn_flags & SAVENAME)
4061 cnp->cn_flags |= HASBUF;
4063 cache_fpl_cleanup_cnp(cnp);
4069 * Fast path lookup protected with SMR and sequence counters.
4071 * Note: all VOP_FPLOOKUP_VEXEC routines have a comment referencing this one.
4073 * Filesystems can opt in by setting the MNTK_FPLOOKUP flag and meeting the criteria outlined below.
4076 * Traditional vnode lookup conceptually looks like this:
4077 *
4078 * vn_lock(current);
4079 * for (;;) {
4080 *     next = find();
4081 *     vn_lock(next);
4082 *     vn_unlock(current);
4083 *     current = next;
4084 *     if (last)
4085 *         break;
4086 * }
4087 * return (current);
4088 *
4089 * Each jump to the next vnode is safe memory-wise and atomic with respect to
4090 * any modifications thanks to holding respective locks.
4092 * The same guarantee can be provided with a combination of safe memory
4093 * reclamation and sequence counters instead. If all operations which affect
4094 * the relationship between the current vnode and the one we are looking for
4095 * also modify the counter, we can verify whether all the conditions held as
4096 * we made the jump. This includes things like permissions, mount points etc.
4097 * Counter modification is provided by enclosing relevant places in
4098 * vn_seqc_write_begin()/end() calls.
4100 * Thus this translates to:
4101 *
4102 * vfs_smr_enter();
4103 * dvp_seqc = seqc_read_any(dvp);
4104 * if (seqc_in_modify(dvp_seqc)) // someone is altering the vnode
4105 *     abort();
4106 * for (;;) {
4107 *     tvp = find();
4108 *     tvp_seqc = seqc_read_any(tvp);
4109 *     if (seqc_in_modify(tvp_seqc)) // someone is altering the target vnode
4110 *         abort();
4111 *     if (!seqc_consistent(dvp, dvp_seqc)) // someone is altering the vnode
4112 *         abort();
4113 *     dvp = tvp; // we know nothing of importance has changed
4114 *     dvp_seqc = tvp_seqc; // store the counter for the tvp iteration
4115 *     if (last)
4116 *         break;
4117 * }
4118 * vget(); // secure the vnode
4119 * if (!seqc_consistent(tvp, tvp_seqc)) // final check
4120 *     abort();
4121 * // at this point we know nothing has changed for any parent<->child pair
4122 * // as they were crossed during the lookup, meaning we matched the guarantee
4123 * // of the locked variant
4124 * return (tvp);
4125 *
4126 * The API contract for VOP_FPLOOKUP_VEXEC routines is as follows:
4127 * - they are called while within vfs_smr protection which they must never exit
4128 * - EAGAIN can be returned to denote checking could not be performed, it is
4129 * always valid to return it
4130 * - if the sequence counter has not changed the result must be valid
4131 * - if the sequence counter has changed both false positives and false negatives
4132 * are permitted (since the result will be rejected later)
4133 * - for simple cases of unix permission checks vaccess_vexec_smr can be used (see the sketch below)
4135 * Caveats to watch out for:
4136 * - vnodes are passed unlocked and unreferenced with nothing stopping
4137 * VOP_RECLAIM, in turn meaning that ->v_data can become NULL. It is advised
4138 * to use atomic_load_ptr to fetch it.
4139 * - the aforementioned object can also get freed, meaning absent other means it
4140 * should be protected with vfs_smr
4141 * - either safely checking permissions as they are modified or guaranteeing
4142 * their stability is left to the routine
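/*
 * A minimal sketch (not a real filesystem) of what opting in looks like: the
 * filesystem sets MNTK_FPLOOKUP at mount time and provides a
 * VOP_FPLOOKUP_VEXEC routine honouring the contract above.  "foofs", its node
 * layout and field names are hypothetical; vaccess_vexec_smr() performs the
 * actual Unix permission check.
 */
#if 0
struct foofs_node {
	mode_t	fn_mode;
	uid_t	fn_uid;
	gid_t	fn_gid;
};

static int
foofs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
{
	struct vnode *vp;
	struct foofs_node *fn;
	mode_t mode;

	vp = v->a_vp;
	/*
	 * We are within vfs_smr protection, but the vnode is neither locked
	 * nor referenced: v_data may be concurrently cleared by VOP_RECLAIM.
	 */
	fn = atomic_load_ptr(&vp->v_data);
	if (__predict_false(fn == NULL))
		return (EAGAIN);
	mode = atomic_load_short(&fn->fn_mode);
	return (vaccess_vexec_smr(mode, fn->fn_uid, fn->fn_gid, v->a_cred));
}

static int
foofs_mount(struct mount *mp)
{

	/* ... regular mount work; the vop vector includes foofs_fplookup_vexec ... */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);
	return (0);
}
#endif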
4145 cache_fplookup(struct nameidata *ndp, enum cache_fpl_status *status,
4148 struct cache_fpl fpl;
4151 struct componentname *cnp;
4152 struct nameidata_saved orig;
4155 MPASS(ndp->ni_lcf == 0);
4157 fpl.status = CACHE_FPL_STATUS_UNSET;
4159 fpl.cnp = &ndp->ni_cnd;
4160 MPASS(curthread == fpl.cnp->cn_thread);
4162 if ((fpl.cnp->cn_flags & SAVESTART) != 0)
4163 MPASS(fpl.cnp->cn_nameiop != LOOKUP);
4165 if (!cache_can_fplookup(&fpl)) {
4166 SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status);
4167 *status = fpl.status;
4168 return (EOPNOTSUPP);
4171 cache_fpl_checkpoint(&fpl, &orig);
4173 cache_fpl_smr_enter_initial(&fpl);
4174 pwd = pwd_get_smr();
4176 ndp->ni_rootdir = pwd->pwd_rdir;
4177 ndp->ni_topdir = pwd->pwd_jdir;
4180 cnp->cn_nameptr = cnp->cn_pnbuf;
4181 if (cnp->cn_pnbuf[0] == '/') {
4182 cache_fpl_handle_root(ndp, &dvp);
4184 MPASS(ndp->ni_dirfd == AT_FDCWD);
4185 dvp = pwd->pwd_cdir;
4188 SDT_PROBE4(vfs, namei, lookup, entry, dvp, cnp->cn_pnbuf, cnp->cn_flags, true);
4190 error = cache_fplookup_impl(dvp, &fpl);
4191 cache_fpl_smr_assert_not_entered(&fpl);
4192 SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status);
4194 *status = fpl.status;
4195 switch (fpl.status) {
4196 case CACHE_FPL_STATUS_UNSET:
4197 __assert_unreachable();
4199 case CACHE_FPL_STATUS_HANDLED:
4200 SDT_PROBE3(vfs, namei, lookup, return, error,
4201 (error == 0 ? ndp->ni_vp : NULL), true);
4203 case CACHE_FPL_STATUS_PARTIAL:
4206 * Status restored by cache_fplookup_partial_setup.
4209 case CACHE_FPL_STATUS_ABORTED:
4210 cache_fpl_restore(&fpl, &orig);