2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2019 The FreeBSD Foundation
6 * This software was developed by BFF Storage Systems, LLC under sponsorship
7 * from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/types.h>
33 #include <sys/sysctl.h>
42 using namespace testing;
45 * FUSE asynchronous notification
47 * FUSE servers can send unprompted notification messages for things like cache
48 * invalidation. This file tests our client's handling of those messages.
51 class Notify: public FuseTest {
53 /* Ignore an optional FUSE_FSYNC */
54 void maybe_expect_fsync(uint64_t ino)
56 EXPECT_CALL(*m_mock, process(
57 ResultOf([=](auto in) {
58 return (in.header.opcode == FUSE_FSYNC &&
59 in.header.nodeid == ino);
62 ).WillOnce(Invoke(ReturnErrno(0)));
65 void expect_lookup(uint64_t parent, const char *relpath, uint64_t ino,
66 off_t size, Sequence &seq)
68 EXPECT_LOOKUP(parent, relpath)
71 ReturnImmediate([=](auto in __unused, auto& out) {
72 SET_OUT_HEADER_LEN(out, entry);
73 out.body.entry.attr.mode = S_IFREG | 0644;
74 out.body.entry.nodeid = ino;
75 out.body.entry.attr.ino = ino;
76 out.body.entry.attr.nlink = 1;
77 out.body.entry.attr.size = size;
78 out.body.entry.attr_valid = UINT64_MAX;
79 out.body.entry.entry_valid = UINT64_MAX;
84 class NotifyWriteback: public Notify {
86 virtual void SetUp() {
87 const char *node = "vfs.fusefs.data_cache_mode";
89 size_t size = sizeof(val);
96 ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
99 GTEST_SKIP() << "vfs.fusefs.data_cache_mode must be set to 2 "
100 "(writeback) for this test";
103 void expect_write(uint64_t ino, uint64_t offset, uint64_t size,
104 const void *contents)
106 FuseTest::expect_write(ino, offset, size, size, 0, 0, contents);
111 struct inval_entry_args {
118 static void* inval_entry(void* arg) {
119 const struct inval_entry_args *iea = (struct inval_entry_args*)arg;
122 r = iea->mock->notify_inval_entry(iea->parent, iea->name, iea->namelen);
126 return (void*)(intptr_t)errno;
129 struct inval_inode_args {
144 static void* inval_inode(void* arg) {
145 const struct inval_inode_args *iia = (struct inval_inode_args*)arg;
148 r = iia->mock->notify_inval_inode(iia->ino, iia->off, iia->len);
152 return (void*)(intptr_t)errno;
155 static void* store(void* arg) {
156 const struct store_args *sa = (struct store_args*)arg;
159 r = sa->mock->notify_store(sa->nodeid, sa->offset, sa->data, sa->size);
163 return (void*)(intptr_t)errno;
166 /* Invalidate a nonexistent entry */
167 TEST_F(Notify, inval_entry_nonexistent)
169 const static char *name = "foo";
170 struct inval_entry_args iea;
175 iea.parent = FUSE_ROOT_ID;
177 iea.namelen = strlen(name);
178 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
180 pthread_join(th0, &thr0_value);
181 /* It's not an error for an entry to not be cached */
182 EXPECT_EQ(0, (intptr_t)thr0_value);
185 /* Invalidate a cached entry */
186 TEST_F(Notify, inval_entry)
188 const static char FULLPATH[] = "mountpoint/foo";
189 const static char RELPATH[] = "foo";
190 struct inval_entry_args iea;
198 expect_lookup(FUSE_ROOT_ID, RELPATH, ino0, 0, seq);
199 expect_lookup(FUSE_ROOT_ID, RELPATH, ino1, 0, seq);
201 /* Fill the entry cache */
202 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
203 EXPECT_EQ(ino0, sb.st_ino);
205 /* Now invalidate the entry */
207 iea.parent = FUSE_ROOT_ID;
209 iea.namelen = strlen(RELPATH);
210 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
212 pthread_join(th0, &thr0_value);
213 EXPECT_EQ(0, (intptr_t)thr0_value);
215 /* The second lookup should return the alternate ino */
216 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
217 EXPECT_EQ(ino1, sb.st_ino);
221 * Invalidate a cached entry beneath the root, which uses a slightly different
224 TEST_F(Notify, inval_entry_below_root)
226 const static char FULLPATH[] = "mountpoint/some_dir/foo";
227 const static char DNAME[] = "some_dir";
228 const static char FNAME[] = "foo";
229 struct inval_entry_args iea;
232 uint64_t dir_ino = 41;
238 EXPECT_LOOKUP(FUSE_ROOT_ID, DNAME)
240 ReturnImmediate([=](auto in __unused, auto& out) {
241 SET_OUT_HEADER_LEN(out, entry);
242 out.body.entry.attr.mode = S_IFDIR | 0755;
243 out.body.entry.nodeid = dir_ino;
244 out.body.entry.attr.nlink = 2;
245 out.body.entry.attr_valid = UINT64_MAX;
246 out.body.entry.entry_valid = UINT64_MAX;
248 expect_lookup(dir_ino, FNAME, ino0, 0, seq);
249 expect_lookup(dir_ino, FNAME, ino1, 0, seq);
251 /* Fill the entry cache */
252 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
253 EXPECT_EQ(ino0, sb.st_ino);
255 /* Now invalidate the entry */
257 iea.parent = dir_ino;
259 iea.namelen = strlen(FNAME);
260 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
262 pthread_join(th0, &thr0_value);
263 EXPECT_EQ(0, (intptr_t)thr0_value);
265 /* The second lookup should return the alternate ino */
266 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
267 EXPECT_EQ(ino1, sb.st_ino);
270 /* Invalidating an entry invalidates the parent directory's attributes */
271 TEST_F(Notify, inval_entry_invalidates_parent_attrs)
273 const static char FULLPATH[] = "mountpoint/foo";
274 const static char RELPATH[] = "foo";
275 struct inval_entry_args iea;
282 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
283 EXPECT_CALL(*m_mock, process(
284 ResultOf([=](auto in) {
285 return (in.header.opcode == FUSE_GETATTR &&
286 in.header.nodeid == FUSE_ROOT_ID);
290 .WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
291 SET_OUT_HEADER_LEN(out, attr);
292 out.body.attr.attr.mode = S_IFDIR | 0755;
293 out.body.attr.attr_valid = UINT64_MAX;
296 /* Fill the attr and entry cache */
297 ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
298 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
300 /* Now invalidate the entry */
302 iea.parent = FUSE_ROOT_ID;
304 iea.namelen = strlen(RELPATH);
305 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
307 pthread_join(th0, &thr0_value);
308 EXPECT_EQ(0, (intptr_t)thr0_value);
310 /* /'s attribute cache should be cleared */
311 ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
315 TEST_F(Notify, inval_inode_nonexistent)
317 struct inval_inode_args iia;
326 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
328 pthread_join(th0, &thr0_value);
329 /* It's not an error for an inode to not be cached */
330 EXPECT_EQ(0, (intptr_t)thr0_value);
333 TEST_F(Notify, inval_inode_with_clean_cache)
335 const static char FULLPATH[] = "mountpoint/foo";
336 const static char RELPATH[] = "foo";
337 const char CONTENTS0[] = "abcdefgh";
338 const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
339 struct inval_inode_args iia;
346 ssize_t size0 = sizeof(CONTENTS0);
347 ssize_t size1 = sizeof(CONTENTS1);
351 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size0, seq);
352 expect_open(ino, 0, 1);
353 EXPECT_CALL(*m_mock, process(
354 ResultOf([=](auto in) {
355 return (in.header.opcode == FUSE_GETATTR &&
356 in.header.nodeid == ino);
359 ).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
360 SET_OUT_HEADER_LEN(out, attr);
361 out.body.attr.attr.mode = S_IFREG | 0644;
362 out.body.attr.attr_valid = UINT64_MAX;
363 out.body.attr.attr.size = size1;
364 out.body.attr.attr.uid = uid;
366 expect_read(ino, 0, size0, size0, CONTENTS0);
367 expect_read(ino, 0, size1, size1, CONTENTS1);
369 /* Fill the data cache */
370 fd = open(FULLPATH, O_RDWR);
371 ASSERT_LE(0, fd) << strerror(errno);
372 ASSERT_EQ(size0, read(fd, buf, size0)) << strerror(errno);
373 EXPECT_EQ(0, memcmp(buf, CONTENTS0, size0));
375 /* Evict the data cache */
380 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
382 pthread_join(th0, &thr0_value);
383 EXPECT_EQ(0, (intptr_t)thr0_value);
385 /* cache attributes were been purged; this will trigger a new GETATTR */
386 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
387 EXPECT_EQ(uid, sb.st_uid);
388 EXPECT_EQ(size1, sb.st_size);
390 /* This read should not be serviced by cache */
391 ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
392 ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
393 EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));
395 /* Deliberately leak fd. close(2) will be tested in release.cc */
398 /* FUSE_NOTIFY_STORE with a file that's not in the entry cache */
399 /* disabled because FUSE_NOTIFY_STORE is not yet implemented */
400 TEST_F(Notify, DISABLED_store_nonexistent)
402 struct store_args sa;
411 ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
412 pthread_join(th0, &thr0_value);
413 /* It's not an error for a file to be unknown to the kernel */
414 EXPECT_EQ(0, (intptr_t)thr0_value);
417 /* Store data into for a file that does not yet have anything cached */
418 /* disabled because FUSE_NOTIFY_STORE is not yet implemented */
419 TEST_F(Notify, DISABLED_store_with_blank_cache)
421 const static char FULLPATH[] = "mountpoint/foo";
422 const static char RELPATH[] = "foo";
423 const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
424 struct store_args sa;
429 ssize_t size1 = sizeof(CONTENTS1);
433 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size1, seq);
434 expect_open(ino, 0, 1);
436 /* Fill the data cache */
437 fd = open(FULLPATH, O_RDWR);
438 ASSERT_LE(0, fd) << strerror(errno);
440 /* Evict the data cache */
445 sa.data = (void*)CONTENTS1;
446 ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
447 pthread_join(th0, &thr0_value);
448 EXPECT_EQ(0, (intptr_t)thr0_value);
450 /* This read should be serviced by cache */
451 ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
452 EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));
454 /* Deliberately leak fd. close(2) will be tested in release.cc */
457 TEST_F(NotifyWriteback, inval_inode_with_dirty_cache)
459 const static char FULLPATH[] = "mountpoint/foo";
460 const static char RELPATH[] = "foo";
461 const char CONTENTS[] = "abcdefgh";
462 struct inval_inode_args iia;
467 ssize_t bufsize = sizeof(CONTENTS);
470 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
471 expect_open(ino, 0, 1);
473 /* Fill the data cache */
474 fd = open(FULLPATH, O_RDWR);
475 ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
477 expect_write(ino, 0, bufsize, CONTENTS);
479 * The FUSE protocol does not require an fsync here, but FreeBSD's
480 * bufobj_invalbuf sends it anyway
482 maybe_expect_fsync(ino);
484 /* Evict the data cache */
489 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
491 pthread_join(th0, &thr0_value);
492 EXPECT_EQ(0, (intptr_t)thr0_value);
494 /* Deliberately leak fd. close(2) will be tested in release.cc */
497 TEST_F(NotifyWriteback, inval_inode_attrs_only)
499 const static char FULLPATH[] = "mountpoint/foo";
500 const static char RELPATH[] = "foo";
501 const char CONTENTS[] = "abcdefgh";
502 struct inval_inode_args iia;
509 ssize_t bufsize = sizeof(CONTENTS);
512 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
513 expect_open(ino, 0, 1);
514 EXPECT_CALL(*m_mock, process(
515 ResultOf([=](auto in) {
516 return (in.header.opcode == FUSE_WRITE);
520 EXPECT_CALL(*m_mock, process(
521 ResultOf([=](auto in) {
522 return (in.header.opcode == FUSE_GETATTR &&
523 in.header.nodeid == ino);
526 ).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
527 SET_OUT_HEADER_LEN(out, attr);
528 out.body.attr.attr.mode = S_IFREG | 0644;
529 out.body.attr.attr_valid = UINT64_MAX;
530 out.body.attr.attr.size = bufsize;
531 out.body.attr.attr.uid = uid;
534 /* Fill the data cache */
535 fd = open(FULLPATH, O_RDWR);
536 ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
538 /* Evict the attributes, but not data cache */
543 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
545 pthread_join(th0, &thr0_value);
546 EXPECT_EQ(0, (intptr_t)thr0_value);
548 /* cache attributes were been purged; this will trigger a new GETATTR */
549 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
550 EXPECT_EQ(uid, sb.st_uid);
551 EXPECT_EQ(bufsize, sb.st_size);
553 /* Deliberately leak fd. close(2) will be tested in release.cc */