2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 The FreeBSD Foundation
6 * This software was developed by BFF Storage Systems, LLC under sponsorship
7 * from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #include <sys/types.h>
35 #include <sys/sysctl.h>
44 using namespace testing;
47 * FUSE asynchronous notification
49 * FUSE servers can send unprompted notification messages for things like cache
50 * invalidation. This file tests our client's handling of those messages.
53 class Notify: public FuseTest {
55 /* Ignore an optional FUSE_FSYNC */
56 void maybe_expect_fsync(uint64_t ino)
58 EXPECT_CALL(*m_mock, process(
59 ResultOf([=](auto in) {
60 return (in.header.opcode == FUSE_FSYNC &&
61 in.header.nodeid == ino);
64 ).WillOnce(Invoke(ReturnErrno(0)));
67 void expect_lookup(uint64_t parent, const char *relpath, uint64_t ino,
68 off_t size, Sequence &seq)
70 EXPECT_LOOKUP(parent, relpath)
73 ReturnImmediate([=](auto in __unused, auto& out) {
74 SET_OUT_HEADER_LEN(out, entry);
75 out.body.entry.attr.mode = S_IFREG | 0644;
76 out.body.entry.nodeid = ino;
77 out.body.entry.attr.ino = ino;
78 out.body.entry.attr.nlink = 1;
79 out.body.entry.attr.size = size;
80 out.body.entry.attr_valid = UINT64_MAX;
81 out.body.entry.entry_valid = UINT64_MAX;
86 class NotifyWriteback: public Notify {
88 virtual void SetUp() {
89 m_init_flags |= FUSE_WRITEBACK_CACHE;
96 void expect_write(uint64_t ino, uint64_t offset, uint64_t size,
99 FuseTest::expect_write(ino, offset, size, size, 0, 0, contents);
104 struct inval_entry_args {
111 static void* inval_entry(void* arg) {
112 const struct inval_entry_args *iea = (struct inval_entry_args*)arg;
115 r = iea->mock->notify_inval_entry(iea->parent, iea->name, iea->namelen);
119 return (void*)(intptr_t)errno;
122 struct inval_inode_args {
137 static void* inval_inode(void* arg) {
138 const struct inval_inode_args *iia = (struct inval_inode_args*)arg;
141 r = iia->mock->notify_inval_inode(iia->ino, iia->off, iia->len);
145 return (void*)(intptr_t)errno;
148 static void* store(void* arg) {
149 const struct store_args *sa = (struct store_args*)arg;
152 r = sa->mock->notify_store(sa->nodeid, sa->offset, sa->data, sa->size);
156 return (void*)(intptr_t)errno;
159 /* Invalidate a nonexistent entry */
160 TEST_F(Notify, inval_entry_nonexistent)
162 const static char *name = "foo";
163 struct inval_entry_args iea;
168 iea.parent = FUSE_ROOT_ID;
170 iea.namelen = strlen(name);
171 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
173 pthread_join(th0, &thr0_value);
174 /* It's not an error for an entry to not be cached */
175 EXPECT_EQ(0, (intptr_t)thr0_value);
178 /* Invalidate a cached entry */
179 TEST_F(Notify, inval_entry)
181 const static char FULLPATH[] = "mountpoint/foo";
182 const static char RELPATH[] = "foo";
183 struct inval_entry_args iea;
191 expect_lookup(FUSE_ROOT_ID, RELPATH, ino0, 0, seq);
192 expect_lookup(FUSE_ROOT_ID, RELPATH, ino1, 0, seq);
194 /* Fill the entry cache */
195 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
196 EXPECT_EQ(ino0, sb.st_ino);
198 /* Now invalidate the entry */
200 iea.parent = FUSE_ROOT_ID;
202 iea.namelen = strlen(RELPATH);
203 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
205 pthread_join(th0, &thr0_value);
206 EXPECT_EQ(0, (intptr_t)thr0_value);
208 /* The second lookup should return the alternate ino */
209 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
210 EXPECT_EQ(ino1, sb.st_ino);
214 * Invalidate a cached entry beneath the root, which uses a slightly different
217 TEST_F(Notify, inval_entry_below_root)
219 const static char FULLPATH[] = "mountpoint/some_dir/foo";
220 const static char DNAME[] = "some_dir";
221 const static char FNAME[] = "foo";
222 struct inval_entry_args iea;
225 uint64_t dir_ino = 41;
231 EXPECT_LOOKUP(FUSE_ROOT_ID, DNAME)
233 ReturnImmediate([=](auto in __unused, auto& out) {
234 SET_OUT_HEADER_LEN(out, entry);
235 out.body.entry.attr.mode = S_IFDIR | 0755;
236 out.body.entry.nodeid = dir_ino;
237 out.body.entry.attr.nlink = 2;
238 out.body.entry.attr_valid = UINT64_MAX;
239 out.body.entry.entry_valid = UINT64_MAX;
241 expect_lookup(dir_ino, FNAME, ino0, 0, seq);
242 expect_lookup(dir_ino, FNAME, ino1, 0, seq);
244 /* Fill the entry cache */
245 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
246 EXPECT_EQ(ino0, sb.st_ino);
248 /* Now invalidate the entry */
250 iea.parent = dir_ino;
252 iea.namelen = strlen(FNAME);
253 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
255 pthread_join(th0, &thr0_value);
256 EXPECT_EQ(0, (intptr_t)thr0_value);
258 /* The second lookup should return the alternate ino */
259 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
260 EXPECT_EQ(ino1, sb.st_ino);
263 /* Invalidating an entry invalidates the parent directory's attributes */
264 TEST_F(Notify, inval_entry_invalidates_parent_attrs)
266 const static char FULLPATH[] = "mountpoint/foo";
267 const static char RELPATH[] = "foo";
268 struct inval_entry_args iea;
275 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
276 EXPECT_CALL(*m_mock, process(
277 ResultOf([=](auto in) {
278 return (in.header.opcode == FUSE_GETATTR &&
279 in.header.nodeid == FUSE_ROOT_ID);
283 .WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
284 SET_OUT_HEADER_LEN(out, attr);
285 out.body.attr.attr.mode = S_IFDIR | 0755;
286 out.body.attr.attr_valid = UINT64_MAX;
289 /* Fill the attr and entry cache */
290 ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
291 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
293 /* Now invalidate the entry */
295 iea.parent = FUSE_ROOT_ID;
297 iea.namelen = strlen(RELPATH);
298 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea))
300 pthread_join(th0, &thr0_value);
301 EXPECT_EQ(0, (intptr_t)thr0_value);
303 /* /'s attribute cache should be cleared */
304 ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno);
308 TEST_F(Notify, inval_inode_nonexistent)
310 struct inval_inode_args iia;
319 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
321 pthread_join(th0, &thr0_value);
322 /* It's not an error for an inode to not be cached */
323 EXPECT_EQ(0, (intptr_t)thr0_value);
326 TEST_F(Notify, inval_inode_with_clean_cache)
328 const static char FULLPATH[] = "mountpoint/foo";
329 const static char RELPATH[] = "foo";
330 const char CONTENTS0[] = "abcdefgh";
331 const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
332 struct inval_inode_args iia;
339 ssize_t size0 = sizeof(CONTENTS0);
340 ssize_t size1 = sizeof(CONTENTS1);
344 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size0, seq);
345 expect_open(ino, 0, 1);
346 EXPECT_CALL(*m_mock, process(
347 ResultOf([=](auto in) {
348 return (in.header.opcode == FUSE_GETATTR &&
349 in.header.nodeid == ino);
352 ).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
353 SET_OUT_HEADER_LEN(out, attr);
354 out.body.attr.attr.mode = S_IFREG | 0644;
355 out.body.attr.attr_valid = UINT64_MAX;
356 out.body.attr.attr.size = size1;
357 out.body.attr.attr.uid = uid;
359 expect_read(ino, 0, size0, size0, CONTENTS0);
360 expect_read(ino, 0, size1, size1, CONTENTS1);
362 /* Fill the data cache */
363 fd = open(FULLPATH, O_RDWR);
364 ASSERT_LE(0, fd) << strerror(errno);
365 ASSERT_EQ(size0, read(fd, buf, size0)) << strerror(errno);
366 EXPECT_EQ(0, memcmp(buf, CONTENTS0, size0));
368 /* Evict the data cache */
373 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
375 pthread_join(th0, &thr0_value);
376 EXPECT_EQ(0, (intptr_t)thr0_value);
378 /* cache attributes were purged; this will trigger a new GETATTR */
379 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
380 EXPECT_EQ(uid, sb.st_uid);
381 EXPECT_EQ(size1, sb.st_size);
383 /* This read should not be serviced by cache */
384 ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
385 ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
386 EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));
391 /* FUSE_NOTIFY_STORE with a file that's not in the entry cache */
392 /* disabled because FUSE_NOTIFY_STORE is not yet implemented */
393 TEST_F(Notify, DISABLED_store_nonexistent)
395 struct store_args sa;
404 ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
405 pthread_join(th0, &thr0_value);
406 /* It's not an error for a file to be unknown to the kernel */
407 EXPECT_EQ(0, (intptr_t)thr0_value);
410 /* Store data into for a file that does not yet have anything cached */
411 /* disabled because FUSE_NOTIFY_STORE is not yet implemented */
412 TEST_F(Notify, DISABLED_store_with_blank_cache)
414 const static char FULLPATH[] = "mountpoint/foo";
415 const static char RELPATH[] = "foo";
416 const char CONTENTS1[] = "ijklmnopqrstuvwxyz";
417 struct store_args sa;
422 ssize_t size1 = sizeof(CONTENTS1);
426 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, size1, seq);
427 expect_open(ino, 0, 1);
429 /* Fill the data cache */
430 fd = open(FULLPATH, O_RDWR);
431 ASSERT_LE(0, fd) << strerror(errno);
433 /* Evict the data cache */
438 sa.data = (const void*)CONTENTS1;
439 ASSERT_EQ(0, pthread_create(&th0, NULL, store, &sa)) << strerror(errno);
440 pthread_join(th0, &thr0_value);
441 EXPECT_EQ(0, (intptr_t)thr0_value);
443 /* This read should be serviced by cache */
444 ASSERT_EQ(size1, read(fd, buf, size1)) << strerror(errno);
445 EXPECT_EQ(0, memcmp(buf, CONTENTS1, size1));
450 TEST_F(NotifyWriteback, inval_inode_with_dirty_cache)
452 const static char FULLPATH[] = "mountpoint/foo";
453 const static char RELPATH[] = "foo";
454 const char CONTENTS[] = "abcdefgh";
455 struct inval_inode_args iia;
460 ssize_t bufsize = sizeof(CONTENTS);
463 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
464 expect_open(ino, 0, 1);
466 /* Fill the data cache */
467 fd = open(FULLPATH, O_RDWR);
469 ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
471 expect_write(ino, 0, bufsize, CONTENTS);
473 * The FUSE protocol does not require an fsync here, but FreeBSD's
474 * bufobj_invalbuf sends it anyway
476 maybe_expect_fsync(ino);
478 /* Evict the data cache */
483 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
485 pthread_join(th0, &thr0_value);
486 EXPECT_EQ(0, (intptr_t)thr0_value);
491 TEST_F(NotifyWriteback, inval_inode_attrs_only)
493 const static char FULLPATH[] = "mountpoint/foo";
494 const static char RELPATH[] = "foo";
495 const char CONTENTS[] = "abcdefgh";
496 struct inval_inode_args iia;
503 ssize_t bufsize = sizeof(CONTENTS);
506 expect_lookup(FUSE_ROOT_ID, RELPATH, ino, 0, seq);
507 expect_open(ino, 0, 1);
508 EXPECT_CALL(*m_mock, process(
509 ResultOf([=](auto in) {
510 return (in.header.opcode == FUSE_WRITE);
514 EXPECT_CALL(*m_mock, process(
515 ResultOf([=](auto in) {
516 return (in.header.opcode == FUSE_GETATTR &&
517 in.header.nodeid == ino);
520 ).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
521 SET_OUT_HEADER_LEN(out, attr);
522 out.body.attr.attr.mode = S_IFREG | 0644;
523 out.body.attr.attr_valid = UINT64_MAX;
524 out.body.attr.attr.size = bufsize;
525 out.body.attr.attr.uid = uid;
528 /* Fill the data cache */
529 fd = open(FULLPATH, O_RDWR);
530 ASSERT_LE(0, fd) << strerror(errno);
531 ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
533 /* Evict the attributes, but not data cache */
538 ASSERT_EQ(0, pthread_create(&th0, NULL, inval_inode, &iia))
540 pthread_join(th0, &thr0_value);
541 EXPECT_EQ(0, (intptr_t)thr0_value);
543 /* cache attributes were been purged; this will trigger a new GETATTR */
544 ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno);
545 EXPECT_EQ(uid, sb.st_uid);
546 EXPECT_EQ(bufsize, sb.st_size);