2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2019 The FreeBSD Foundation
6 * This software was developed by BFF Storage Systems, LLC under sponsorship
7 * from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/param.h>
34 #include <sys/socket.h>
35 #include <sys/sysctl.h>
40 #include <semaphore.h>
47 using namespace testing;
/*
 * Base fixture for FUSE_READ tests using the current kernel protocol.
 * NOTE(review): several lines of this class (access specifiers, braces) are
 * elided in this view of the file.
 */
49 class Read: public FuseTest {
52 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
/* Expect a FUSE_LOOKUP for a regular 0644 file with the given inode/size. */
54 FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
/* Fixture that repeats read tests against the older FUSE 7.8 protocol. */
58 class Read_7_8: public FuseTest {
60 virtual void SetUp() {
/* Negotiate kernel minor version 8 with the mock daemon. */
61 m_kernel_minor_version = 8;
65 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
/* Use the 7.8-format lookup response instead of the current one. */
67 FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
/*
 * Fixture for aio_read(2) tests.  The test is skipped unless the
 * vfs.aio.enable_unsafe sysctl is set, since fusefs is not on the AIO
 * safe list.
 */
71 class AioRead: public Read {
73 virtual void SetUp() {
74 const char *node = "vfs.aio.enable_unsafe";
76 size_t size = sizeof(val);
/* Read the sysctl; the declaration of val is elided in this view. */
80 ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
84 "vfs.aio.enable_unsafe must be set for this test";
/* Like AioRead, but mounts with FUSE_ASYNC_READ so reads may be concurrent. */
88 class AsyncRead: public AioRead {
89 virtual void SetUp() {
90 m_init_flags = FUSE_ASYNC_READ;
/*
 * Fixture for tests that require the fusefs data cache; skipped when
 * vfs.fusefs.data_cache_mode indicates caching is disabled.
 */
95 class ReadCacheable: public Read {
97 virtual void SetUp() {
98 const char *node = "vfs.fusefs.data_cache_mode";
100 size_t size = sizeof(val);
/* Read the sysctl; the declaration of val is elided in this view. */
104 ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
108 "fusefs data caching must be enabled for this test";
/*
 * Parameterized fixture for readahead tests.  The tuple parameter is
 * (noclusterr, readahead-multiplier); m_maxreadahead is scaled by the
 * kernel's vfs.maxbcachebuf value.
 */
112 class ReadAhead: public ReadCacheable,
113 public WithParamInterface<tuple<bool, int>>
115 virtual void SetUp() {
117 const char *node = "vfs.maxbcachebuf";
118 size_t size = sizeof(val);
119 ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
/* val presumably holds vfs.maxbcachebuf; its declaration is elided here. */
122 m_maxreadahead = val * get<1>(GetParam());
123 m_noclusterr = get<0>(GetParam());
124 ReadCacheable::SetUp();
128 /* AIO reads need to set the header's pid field correctly */
129 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
/* A basic aio_read(2) should complete and return the file's contents. */
130 TEST_F(AioRead, aio_read)
132 const char FULLPATH[] = "mountpoint/some_file.txt";
133 const char RELPATH[] = "some_file.txt";
134 const char *CONTENTS = "abcdefgh";
137 ssize_t bufsize = strlen(CONTENTS);
139 struct aiocb iocb, *piocb;
141 expect_lookup(RELPATH, ino, bufsize);
142 expect_open(ino, 0, 1);
143 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
145 fd = open(FULLPATH, O_RDONLY);
146 ASSERT_LE(0, fd) << strerror(errno);
/* Submit the read via POSIX AIO and block until it completes. */
148 iocb.aio_nbytes = bufsize;
149 iocb.aio_fildes = fd;
152 iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
153 ASSERT_EQ(0, aio_read(&iocb)) << strerror(errno);
154 ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
155 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
156 /* Deliberately leak fd. close(2) will be tested in release.cc */
160 * Without the FUSE_ASYNC_READ mount option, fuse(4) should ensure that there
161 * is at most one outstanding read operation per file handle
/*
 * Without FUSE_ASYNC_READ, only one read per file handle may be outstanding;
 * the daemon must never see the second request.
 */
163 TEST_F(AioRead, async_read_disabled)
165 const char FULLPATH[] = "mountpoint/some_file.txt";
166 const char RELPATH[] = "some_file.txt";
169 ssize_t bufsize = 50;
170 char buf0[bufsize], buf1[bufsize];
172 off_t off1 = m_maxbcachebuf;
173 struct aiocb iocb0, iocb1;
174 volatile sig_atomic_t read_count = 0;
176 expect_lookup(RELPATH, ino, 131072);
177 expect_open(ino, 0, 1);
/* Match the first read (at off0) but never respond to it. */
178 EXPECT_CALL(*m_mock, process(
179 ResultOf([=](auto in) {
180 return (in.header.opcode == FUSE_READ &&
181 in.header.nodeid == ino &&
182 in.body.read.fh == FH &&
183 in.body.read.offset == (uint64_t)off0);
186 ).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
188 /* Filesystem is slow to respond */
/* Match the second read (at off1); it should never arrive. */
190 EXPECT_CALL(*m_mock, process(
191 ResultOf([=](auto in) {
192 return (in.header.opcode == FUSE_READ &&
193 in.header.nodeid == ino &&
194 in.body.read.fh == FH &&
195 in.body.read.offset == (uint64_t)off1);
198 ).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
200 /* Filesystem is slow to respond */
203 fd = open(FULLPATH, O_RDONLY);
204 ASSERT_LE(0, fd) << strerror(errno);
207 * Submit two AIO read requests, and respond to neither. If the
208 * filesystem ever gets the second read request, then we failed to
209 * limit outstanding reads.
211 iocb0.aio_nbytes = bufsize;
212 iocb0.aio_fildes = fd;
213 iocb0.aio_buf = buf0;
214 iocb0.aio_offset = off0;
215 iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
216 ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);
218 iocb1.aio_nbytes = bufsize;
219 iocb1.aio_fildes = fd;
220 iocb1.aio_buf = buf1;
221 iocb1.aio_offset = off1;
222 iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
223 ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);
226 * Sleep for awhile to make sure the kernel has had a chance to issue
227 * the second read, even though the first has not yet returned
/* Exactly one read must have reached the daemon. */
230 EXPECT_EQ(read_count, 1);
232 m_mock->kill_daemon();
233 /* Wait for AIO activity to complete, but ignore errors */
234 (void)aio_waitcomplete(NULL, NULL);
236 /* Deliberately leak fd. close(2) will be tested in release.cc */
240 * With the FUSE_ASYNC_READ mount option, fuse(4) may issue multiple
241 * simultaneous read requests on the same file handle.
/*
 * With FUSE_ASYNC_READ, both reads should reach the daemon concurrently;
 * a semaphore counts their arrival.
 */
243 TEST_F(AsyncRead, async_read)
245 const char FULLPATH[] = "mountpoint/some_file.txt";
246 const char RELPATH[] = "some_file.txt";
249 ssize_t bufsize = 50;
250 char buf0[bufsize], buf1[bufsize];
252 off_t off1 = m_maxbcachebuf;
253 off_t fsize = 2 * m_maxbcachebuf;
254 struct aiocb iocb0, iocb1;
257 ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno);
259 expect_lookup(RELPATH, ino, fsize);
260 expect_open(ino, 0, 1);
/* Expect the first read exactly once; never respond. */
261 EXPECT_CALL(*m_mock, process(
262 ResultOf([=](auto in) {
263 return (in.header.opcode == FUSE_READ &&
264 in.header.nodeid == ino &&
265 in.body.read.fh == FH &&
266 in.body.read.offset == (uint64_t)off0);
269 ).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
271 /* Filesystem is slow to respond */
/* Expect the second read exactly once; never respond. */
273 EXPECT_CALL(*m_mock, process(
274 ResultOf([=](auto in) {
275 return (in.header.opcode == FUSE_READ &&
276 in.header.nodeid == ino &&
277 in.body.read.fh == FH &&
278 in.body.read.offset == (uint64_t)off1);
281 ).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
283 /* Filesystem is slow to respond */
286 fd = open(FULLPATH, O_RDONLY);
287 ASSERT_LE(0, fd) << strerror(errno);
290 * Submit two AIO read requests, but respond to neither. Ensure that
293 iocb0.aio_nbytes = bufsize;
294 iocb0.aio_fildes = fd;
295 iocb0.aio_buf = buf0;
296 iocb0.aio_offset = off0;
297 iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
298 ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);
300 iocb1.aio_nbytes = bufsize;
301 iocb1.aio_fildes = fd;
302 iocb1.aio_buf = buf1;
303 iocb1.aio_offset = off1;
304 iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
305 ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);
307 /* Wait until both reads have reached the daemon */
308 ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
309 ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
311 m_mock->kill_daemon();
312 /* Wait for AIO activity to complete, but ignore errors */
313 (void)aio_waitcomplete(NULL, NULL);
315 /* Deliberately leak fd. close(2) will be tested in release.cc */
318 /* 0-length reads shouldn't cause any confusion */
/* A 0-byte pread(2) must return 0 without sending any FUSE_READ. */
319 TEST_F(Read, direct_io_read_nothing)
321 const char FULLPATH[] = "mountpoint/some_file.txt";
322 const char RELPATH[] = "some_file.txt";
325 uint64_t offset = 100;
328 expect_lookup(RELPATH, ino, offset + 1000);
329 expect_open(ino, FOPEN_DIRECT_IO, 1);
331 fd = open(FULLPATH, O_RDONLY);
332 ASSERT_LE(0, fd) << strerror(errno);
/* No expect_read was registered, so a non-empty read would fail the mock. */
334 ASSERT_EQ(0, pread(fd, buf, 0, offset)) << strerror(errno);
335 /* Deliberately leak fd. close(2) will be tested in release.cc */
339 * With direct_io, reads should not fill the cache. They should go straight to
/* With FOPEN_DIRECT_IO, a pread(2) goes straight to the daemon. */
342 TEST_F(Read, direct_io_pread)
344 const char FULLPATH[] = "mountpoint/some_file.txt";
345 const char RELPATH[] = "some_file.txt";
346 const char *CONTENTS = "abcdefgh";
349 uint64_t offset = 100;
350 ssize_t bufsize = strlen(CONTENTS);
353 expect_lookup(RELPATH, ino, offset + bufsize);
354 expect_open(ino, FOPEN_DIRECT_IO, 1);
/* The read is expected at exactly the requested offset and size. */
355 expect_read(ino, offset, bufsize, bufsize, CONTENTS);
357 fd = open(FULLPATH, O_RDONLY);
358 ASSERT_LE(0, fd) << strerror(errno);
360 ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
361 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
362 /* Deliberately leak fd. close(2) will be tested in release.cc */
366 * With direct_io, filesystems are allowed to return less data than is
367 * requested. fuse(4) should return a short read to userland.
/* A daemon-side short read with direct_io must surface as a short read. */
369 TEST_F(Read, direct_io_short_read)
371 const char FULLPATH[] = "mountpoint/some_file.txt";
372 const char RELPATH[] = "some_file.txt";
373 const char *CONTENTS = "abcdefghijklmnop";
376 uint64_t offset = 100;
377 ssize_t bufsize = strlen(CONTENTS);
378 ssize_t halfbufsize = bufsize / 2;
381 expect_lookup(RELPATH, ino, offset + bufsize);
382 expect_open(ino, FOPEN_DIRECT_IO, 1);
/* Daemon returns only half of the requested bytes. */
383 expect_read(ino, offset, bufsize, halfbufsize, CONTENTS);
385 fd = open(FULLPATH, O_RDONLY);
386 ASSERT_LE(0, fd) << strerror(errno);
388 ASSERT_EQ(halfbufsize, pread(fd, buf, bufsize, offset))
390 ASSERT_EQ(0, memcmp(buf, CONTENTS, halfbufsize));
391 /* Deliberately leak fd. close(2) will be tested in release.cc */
/*
 * NOTE(review): the TEST_F header for this test is elided in this view;
 * from the body it checks that a daemon EIO reply is propagated to read(2).
 */
396 const char FULLPATH[] = "mountpoint/some_file.txt";
397 const char RELPATH[] = "some_file.txt";
398 const char *CONTENTS = "abcdefgh";
401 ssize_t bufsize = strlen(CONTENTS);
404 expect_lookup(RELPATH, ino, bufsize);
405 expect_open(ino, 0, 1);
/* Any FUSE_READ gets an EIO error reply. */
406 EXPECT_CALL(*m_mock, process(
407 ResultOf([=](auto in) {
408 return (in.header.opcode == FUSE_READ);
411 ).WillOnce(Invoke(ReturnErrno(EIO)));
413 fd = open(FULLPATH, O_RDONLY);
414 ASSERT_LE(0, fd) << strerror(errno);
/* read(2) must fail with the daemon's errno. */
416 ASSERT_EQ(-1, read(fd, buf, bufsize)) << strerror(errno);
417 ASSERT_EQ(EIO, errno);
418 /* Deliberately leak fd. close(2) will be tested in release.cc */
422 * If the server returns a short read when direct io is not in use, that
423 * indicates EOF, because of a server-side truncation. We should invalidate
424 * all cached attributes. We may update the file size,
/* A cached short read means server-side truncation; st_size must shrink. */
426 TEST_F(ReadCacheable, eof)
428 const char FULLPATH[] = "mountpoint/some_file.txt";
429 const char RELPATH[] = "some_file.txt";
430 const char *CONTENTS = "abcdefghijklmnop";
433 uint64_t offset = 100;
434 ssize_t bufsize = strlen(CONTENTS);
435 ssize_t partbufsize = 3 * bufsize / 4;
440 expect_lookup(RELPATH, ino, offset + bufsize);
441 expect_open(ino, 0, 1);
/* Daemon returns fewer bytes than the cache block requested. */
442 expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS);
443 expect_getattr(ino, offset + partbufsize);
445 fd = open(FULLPATH, O_RDONLY);
446 ASSERT_LE(0, fd) << strerror(errno);
448 r = pread(fd, buf, bufsize, offset);
449 ASSERT_LE(0, r) << strerror(errno);
450 EXPECT_EQ(partbufsize, r) << strerror(errno);
/* fstat must reflect the truncated size from the fresh getattr. */
451 ASSERT_EQ(0, fstat(fd, &sb));
452 EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size);
453 /* Deliberately leak fd. close(2) will be tested in release.cc */
456 /* Like ReadCacheable.eof, but causes an entire buffer to be invalidated */
/* Like ReadCacheable.eof, but causes an entire buffer to be invalidated */
457 TEST_F(ReadCacheable, eof_of_whole_buffer)
459 const char FULLPATH[] = "mountpoint/some_file.txt";
460 const char RELPATH[] = "some_file.txt";
461 const char *CONTENTS = "abcdefghijklmnop";
464 ssize_t bufsize = strlen(CONTENTS);
465 off_t old_filesize = m_maxbcachebuf * 2 + bufsize;
469 expect_lookup(RELPATH, ino, old_filesize);
470 expect_open(ino, 0, 1);
/* Third block reads fine; second block returns 0 bytes (truncated). */
471 expect_read(ino, 2 * m_maxbcachebuf, bufsize, bufsize, CONTENTS);
472 expect_read(ino, m_maxbcachebuf, m_maxbcachebuf, 0, CONTENTS);
473 expect_getattr(ino, m_maxbcachebuf);
475 fd = open(FULLPATH, O_RDONLY);
476 ASSERT_LE(0, fd) << strerror(errno);
478 /* Cache the third block */
479 ASSERT_EQ(bufsize, pread(fd, buf, bufsize, m_maxbcachebuf * 2))
481 /* Try to read the 2nd block, but it's past EOF */
482 ASSERT_EQ(0, pread(fd, buf, bufsize, m_maxbcachebuf))
/* The file size must now be the truncated one block. */
484 ASSERT_EQ(0, fstat(fd, &sb));
485 EXPECT_EQ((off_t)(m_maxbcachebuf), sb.st_size);
486 /* Deliberately leak fd. close(2) will be tested in release.cc */
490 * With the keep_cache option, the kernel may keep its read cache across
/* With FOPEN_KEEP_CACHE, a second open may be serviced from cache. */
493 TEST_F(ReadCacheable, keep_cache)
495 const char FULLPATH[] = "mountpoint/some_file.txt";
496 const char RELPATH[] = "some_file.txt";
497 const char *CONTENTS = "abcdefgh";
500 ssize_t bufsize = strlen(CONTENTS);
/* Two lookups/opens are expected, but only ONE daemon read. */
503 FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
504 expect_open(ino, FOPEN_KEEP_CACHE, 2);
505 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
507 fd0 = open(FULLPATH, O_RDONLY);
508 ASSERT_LE(0, fd0) << strerror(errno);
509 ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
511 fd1 = open(FULLPATH, O_RDWR);
512 ASSERT_LE(0, fd1) << strerror(errno);
515 * This read should be serviced by cache, even though it's on the other
518 ASSERT_EQ(bufsize, read(fd1, buf, bufsize)) << strerror(errno);
520 /* Deliberately leak fd0 and fd1. */
524 * Without the keep_cache option, the kernel should drop its read caches on
/* Without FOPEN_KEEP_CACHE, a re-read must hit the daemon again. */
527 TEST_F(Read, keep_cache_disabled)
529 const char FULLPATH[] = "mountpoint/some_file.txt";
530 const char RELPATH[] = "some_file.txt";
531 const char *CONTENTS = "abcdefgh";
534 ssize_t bufsize = strlen(CONTENTS);
537 FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
538 expect_open(ino, 0, 2);
539 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
541 fd0 = open(FULLPATH, O_RDONLY);
542 ASSERT_LE(0, fd0) << strerror(errno);
543 ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
545 fd1 = open(FULLPATH, O_RDWR);
546 ASSERT_LE(0, fd1) << strerror(errno);
549 * This read should not be serviced by cache, even though it's on the
550 * original file descriptor
/* A second daemon read is explicitly expected before re-reading. */
552 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
553 ASSERT_EQ(0, lseek(fd0, 0, SEEK_SET)) << strerror(errno);
554 ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
556 /* Deliberately leak fd0 and fd1. */
/* Reading through an mmap(2) mapping should return the file's contents. */
559 TEST_F(ReadCacheable, mmap)
561 const char FULLPATH[] = "mountpoint/some_file.txt";
562 const char RELPATH[] = "some_file.txt";
563 const char *CONTENTS = "abcdefgh";
567 size_t bufsize = strlen(CONTENTS);
572 expect_lookup(RELPATH, ino, bufsize);
573 expect_open(ino, 0, 1);
574 /* mmap may legitimately try to read more data than is available */
575 EXPECT_CALL(*m_mock, process(
576 ResultOf([=](auto in) {
577 return (in.header.opcode == FUSE_READ &&
578 in.header.nodeid == ino &&
579 in.body.read.fh == Read::FH &&
580 in.body.read.offset == 0 &&
581 in.body.read.size >= bufsize);
584 ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
/* Reply with only bufsize bytes even if more was requested. */
585 out.header.len = sizeof(struct fuse_out_header) + bufsize;
586 memmove(out.body.bytes, CONTENTS, bufsize);
589 fd = open(FULLPATH, O_RDONLY);
590 ASSERT_LE(0, fd) << strerror(errno);
592 p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
593 ASSERT_NE(MAP_FAILED, p) << strerror(errno);
595 ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
597 ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
598 /* Deliberately leak fd. close(2) will be tested in release.cc */
602 * A read via mmap comes up short, indicating that the file was truncated
/* A short read through mmap indicates server-side truncation. */
605 TEST_F(ReadCacheable, mmap_eof)
607 const char FULLPATH[] = "mountpoint/some_file.txt";
608 const char RELPATH[] = "some_file.txt";
609 const char *CONTENTS = "abcdefgh";
613 size_t bufsize = strlen(CONTENTS);
/* The lookup reports a much larger size than the daemon will deliver. */
619 expect_lookup(RELPATH, ino, 100000);
620 expect_open(ino, 0, 1);
621 /* mmap may legitimately try to read more data than is available */
622 EXPECT_CALL(*m_mock, process(
623 ResultOf([=](auto in) {
624 return (in.header.opcode == FUSE_READ &&
625 in.header.nodeid == ino &&
626 in.body.read.fh == Read::FH &&
627 in.body.read.offset == 0 &&
628 in.body.read.size >= bufsize);
631 ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
632 out.header.len = sizeof(struct fuse_out_header) + bufsize;
633 memmove(out.body.bytes, CONTENTS, bufsize);
/* The short read triggers a fresh getattr reporting the real size. */
635 expect_getattr(ino, bufsize);
637 fd = open(FULLPATH, O_RDONLY);
638 ASSERT_LE(0, fd) << strerror(errno);
640 p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
641 ASSERT_NE(MAP_FAILED, p) << strerror(errno);
643 /* The file size should be automatically truncated */
644 ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
645 ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
646 EXPECT_EQ((off_t)bufsize, sb.st_size);
648 ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
649 /* Deliberately leak fd. close(2) will be tested in release.cc */
653 * Just as when FOPEN_DIRECT_IO is used, reads with O_DIRECT should bypass
654 * cache and to straight to the daemon
/* Setting O_DIRECT via fcntl should make subsequent reads bypass the cache. */
656 TEST_F(Read, o_direct)
658 const char FULLPATH[] = "mountpoint/some_file.txt";
659 const char RELPATH[] = "some_file.txt";
660 const char *CONTENTS = "abcdefgh";
663 ssize_t bufsize = strlen(CONTENTS);
666 expect_lookup(RELPATH, ino, bufsize);
667 expect_open(ino, 0, 1);
668 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
670 fd = open(FULLPATH, O_RDONLY);
671 ASSERT_LE(0, fd) << strerror(errno);
/* First read populates the cache normally. */
674 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
675 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
677 // Reads with o_direct should bypass the cache
678 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
679 ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
680 ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
681 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
682 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
684 /* Deliberately leak fd. close(2) will be tested in release.cc */
/*
 * NOTE(review): the TEST_F header for this test is elided in this view;
 * from the body it is a basic pread(2) test at a block-aligned offset.
 */
689 const char FULLPATH[] = "mountpoint/some_file.txt";
690 const char RELPATH[] = "some_file.txt";
691 const char *CONTENTS = "abcdefgh";
695 * Set offset to a maxbcachebuf boundary so we'll be sure what offset
696 * to read from. Without this, the read might start at a lower offset.
698 uint64_t offset = m_maxbcachebuf;
699 ssize_t bufsize = strlen(CONTENTS);
702 expect_lookup(RELPATH, ino, offset + bufsize);
703 expect_open(ino, 0, 1);
704 expect_read(ino, offset, bufsize, bufsize, CONTENTS);
706 fd = open(FULLPATH, O_RDONLY);
707 ASSERT_LE(0, fd) << strerror(errno);
709 ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
710 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
711 /* Deliberately leak fd. close(2) will be tested in release.cc */
/*
 * NOTE(review): the TEST_F header for this test is elided in this view;
 * from the body it is a basic sequential read(2) test.
 */
716 const char FULLPATH[] = "mountpoint/some_file.txt";
717 const char RELPATH[] = "some_file.txt";
718 const char *CONTENTS = "abcdefgh";
721 ssize_t bufsize = strlen(CONTENTS);
724 expect_lookup(RELPATH, ino, bufsize);
725 expect_open(ino, 0, 1);
726 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
728 fd = open(FULLPATH, O_RDONLY);
729 ASSERT_LE(0, fd) << strerror(errno);
731 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
732 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
734 /* Deliberately leak fd. close(2) will be tested in release.cc */
/* Basic read(2) against a daemon speaking protocol 7.8. */
737 TEST_F(Read_7_8, read)
739 const char FULLPATH[] = "mountpoint/some_file.txt";
740 const char RELPATH[] = "some_file.txt";
741 const char *CONTENTS = "abcdefgh";
744 ssize_t bufsize = strlen(CONTENTS);
747 expect_lookup(RELPATH, ino, bufsize);
748 expect_open(ino, 0, 1);
749 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
751 fd = open(FULLPATH, O_RDONLY);
752 ASSERT_LE(0, fd) << strerror(errno);
754 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
755 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
757 /* Deliberately leak fd. close(2) will be tested in release.cc */
761 * If cacheing is enabled, the kernel should try to read an entire cache block
/* A small read should fetch and cache a whole m_maxbcachebuf-sized block. */
764 TEST_F(ReadCacheable, cache_block)
766 const char FULLPATH[] = "mountpoint/some_file.txt";
767 const char RELPATH[] = "some_file.txt";
768 const char *CONTENTS0 = "abcdefghijklmnop";
772 ssize_t filesize = m_maxbcachebuf * 2;
/* contents1 is the tail of the first cache block, after the first read. */
775 const char *contents1 = CONTENTS0 + bufsize;
777 contents = (char*)calloc(1, filesize);
778 ASSERT_NE(NULL, contents);
779 memmove(contents, CONTENTS0, strlen(CONTENTS0));
781 expect_lookup(RELPATH, ino, filesize);
782 expect_open(ino, 0, 1);
/* Only ONE daemon read, for the entire first cache block. */
783 expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf,
786 fd = open(FULLPATH, O_RDONLY);
787 ASSERT_LE(0, fd) << strerror(errno);
789 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
790 ASSERT_EQ(0, memcmp(buf, CONTENTS0, bufsize));
792 /* A subsequent read should be serviced by cache */
793 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
794 ASSERT_EQ(0, memcmp(buf, contents1, bufsize));
795 /* Deliberately leak fd. close(2) will be tested in release.cc */
798 /* Reading with sendfile should work (though it obviously won't be 0-copy) */
/* sendfile(2) over a socketpair should deliver the file's contents. */
799 TEST_F(ReadCacheable, sendfile)
801 const char FULLPATH[] = "mountpoint/some_file.txt";
802 const char RELPATH[] = "some_file.txt";
803 const char *CONTENTS = "abcdefgh";
806 size_t bufsize = strlen(CONTENTS);
811 expect_lookup(RELPATH, ino, bufsize);
812 expect_open(ino, 0, 1);
813 /* Like mmap, sendfile may request more data than is available */
814 EXPECT_CALL(*m_mock, process(
815 ResultOf([=](auto in) {
816 return (in.header.opcode == FUSE_READ &&
817 in.header.nodeid == ino &&
818 in.body.read.fh == Read::FH &&
819 in.body.read.offset == 0 &&
820 in.body.read.size >= bufsize);
823 ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
824 out.header.len = sizeof(struct fuse_out_header) + bufsize;
825 memmove(out.body.bytes, CONTENTS, bufsize);
/* Push the file through one end of the socketpair, read from the other. */
828 ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
830 fd = open(FULLPATH, O_RDONLY);
831 ASSERT_LE(0, fd) << strerror(errno);
833 ASSERT_EQ(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0))
835 ASSERT_EQ(static_cast<ssize_t>(bufsize), read(sp[0], buf, bufsize))
837 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
841 /* Deliberately leak fd. close(2) will be tested in release.cc */
844 /* sendfile should fail gracefully if fuse declines the read */
845 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
/* sendfile should fail gracefully if fuse declines the read */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
846 TEST_F(ReadCacheable, DISABLED_sendfile_eio)
848 const char FULLPATH[] = "mountpoint/some_file.txt";
849 const char RELPATH[] = "some_file.txt";
850 const char *CONTENTS = "abcdefgh";
853 ssize_t bufsize = strlen(CONTENTS);
857 expect_lookup(RELPATH, ino, bufsize);
858 expect_open(ino, 0, 1);
/* The sole FUSE_READ is answered with EIO. */
859 EXPECT_CALL(*m_mock, process(
860 ResultOf([=](auto in) {
861 return (in.header.opcode == FUSE_READ);
864 ).WillOnce(Invoke(ReturnErrno(EIO)));
866 ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
868 fd = open(FULLPATH, O_RDONLY);
869 ASSERT_LE(0, fd) << strerror(errno);
/* sendfile must report failure rather than hanging or succeeding. */
871 ASSERT_NE(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0));
875 /* Deliberately leak fd. close(2) will be tested in release.cc */
879 * Sequential reads should use readahead. And if allowed, large reads should
/*
 * Sequential reads should trigger readahead; cluster size depends on
 * the noclusterr mount option and the configured maxreadahead.
 */
882 TEST_P(ReadAhead, readahead) {
883 const char FULLPATH[] = "mountpoint/some_file.txt";
884 const char RELPATH[] = "some_file.txt";
886 int fd, maxcontig, clustersize;
887 ssize_t bufsize = 4 * m_maxbcachebuf;
888 ssize_t filesize = bufsize;
890 char *rbuf, *contents;
893 contents = (char*)malloc(filesize);
894 ASSERT_NE(NULL, contents);
895 memset(contents, 'X', filesize);
896 rbuf = (char*)calloc(1, bufsize);
898 expect_lookup(RELPATH, ino, filesize);
899 expect_open(ino, 0, 1);
/* Largest contiguous daemon read: one block, or block + readahead. */
900 maxcontig = m_noclusterr ? m_maxbcachebuf :
901 m_maxbcachebuf + m_maxreadahead;
902 clustersize = MIN(maxcontig, m_maxphys);
/* Expect one daemon read per cluster covering the whole file. */
903 for (offs = 0; offs < bufsize; offs += clustersize) {
904 len = std::min((size_t)clustersize, (size_t)(filesize - offs));
905 expect_read(ino, offs, len, len, contents + offs);
908 fd = open(FULLPATH, O_RDONLY);
909 ASSERT_LE(0, fd) << strerror(errno);
911 /* Set the internal readahead counter to a "large" value */
912 ASSERT_EQ(0, fcntl(fd, F_READAHEAD, 1'000'000'000)) << strerror(errno);
914 ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
915 ASSERT_EQ(0, memcmp(rbuf, contents, bufsize));
917 /* Deliberately leak fd. close(2) will be tested in release.cc */
/*
 * Run ReadAhead with every (noclusterr, readahead-multiplier) combination
 * of interest; multiplier 3 is omitted for the noclusterr case.
 */
920 INSTANTIATE_TEST_CASE_P(RA, ReadAhead,
921 Values(tuple<bool, int>(false, 0),
922 tuple<bool, int>(false, 1),
923 tuple<bool, int>(false, 2),
924 tuple<bool, int>(false, 3),
925 tuple<bool, int>(true, 0),
926 tuple<bool, int>(true, 1),
927 tuple<bool, int>(true, 2)));