/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
extern "C" {
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <aio.h>
#include <fcntl.h>
#include <semaphore.h>
#include <unistd.h>
}

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;
49 class Read: public FuseTest {
52 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
54 FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
58 class Read_7_8: public FuseTest {
60 virtual void SetUp() {
61 m_kernel_minor_version = 8;
65 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
67 FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
/* Fixture for POSIX AIO reads; skips unless unsafe AIO is enabled. */
class AioRead: public Read {
public:
virtual void SetUp() {
	const char *node = "vfs.aio.enable_unsafe";
	int val = 0;
	size_t size = sizeof(val);

	FuseTest::SetUp();

	/* AIO on non-device vnodes requires vfs.aio.enable_unsafe=1 */
	ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
		<< strerror(errno);
	if (!val)
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
}
};
88 class AsyncRead: public AioRead {
89 virtual void SetUp() {
90 m_init_flags = FUSE_ASYNC_READ;
/* Fixture for tests that require fusefs data caching to be enabled. */
class ReadCacheable: public Read {
public:
virtual void SetUp() {
	const char *node = "vfs.fusefs.data_cache_mode";
	int val = 0;
	size_t size = sizeof(val);

	FuseTest::SetUp();

	/* Caching-dependent tests are meaningless with caching disabled */
	ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
		<< strerror(errno);
	if (val == 0)
		GTEST_SKIP() <<
			"fusefs data caching must be enabled for this test";
}
};
112 class ReadAhead: public ReadCacheable,
113 public WithParamInterface<tuple<bool, uint32_t>>
115 virtual void SetUp() {
116 m_maxreadahead = get<1>(GetParam());
117 m_noclusterr = get<0>(GetParam());
118 ReadCacheable::SetUp();
122 /* AIO reads need to set the header's pid field correctly */
123 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
124 TEST_F(AioRead, aio_read)
126 const char FULLPATH[] = "mountpoint/some_file.txt";
127 const char RELPATH[] = "some_file.txt";
128 const char *CONTENTS = "abcdefgh";
131 ssize_t bufsize = strlen(CONTENTS);
133 struct aiocb iocb, *piocb;
135 expect_lookup(RELPATH, ino, bufsize);
136 expect_open(ino, 0, 1);
137 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
139 fd = open(FULLPATH, O_RDONLY);
140 ASSERT_LE(0, fd) << strerror(errno);
142 iocb.aio_nbytes = bufsize;
143 iocb.aio_fildes = fd;
146 iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
147 ASSERT_EQ(0, aio_read(&iocb)) << strerror(errno);
148 ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
149 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
150 /* Deliberately leak fd. close(2) will be tested in release.cc */
154 * Without the FUSE_ASYNC_READ mount option, fuse(4) should ensure that there
155 * is at most one outstanding read operation per file handle
157 TEST_F(AioRead, async_read_disabled)
159 const char FULLPATH[] = "mountpoint/some_file.txt";
160 const char RELPATH[] = "some_file.txt";
163 ssize_t bufsize = 50;
164 char buf0[bufsize], buf1[bufsize];
166 off_t off1 = m_maxbcachebuf;
167 struct aiocb iocb0, iocb1;
168 volatile sig_atomic_t read_count = 0;
170 expect_lookup(RELPATH, ino, 131072);
171 expect_open(ino, 0, 1);
172 EXPECT_CALL(*m_mock, process(
173 ResultOf([=](auto in) {
174 return (in.header.opcode == FUSE_READ &&
175 in.header.nodeid == ino &&
176 in.body.read.fh == FH &&
177 in.body.read.offset == (uint64_t)off0);
180 ).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
182 /* Filesystem is slow to respond */
184 EXPECT_CALL(*m_mock, process(
185 ResultOf([=](auto in) {
186 return (in.header.opcode == FUSE_READ &&
187 in.header.nodeid == ino &&
188 in.body.read.fh == FH &&
189 in.body.read.offset == (uint64_t)off1);
192 ).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
194 /* Filesystem is slow to respond */
197 fd = open(FULLPATH, O_RDONLY);
198 ASSERT_LE(0, fd) << strerror(errno);
201 * Submit two AIO read requests, and respond to neither. If the
202 * filesystem ever gets the second read request, then we failed to
203 * limit outstanding reads.
205 iocb0.aio_nbytes = bufsize;
206 iocb0.aio_fildes = fd;
207 iocb0.aio_buf = buf0;
208 iocb0.aio_offset = off0;
209 iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
210 ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);
212 iocb1.aio_nbytes = bufsize;
213 iocb1.aio_fildes = fd;
214 iocb1.aio_buf = buf1;
215 iocb1.aio_offset = off1;
216 iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
217 ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);
220 * Sleep for awhile to make sure the kernel has had a chance to issue
221 * the second read, even though the first has not yet returned
224 EXPECT_EQ(read_count, 1);
226 m_mock->kill_daemon();
227 /* Wait for AIO activity to complete, but ignore errors */
228 (void)aio_waitcomplete(NULL, NULL);
230 /* Deliberately leak fd. close(2) will be tested in release.cc */
234 * With the FUSE_ASYNC_READ mount option, fuse(4) may issue multiple
235 * simultaneous read requests on the same file handle.
237 TEST_F(AsyncRead, async_read)
239 const char FULLPATH[] = "mountpoint/some_file.txt";
240 const char RELPATH[] = "some_file.txt";
243 ssize_t bufsize = 50;
244 char buf0[bufsize], buf1[bufsize];
246 off_t off1 = m_maxbcachebuf;
247 off_t fsize = 2 * m_maxbcachebuf;
248 struct aiocb iocb0, iocb1;
251 ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno);
253 expect_lookup(RELPATH, ino, fsize);
254 expect_open(ino, 0, 1);
255 EXPECT_CALL(*m_mock, process(
256 ResultOf([=](auto in) {
257 return (in.header.opcode == FUSE_READ &&
258 in.header.nodeid == ino &&
259 in.body.read.fh == FH &&
260 in.body.read.offset == (uint64_t)off0);
263 ).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
265 /* Filesystem is slow to respond */
267 EXPECT_CALL(*m_mock, process(
268 ResultOf([=](auto in) {
269 return (in.header.opcode == FUSE_READ &&
270 in.header.nodeid == ino &&
271 in.body.read.fh == FH &&
272 in.body.read.offset == (uint64_t)off1);
275 ).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
277 /* Filesystem is slow to respond */
280 fd = open(FULLPATH, O_RDONLY);
281 ASSERT_LE(0, fd) << strerror(errno);
284 * Submit two AIO read requests, but respond to neither. Ensure that
287 iocb0.aio_nbytes = bufsize;
288 iocb0.aio_fildes = fd;
289 iocb0.aio_buf = buf0;
290 iocb0.aio_offset = off0;
291 iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
292 ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);
294 iocb1.aio_nbytes = bufsize;
295 iocb1.aio_fildes = fd;
296 iocb1.aio_buf = buf1;
297 iocb1.aio_offset = off1;
298 iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
299 ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);
301 /* Wait until both reads have reached the daemon */
302 ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
303 ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
305 m_mock->kill_daemon();
306 /* Wait for AIO activity to complete, but ignore errors */
307 (void)aio_waitcomplete(NULL, NULL);
309 /* Deliberately leak fd. close(2) will be tested in release.cc */
312 /* 0-length reads shouldn't cause any confusion */
313 TEST_F(Read, direct_io_read_nothing)
315 const char FULLPATH[] = "mountpoint/some_file.txt";
316 const char RELPATH[] = "some_file.txt";
319 uint64_t offset = 100;
322 expect_lookup(RELPATH, ino, offset + 1000);
323 expect_open(ino, FOPEN_DIRECT_IO, 1);
325 fd = open(FULLPATH, O_RDONLY);
326 ASSERT_LE(0, fd) << strerror(errno);
328 ASSERT_EQ(0, pread(fd, buf, 0, offset)) << strerror(errno);
329 /* Deliberately leak fd. close(2) will be tested in release.cc */
333 * With direct_io, reads should not fill the cache. They should go straight to
336 TEST_F(Read, direct_io_pread)
338 const char FULLPATH[] = "mountpoint/some_file.txt";
339 const char RELPATH[] = "some_file.txt";
340 const char *CONTENTS = "abcdefgh";
343 uint64_t offset = 100;
344 ssize_t bufsize = strlen(CONTENTS);
347 expect_lookup(RELPATH, ino, offset + bufsize);
348 expect_open(ino, FOPEN_DIRECT_IO, 1);
349 expect_read(ino, offset, bufsize, bufsize, CONTENTS);
351 fd = open(FULLPATH, O_RDONLY);
352 ASSERT_LE(0, fd) << strerror(errno);
354 ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
355 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
356 /* Deliberately leak fd. close(2) will be tested in release.cc */
360 * With direct_io, filesystems are allowed to return less data than is
361 * requested. fuse(4) should return a short read to userland.
363 TEST_F(Read, direct_io_short_read)
365 const char FULLPATH[] = "mountpoint/some_file.txt";
366 const char RELPATH[] = "some_file.txt";
367 const char *CONTENTS = "abcdefghijklmnop";
370 uint64_t offset = 100;
371 ssize_t bufsize = strlen(CONTENTS);
372 ssize_t halfbufsize = bufsize / 2;
375 expect_lookup(RELPATH, ino, offset + bufsize);
376 expect_open(ino, FOPEN_DIRECT_IO, 1);
377 expect_read(ino, offset, bufsize, halfbufsize, CONTENTS);
379 fd = open(FULLPATH, O_RDONLY);
380 ASSERT_LE(0, fd) << strerror(errno);
382 ASSERT_EQ(halfbufsize, pread(fd, buf, bufsize, offset))
384 ASSERT_EQ(0, memcmp(buf, CONTENTS, halfbufsize));
385 /* Deliberately leak fd. close(2) will be tested in release.cc */
390 const char FULLPATH[] = "mountpoint/some_file.txt";
391 const char RELPATH[] = "some_file.txt";
392 const char *CONTENTS = "abcdefgh";
395 ssize_t bufsize = strlen(CONTENTS);
398 expect_lookup(RELPATH, ino, bufsize);
399 expect_open(ino, 0, 1);
400 EXPECT_CALL(*m_mock, process(
401 ResultOf([=](auto in) {
402 return (in.header.opcode == FUSE_READ);
405 ).WillOnce(Invoke(ReturnErrno(EIO)));
407 fd = open(FULLPATH, O_RDONLY);
408 ASSERT_LE(0, fd) << strerror(errno);
410 ASSERT_EQ(-1, read(fd, buf, bufsize)) << strerror(errno);
411 ASSERT_EQ(EIO, errno);
412 /* Deliberately leak fd. close(2) will be tested in release.cc */
416 * If the server returns a short read when direct io is not in use, that
417 * indicates EOF, because of a server-side truncation. We should invalidate
418 * all cached attributes. We may update the file size,
420 TEST_F(ReadCacheable, eof)
422 const char FULLPATH[] = "mountpoint/some_file.txt";
423 const char RELPATH[] = "some_file.txt";
424 const char *CONTENTS = "abcdefghijklmnop";
427 uint64_t offset = 100;
428 ssize_t bufsize = strlen(CONTENTS);
429 ssize_t partbufsize = 3 * bufsize / 4;
434 expect_lookup(RELPATH, ino, offset + bufsize);
435 expect_open(ino, 0, 1);
436 expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS);
437 expect_getattr(ino, offset + partbufsize);
439 fd = open(FULLPATH, O_RDONLY);
440 ASSERT_LE(0, fd) << strerror(errno);
442 r = pread(fd, buf, bufsize, offset);
443 ASSERT_LE(0, r) << strerror(errno);
444 EXPECT_EQ(partbufsize, r) << strerror(errno);
445 ASSERT_EQ(0, fstat(fd, &sb));
446 EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size);
447 /* Deliberately leak fd. close(2) will be tested in release.cc */
450 /* Like ReadCacheable.eof, but causes an entire buffer to be invalidated */
451 TEST_F(ReadCacheable, eof_of_whole_buffer)
453 const char FULLPATH[] = "mountpoint/some_file.txt";
454 const char RELPATH[] = "some_file.txt";
455 const char *CONTENTS = "abcdefghijklmnop";
458 ssize_t bufsize = strlen(CONTENTS);
459 off_t old_filesize = m_maxbcachebuf * 2 + bufsize;
463 expect_lookup(RELPATH, ino, old_filesize);
464 expect_open(ino, 0, 1);
465 expect_read(ino, 2 * m_maxbcachebuf, bufsize, bufsize, CONTENTS);
466 expect_read(ino, m_maxbcachebuf, m_maxbcachebuf, 0, CONTENTS);
467 expect_getattr(ino, m_maxbcachebuf);
469 fd = open(FULLPATH, O_RDONLY);
470 ASSERT_LE(0, fd) << strerror(errno);
472 /* Cache the third block */
473 ASSERT_EQ(bufsize, pread(fd, buf, bufsize, m_maxbcachebuf * 2))
475 /* Try to read the 2nd block, but it's past EOF */
476 ASSERT_EQ(0, pread(fd, buf, bufsize, m_maxbcachebuf))
478 ASSERT_EQ(0, fstat(fd, &sb));
479 EXPECT_EQ((off_t)(m_maxbcachebuf), sb.st_size);
480 /* Deliberately leak fd. close(2) will be tested in release.cc */
484 * With the keep_cache option, the kernel may keep its read cache across
487 TEST_F(ReadCacheable, keep_cache)
489 const char FULLPATH[] = "mountpoint/some_file.txt";
490 const char RELPATH[] = "some_file.txt";
491 const char *CONTENTS = "abcdefgh";
494 ssize_t bufsize = strlen(CONTENTS);
497 FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
498 expect_open(ino, FOPEN_KEEP_CACHE, 2);
499 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
501 fd0 = open(FULLPATH, O_RDONLY);
502 ASSERT_LE(0, fd0) << strerror(errno);
503 ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
505 fd1 = open(FULLPATH, O_RDWR);
506 ASSERT_LE(0, fd1) << strerror(errno);
509 * This read should be serviced by cache, even though it's on the other
512 ASSERT_EQ(bufsize, read(fd1, buf, bufsize)) << strerror(errno);
514 /* Deliberately leak fd0 and fd1. */
518 * Without the keep_cache option, the kernel should drop its read caches on
521 TEST_F(Read, keep_cache_disabled)
523 const char FULLPATH[] = "mountpoint/some_file.txt";
524 const char RELPATH[] = "some_file.txt";
525 const char *CONTENTS = "abcdefgh";
528 ssize_t bufsize = strlen(CONTENTS);
531 FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
532 expect_open(ino, 0, 2);
533 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
535 fd0 = open(FULLPATH, O_RDONLY);
536 ASSERT_LE(0, fd0) << strerror(errno);
537 ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
539 fd1 = open(FULLPATH, O_RDWR);
540 ASSERT_LE(0, fd1) << strerror(errno);
543 * This read should not be serviced by cache, even though it's on the
544 * original file descriptor
546 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
547 ASSERT_EQ(0, lseek(fd0, 0, SEEK_SET)) << strerror(errno);
548 ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
550 /* Deliberately leak fd0 and fd1. */
553 TEST_F(ReadCacheable, mmap)
555 const char FULLPATH[] = "mountpoint/some_file.txt";
556 const char RELPATH[] = "some_file.txt";
557 const char *CONTENTS = "abcdefgh";
561 size_t bufsize = strlen(CONTENTS);
566 expect_lookup(RELPATH, ino, bufsize);
567 expect_open(ino, 0, 1);
568 /* mmap may legitimately try to read more data than is available */
569 EXPECT_CALL(*m_mock, process(
570 ResultOf([=](auto in) {
571 return (in.header.opcode == FUSE_READ &&
572 in.header.nodeid == ino &&
573 in.body.read.fh == Read::FH &&
574 in.body.read.offset == 0 &&
575 in.body.read.size >= bufsize);
578 ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
579 out.header.len = sizeof(struct fuse_out_header) + bufsize;
580 memmove(out.body.bytes, CONTENTS, bufsize);
583 fd = open(FULLPATH, O_RDONLY);
584 ASSERT_LE(0, fd) << strerror(errno);
586 p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
587 ASSERT_NE(MAP_FAILED, p) << strerror(errno);
589 ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
591 ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
592 /* Deliberately leak fd. close(2) will be tested in release.cc */
596 * A read via mmap comes up short, indicating that the file was truncated
599 TEST_F(ReadCacheable, mmap_eof)
601 const char FULLPATH[] = "mountpoint/some_file.txt";
602 const char RELPATH[] = "some_file.txt";
603 const char *CONTENTS = "abcdefgh";
607 size_t bufsize = strlen(CONTENTS);
613 expect_lookup(RELPATH, ino, 100000);
614 expect_open(ino, 0, 1);
615 /* mmap may legitimately try to read more data than is available */
616 EXPECT_CALL(*m_mock, process(
617 ResultOf([=](auto in) {
618 return (in.header.opcode == FUSE_READ &&
619 in.header.nodeid == ino &&
620 in.body.read.fh == Read::FH &&
621 in.body.read.offset == 0 &&
622 in.body.read.size >= bufsize);
625 ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
626 out.header.len = sizeof(struct fuse_out_header) + bufsize;
627 memmove(out.body.bytes, CONTENTS, bufsize);
629 expect_getattr(ino, bufsize);
631 fd = open(FULLPATH, O_RDONLY);
632 ASSERT_LE(0, fd) << strerror(errno);
634 p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
635 ASSERT_NE(MAP_FAILED, p) << strerror(errno);
637 /* The file size should be automatically truncated */
638 ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
639 ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
640 EXPECT_EQ((off_t)bufsize, sb.st_size);
642 ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
643 /* Deliberately leak fd. close(2) will be tested in release.cc */
647 * Just as when FOPEN_DIRECT_IO is used, reads with O_DIRECT should bypass
648 * cache and to straight to the daemon
650 TEST_F(Read, o_direct)
652 const char FULLPATH[] = "mountpoint/some_file.txt";
653 const char RELPATH[] = "some_file.txt";
654 const char *CONTENTS = "abcdefgh";
657 ssize_t bufsize = strlen(CONTENTS);
660 expect_lookup(RELPATH, ino, bufsize);
661 expect_open(ino, 0, 1);
662 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
664 fd = open(FULLPATH, O_RDONLY);
665 ASSERT_LE(0, fd) << strerror(errno);
668 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
669 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
671 // Reads with o_direct should bypass the cache
672 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
673 ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
674 ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
675 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
676 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
678 /* Deliberately leak fd. close(2) will be tested in release.cc */
683 const char FULLPATH[] = "mountpoint/some_file.txt";
684 const char RELPATH[] = "some_file.txt";
685 const char *CONTENTS = "abcdefgh";
689 * Set offset to a maxbcachebuf boundary so we'll be sure what offset
690 * to read from. Without this, the read might start at a lower offset.
692 uint64_t offset = m_maxbcachebuf;
693 ssize_t bufsize = strlen(CONTENTS);
696 expect_lookup(RELPATH, ino, offset + bufsize);
697 expect_open(ino, 0, 1);
698 expect_read(ino, offset, bufsize, bufsize, CONTENTS);
700 fd = open(FULLPATH, O_RDONLY);
701 ASSERT_LE(0, fd) << strerror(errno);
703 ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
704 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
705 /* Deliberately leak fd. close(2) will be tested in release.cc */
710 const char FULLPATH[] = "mountpoint/some_file.txt";
711 const char RELPATH[] = "some_file.txt";
712 const char *CONTENTS = "abcdefgh";
715 ssize_t bufsize = strlen(CONTENTS);
718 expect_lookup(RELPATH, ino, bufsize);
719 expect_open(ino, 0, 1);
720 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
722 fd = open(FULLPATH, O_RDONLY);
723 ASSERT_LE(0, fd) << strerror(errno);
725 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
726 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
728 /* Deliberately leak fd. close(2) will be tested in release.cc */
731 TEST_F(Read_7_8, read)
733 const char FULLPATH[] = "mountpoint/some_file.txt";
734 const char RELPATH[] = "some_file.txt";
735 const char *CONTENTS = "abcdefgh";
738 ssize_t bufsize = strlen(CONTENTS);
741 expect_lookup(RELPATH, ino, bufsize);
742 expect_open(ino, 0, 1);
743 expect_read(ino, 0, bufsize, bufsize, CONTENTS);
745 fd = open(FULLPATH, O_RDONLY);
746 ASSERT_LE(0, fd) << strerror(errno);
748 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
749 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
751 /* Deliberately leak fd. close(2) will be tested in release.cc */
755 * If cacheing is enabled, the kernel should try to read an entire cache block
758 TEST_F(ReadCacheable, cache_block)
760 const char FULLPATH[] = "mountpoint/some_file.txt";
761 const char RELPATH[] = "some_file.txt";
762 const char *CONTENTS0 = "abcdefghijklmnop";
766 ssize_t filesize = m_maxbcachebuf * 2;
769 const char *contents1 = CONTENTS0 + bufsize;
771 contents = (char*)calloc(1, filesize);
772 ASSERT_NE(NULL, contents);
773 memmove(contents, CONTENTS0, strlen(CONTENTS0));
775 expect_lookup(RELPATH, ino, filesize);
776 expect_open(ino, 0, 1);
777 expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf,
780 fd = open(FULLPATH, O_RDONLY);
781 ASSERT_LE(0, fd) << strerror(errno);
783 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
784 ASSERT_EQ(0, memcmp(buf, CONTENTS0, bufsize));
786 /* A subsequent read should be serviced by cache */
787 ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
788 ASSERT_EQ(0, memcmp(buf, contents1, bufsize));
789 /* Deliberately leak fd. close(2) will be tested in release.cc */
792 /* Reading with sendfile should work (though it obviously won't be 0-copy) */
793 TEST_F(ReadCacheable, sendfile)
795 const char FULLPATH[] = "mountpoint/some_file.txt";
796 const char RELPATH[] = "some_file.txt";
797 const char *CONTENTS = "abcdefgh";
800 size_t bufsize = strlen(CONTENTS);
805 expect_lookup(RELPATH, ino, bufsize);
806 expect_open(ino, 0, 1);
807 /* Like mmap, sendfile may request more data than is available */
808 EXPECT_CALL(*m_mock, process(
809 ResultOf([=](auto in) {
810 return (in.header.opcode == FUSE_READ &&
811 in.header.nodeid == ino &&
812 in.body.read.fh == Read::FH &&
813 in.body.read.offset == 0 &&
814 in.body.read.size >= bufsize);
817 ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
818 out.header.len = sizeof(struct fuse_out_header) + bufsize;
819 memmove(out.body.bytes, CONTENTS, bufsize);
822 ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
824 fd = open(FULLPATH, O_RDONLY);
825 ASSERT_LE(0, fd) << strerror(errno);
827 ASSERT_EQ(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0))
829 ASSERT_EQ(static_cast<ssize_t>(bufsize), read(sp[0], buf, bufsize))
831 ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
835 /* Deliberately leak fd. close(2) will be tested in release.cc */
838 /* sendfile should fail gracefully if fuse declines the read */
839 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
840 TEST_F(ReadCacheable, DISABLED_sendfile_eio)
842 const char FULLPATH[] = "mountpoint/some_file.txt";
843 const char RELPATH[] = "some_file.txt";
844 const char *CONTENTS = "abcdefgh";
847 ssize_t bufsize = strlen(CONTENTS);
851 expect_lookup(RELPATH, ino, bufsize);
852 expect_open(ino, 0, 1);
853 EXPECT_CALL(*m_mock, process(
854 ResultOf([=](auto in) {
855 return (in.header.opcode == FUSE_READ);
858 ).WillOnce(Invoke(ReturnErrno(EIO)));
860 ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
862 fd = open(FULLPATH, O_RDONLY);
863 ASSERT_LE(0, fd) << strerror(errno);
865 ASSERT_NE(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0));
869 /* Deliberately leak fd. close(2) will be tested in release.cc */
873 * Sequential reads should use readahead. And if allowed, large reads should
876 TEST_P(ReadAhead, readahead) {
877 const char FULLPATH[] = "mountpoint/some_file.txt";
878 const char RELPATH[] = "some_file.txt";
880 int fd, maxcontig, clustersize;
881 ssize_t bufsize = 4 * m_maxbcachebuf;
882 ssize_t filesize = bufsize;
884 char *rbuf, *contents;
887 contents = (char*)malloc(filesize);
888 ASSERT_NE(NULL, contents);
889 memset(contents, 'X', filesize);
890 rbuf = (char*)calloc(1, bufsize);
892 expect_lookup(RELPATH, ino, filesize);
893 expect_open(ino, 0, 1);
894 maxcontig = m_noclusterr ? m_maxbcachebuf :
895 m_maxbcachebuf + (int)get<1>(GetParam());
896 clustersize = MIN(maxcontig, MAXPHYS);
897 for (offs = 0; offs < bufsize; offs += clustersize) {
898 len = std::min((size_t)clustersize, (size_t)(filesize - offs));
899 expect_read(ino, offs, len, len, contents + offs);
902 fd = open(FULLPATH, O_RDONLY);
903 ASSERT_LE(0, fd) << strerror(errno);
905 /* Set the internal readahead counter to a "large" value */
906 ASSERT_EQ(0, fcntl(fd, F_READAHEAD, 1'000'000'000)) << strerror(errno);
908 ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
909 ASSERT_EQ(0, memcmp(rbuf, contents, bufsize));
911 /* Deliberately leak fd. close(2) will be tested in release.cc */
/*
 * Run ReadAhead with every combination of the noclusterr mount option
 * (first tuple element) and max readahead size in bytes (second element).
 */
INSTANTIATE_TEST_CASE_P(RA, ReadAhead,
	Values(tuple<bool, int>(false, 0u),
	       tuple<bool, int>(false, 0x10000),
	       tuple<bool, int>(false, 0x20000),
	       tuple<bool, int>(false, 0x30000),
	       tuple<bool, int>(true, 0u),
	       tuple<bool, int>(true, 0x10000),
	       tuple<bool, int>(true, 0x20000)));