2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2019 The FreeBSD Foundation
6 * This software was developed by BFF Storage Systems, LLC under sponsorship
7 * from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/types.h>
34 #include <sys/sysctl.h>
45 * For testing I/O like fsx does, but deterministically and without a real
46 * underlying file system
48 * TODO: after fusefs gains the options to select cache mode for each mount
49 * point, run each of these tests for all cache modes.
/* Pull Google Test/Mock names (EXPECT_CALL, Invoke, Combine, Values, ...)
 * into scope for the whole test file. */
52 using namespace testing;
/*
 * Map a cache_mode enumerator to a human-readable name, used when printing
 * the test parameters in SetUp.
 * NOTE(review): this excerpt is missing the switch/case lines; only two of
 * the return statements are visible here.
 */
61 const char *cache_mode_to_s(enum cache_mode cm) {
66 return "Writethrough";
70 return "WritebackAsync";
/* The single regular file (inode 42) that every test in this file operates
 * on, both as seen through the fusefs mountpoint and relative to it. */
76 const char FULLPATH[] = "mountpoint/some_file.txt";
77 const char RELPATH[] = "some_file.txt";
78 const uint64_t ino = 42;
/*
 * Byte-compare `size` bytes of test output (tbuf) against the control
 * oracle (controlbuf).  On the first mismatch, fail the test and report
 * the absolute file offset (baseofs + i) together with the expected
 * (control) and actual (test) byte values.
 * NOTE(review): some body lines and closing braces are missing from this
 * excerpt.
 */
80 static void compare(const void *tbuf, const void *controlbuf, off_t baseofs,
85 for (i = 0; i < size; i++) {
86 if (((const char*)tbuf)[i] != ((const char*)controlbuf)[i]) {
87 off_t ofs = baseofs + i;
88 FAIL() << "miscompare at offset "
94 << (unsigned)((const uint8_t*)controlbuf)[i]
96 << (unsigned)((const uint8_t*)tbuf)[i];
/* Test parameter tuple: <async read enabled, max_write size, cache mode>
 * (see the INSTANTIATE_TEST_CASE_P calls at the bottom of the file). */
101 typedef tuple<bool, uint32_t, cache_mode> IoParam;
/*
 * Parameterized fixture for fsx-style deterministic I/O tests.  Every
 * operation is performed twice: once through the mock fusefs mount (whose
 * server is backed by the local file "backing_file") and once directly on
 * a local oracle file ("control"); results are then compared.
 *
 * NOTE(review): this excerpt appears truncated — member declarations,
 * switch cases, closing braces, and some statements are missing relative
 * to the visible control flow.  Comments describe only what the visible
 * lines show.
 */
103 class Io: public FuseTest, public WithParamInterface<IoParam> {
/* fds for: the mock server's backing store, the oracle file, and the file
 * opened through the fusefs mountpoint, respectively. */
105 int m_backing_fd, m_control_fd, m_test_fd;
/* Start the fds invalid so TearDown only closes what SetUp opened. */
109 Io(): m_backing_fd(-1), m_control_fd(-1), m_direct_io(false) {};
/* Create/truncate the two local scratch files used by every test. */
114 m_backing_fd = open("backing_file", O_RDWR | O_CREAT | O_TRUNC, 0644);
115 if (m_backing_fd < 0)
116 FAIL() << strerror(errno);
117 m_control_fd = open("control", O_RDWR | O_CREAT | O_TRUNC, 0644);
118 if (m_control_fd < 0)
119 FAIL() << strerror(errno);
/* Fixed seed keeps the pseudo-random write payloads reproducible. */
120 srandom(22'9'1982); // Seed with my birthday
/* Apply the test parameters: async-read flag, max_write, cache mode. */
122 if (get<0>(GetParam()))
123 m_init_flags |= FUSE_ASYNC_READ;
124 m_maxwrite = get<1>(GetParam());
125 switch (get<2>(GetParam())) {
133 m_init_flags |= FUSE_WRITEBACK_CACHE;
138 FAIL() << "Unknown cache mode";
/* Echo the effective parameters so a failure can be reproduced by hand. */
146 printf("Test Parameters: init_flags=%#x maxwrite=%#x "
147 "%sasync cache=%s\n",
148 m_init_flags, m_maxwrite, m_async? "" : "no",
149 cache_mode_to_s(get<2>(GetParam())));
/* The file under test always resolves and can always be opened. */
152 expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1);
153 expect_open(ino, m_direct_io ? FOPEN_DIRECT_IO : 0, 1);
/* FUSE_WRITE: apply the payload to backing_file at the requested offset
 * and acknowledge the full size back to the kernel. */
154 EXPECT_CALL(*m_mock, process(
155 ResultOf([=](auto in) {
156 return (in.header.opcode == FUSE_WRITE &&
157 in.header.nodeid == ino);
160 ).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
161 const char *buf = (const char*)in.body.bytes +
162 sizeof(struct fuse_write_in);
163 ssize_t isize = in.body.write.size;
164 off_t iofs = in.body.write.offset;
166 ASSERT_EQ(isize, pwrite(m_backing_fd, buf, isize, iofs))
168 SET_OUT_HEADER_LEN(out, write);
169 out.body.write.size = isize;
/* FUSE_READ: satisfy reads from backing_file; a short read near EOF is
 * reflected in the reply's header length. */
171 EXPECT_CALL(*m_mock, process(
172 ResultOf([=](auto in) {
173 return (in.header.opcode == FUSE_READ &&
174 in.header.nodeid == ino);
177 ).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
178 ssize_t isize = in.body.write.size;
179 off_t iofs = in.body.write.offset;
180 void *buf = out.body.bytes;
183 osize = pread(m_backing_fd, buf, isize, iofs);
184 ASSERT_LE(0, osize) << strerror(errno);
185 out.header.len = sizeof(struct fuse_out_header) + osize;
/* FUSE_SETATTR with FATTR_SIZE: mirror the truncation onto backing_file
 * and return fresh attributes that never expire (attr_valid UINT64_MAX). */
187 EXPECT_CALL(*m_mock, process(
188 ResultOf([=](auto in) {
189 return (in.header.opcode == FUSE_SETATTR &&
190 in.header.nodeid == ino &&
191 (in.body.setattr.valid & FATTR_SIZE));
195 ).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
196 ASSERT_EQ(0, ftruncate(m_backing_fd, in.body.setattr.size))
198 SET_OUT_HEADER_LEN(out, attr);
199 out.body.attr.attr.ino = ino;
200 out.body.attr.attr.mode = S_IFREG | 0755;
201 out.body.attr.attr.size = in.body.setattr.size;
202 out.body.attr.attr_valid = UINT64_MAX;
204 /* Any test that close()s will send FUSE_FLUSH and FUSE_RELEASE */
205 EXPECT_CALL(*m_mock, process(
206 ResultOf([=](auto in) {
207 return (in.header.opcode == FUSE_FLUSH &&
208 in.header.nodeid == ino);
211 ).WillRepeatedly(Invoke(ReturnErrno(0)));
212 EXPECT_CALL(*m_mock, process(
213 ResultOf([=](auto in) {
214 return (in.header.opcode == FUSE_RELEASE &&
215 in.header.nodeid == ino);
218 ).WillRepeatedly(Invoke(ReturnErrno(0)));
/* Finally open the file through the fusefs mountpoint for the test. */
220 m_test_fd = open(FULLPATH, O_RDWR );
221 EXPECT_LE(0, m_test_fd) << strerror(errno);
/* TearDown: close whatever scratch fds SetUp managed to open, then defer
 * to the base fixture. */
228 if (m_backing_fd >= 0)
230 if (m_control_fd >= 0)
232 FuseTest::TearDown();
/* Presumably a helper that closes the test/control fds and reopens the
 * underlying local files directly — the method header is missing from
 * this excerpt; TODO confirm against the full source. */
238 ASSERT_EQ(0, close(m_test_fd)) << strerror(errno);
239 m_test_fd = open("backing_file", O_RDWR);
240 ASSERT_LE(0, m_test_fd) << strerror(errno);
242 ASSERT_EQ(0, close(m_control_fd)) << strerror(errno);
243 m_control_fd = open("control", O_RDWR);
244 ASSERT_LE(0, m_control_fd) << strerror(errno);
/* Truncate both the test file and the oracle to the same length. */
247 void do_ftruncate(off_t offs)
249 ASSERT_EQ(0, ftruncate(m_test_fd, offs)) << strerror(errno);
250 ASSERT_EQ(0, ftruncate(m_control_fd, offs)) << strerror(errno);
/* Read `size` bytes at `offs` through a shared mapping of the test file
 * and compare against a pread from the control file.  mmap offsets must
 * be page-aligned, so the request is split into a page-aligned base plus
 * an in-page offset (pg_offset). */
254 void do_mapread(ssize_t size, off_t offs)
256 void *control_buf, *p;
257 off_t pg_offset, page_mask;
260 page_mask = getpagesize() - 1;
261 pg_offset = offs & page_mask;
262 map_size = pg_offset + size;
264 p = mmap(NULL, map_size, PROT_READ, MAP_FILE | MAP_SHARED, m_test_fd,
266 ASSERT_NE(p, MAP_FAILED) << strerror(errno);
268 control_buf = malloc(size);
269 ASSERT_NE(NULL, control_buf) << strerror(errno);
271 ASSERT_EQ(size, pread(m_control_fd, control_buf, size, offs))
274 compare((void*)((char*)p + pg_offset), control_buf, offs, size);
276 ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
/* pread the same range from both files and compare; a short read on
 * either side fails the test. */
280 void do_read(ssize_t size, off_t offs)
282 void *test_buf, *control_buf;
285 test_buf = malloc(size);
286 ASSERT_NE(NULL, test_buf) << strerror(errno);
287 control_buf = malloc(size);
288 ASSERT_NE(NULL, control_buf) << strerror(errno);
291 r = pread(m_test_fd, test_buf, size, offs);
292 ASSERT_NE(-1, r) << strerror(errno);
293 ASSERT_EQ(size, r) << "unexpected short read";
294 r = pread(m_control_fd, control_buf, size, offs);
295 ASSERT_NE(-1, r) << strerror(errno);
296 ASSERT_EQ(size, r) << "unexpected short read";
298 compare(test_buf, control_buf, offs, size);
/* Write a payload through a shared writable mapping of the test file and
 * with pwrite to the control file.  If the write would reach past the
 * current EOF, the file is extended with ftruncate first, because a
 * vnode-backed mapping cannot implicitly grow the file. */
304 void do_mapwrite(ssize_t size, off_t offs)
308 off_t pg_offset, page_mask;
312 page_mask = getpagesize() - 1;
313 pg_offset = offs & page_mask;
314 map_size = pg_offset + size;
316 buf = (char*)malloc(size);
317 ASSERT_NE(NULL, buf) << strerror(errno);
318 for (i=0; i < size; i++)
321 if (offs + size > m_filesize) {
323 * Must manually extend. vm_mmap_vnode will not implicitly
326 do_ftruncate(offs + size);
329 p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
330 MAP_FILE | MAP_SHARED, m_test_fd, offs - pg_offset);
331 ASSERT_NE(p, MAP_FAILED) << strerror(errno);
333 bcopy(buf, (char*)p + pg_offset, size);
334 ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
338 ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
/* pwrite the same payload to both files and track the high-water file
 * size, which do_mapwrite consults for its manual-extension check. */
341 void do_write(ssize_t size, off_t offs)
346 buf = (char*)malloc(size);
347 ASSERT_NE(NULL, buf) << strerror(errno);
348 for (i=0; i < size; i++)
351 ASSERT_EQ(size, pwrite(m_test_fd, buf, size, offs ))
353 ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
355 m_filesize = std::max(m_filesize, offs + size);
/*
 * Variant of the Io fixture for tests that require caching; it is
 * instantiated only with the Writethrough/Writeback/WritebackAsync modes
 * (see the matching INSTANTIATE_TEST_CASE_P at the bottom of the file).
 * NOTE(review): the SetUp body is missing from this excerpt.
 */
362 class IoCacheable: public Io {
364 virtual void SetUp() {
370 * Extend a file with dirty data in the last page of the last block.
372 * fsx -WR -P /tmp -S8 -N3 fsx.bin
374 TEST_P(Io, extend_from_dirty_page)
377 ssize_t wsize = 0xf0a8;
379 ssize_t rsize = 0x9b22;
380 off_t truncsize = 0x28702;
/* NOTE(review): the wofs/rofs declarations are missing from this excerpt. */
/* Dirty a range, extend the file past it, then read back and verify. */
382 do_write(wsize, wofs);
383 do_ftruncate(truncsize);
384 do_read(rsize, rofs);
388 * mapwrite into a newly extended part of a file.
390 * fsx -c 100 -i 100 -l 524288 -o 131072 -N5 -P /tmp -S19 fsx.bin
392 TEST_P(IoCacheable, extend_by_mapwrite)
/* Two mmap'd writes that each extend the file, then one read that spans
 * the tail of the first write through the end of the second. */
394 do_mapwrite(0x849e, 0x29a3a); /* [0x29a3a, 0x31ed7] */
395 do_mapwrite(0x3994, 0x3c7d8); /* [0x3c7d8, 0x4016b] */
396 do_read(0xf556, 0x30c16); /* [0x30c16, 0x4016b] */
400 * When writing the last page of a file, it must be written synchronously.
401 * Otherwise the cached page can become invalid by a subsequent extend
404 * fsx -WR -P /tmp -S642 -N3 fsx.bin
406 TEST_P(Io, last_page)
/* Write twice (the second write extends the file), then read a range that
 * straddles the boundary between the two writes. */
408 do_write(0xcc77, 0x1134f); /* [0x1134f, 0x1dfc5] */
409 do_write(0xdfa7, 0x2096a); /* [0x2096a, 0x2e910] */
410 do_read(0xb5b7, 0x1a3aa); /* [0x1a3aa, 0x25960] */
414 * Read a hole using mmap
416 * fsx -c 100 -i 100 -l 524288 -o 131072 -N11 -P /tmp -S14 fsx.bin
418 TEST_P(IoCacheable, mapread_hole)
/* Write past a hole at the start of the file, then mmap-read a range that
 * covers both the hole and part of the written data. */
420 do_write(0x123b7, 0xf205); /* [0xf205, 0x215bb] */
421 do_mapread(0xeeea, 0x2f4c); /* [0x2f4c, 0x11e35] */
425 * Read a hole from a block that contains some cached data.
427 * fsx -WR -P /tmp -S55 fsx.bin
429 TEST_P(Io, read_hole_from_cached_block)
431 off_t wofs = 0x160c5;
432 ssize_t wsize = 0xa996;
434 ssize_t rsize = 0xd8d5;
/* NOTE(review): the rofs declaration is missing from this excerpt. */
/* Write into a block, then read a range touching the hole before it. */
436 do_write(wsize, wofs);
437 do_read(rsize, rofs);
441 * Truncating a file into a dirty buffer should not cause anything untoward
442 * to happen when that buffer is eventually flushed.
444 * fsx -WR -P /tmp -S839 -d -N6 fsx.bin
446 TEST_P(Io, truncate_into_dirty_buffer)
448 off_t wofs0 = 0x3bad7;
449 ssize_t wsize0 = 0x4529;
450 off_t wofs1 = 0xc30d;
451 ssize_t wsize1 = 0x5f77;
452 off_t truncsize0 = 0x10916;
454 ssize_t rsize = 0x29ff;
455 off_t truncsize1 = 0x152b4;
/* NOTE(review): the rofs declaration is missing from this excerpt. */
/* Two writes create dirty buffers; the first truncate cuts into them,
 * then a read and a second (extending) truncate exercise the flush path. */
457 do_write(wsize0, wofs0);
458 do_write(wsize1, wofs1);
459 do_ftruncate(truncsize0);
460 do_read(rsize, rofs);
461 do_ftruncate(truncsize1);
466 * Truncating a file into a dirty buffer should not cause anything untoward
467 * to happen when that buffer is eventually flushed, even when the buffer's
470 * Based on this command with a few steps removed:
471 * fsx -WR -P /tmp -S677 -d -N8 fsx.bin
473 TEST_P(Io, truncate_into_dirty_buffer2)
475 off_t truncsize0 = 0x344f3;
476 off_t wofs = 0x2790c;
477 ssize_t wsize = 0xd86a;
478 off_t truncsize1 = 0x2de38;
479 off_t rofs2 = 0x1fd7a;
480 ssize_t rsize2 = 0xc594;
481 off_t truncsize2 = 0x31e71;
483 /* Sets the file size to something larger than the next write */
484 do_ftruncate(truncsize0);
486 * Creates a dirty buffer. The part in lbn 2 doesn't flush
489 do_write(wsize, wofs);
490 /* Truncates part of the dirty buffer created in step 2 */
491 do_ftruncate(truncsize1);
492 /* XXX It is not known why this intervening read is necessary. */
493 do_read(rsize2, rofs2);
494 /* Truncates the dirty buffer */
495 do_ftruncate(truncsize2);
500 * Regression test for a bug introduced in r348931
502 * Sequence of operations:
503 * 1) The first write reads lbn so it can modify it
504 * 2) The first write flushes lbn 3 immediately because it's the end of file
505 * 3) The first write then flushes lbn 4 because it's the end of the file
506 * 4) The second write modifies the cached versions of lbn 3 and 4
507 * 5) The third write's getblkx invalidates lbn 4's B_CACHE because it's
508 * extending the buffer. Then it flushes lbn 4 because B_DELWRI was set but
510 * 6) fuse_write_biobackend erroneously called vfs_bio_clrbuf, putting the
511 * buffer into a weird write-only state. All read operations would return
512 * 0. Writes were apparently still processed, because the buffer's contents
513 * were correct when examined in a core dump.
514 * 7) The third write reads lbn 4 because cache is clear
515 * 9) uiomove dutifully copies new data into the buffer
516 * 10) The buffer's dirty is flushed to lbn 4
517 * 11) The read returns all zeros because of step 6.
520 * fsx -WR -l 524388 -o 131072 -P /tmp -S6456 -q fsx.bin
522 TEST_P(Io, resize_a_valid_buffer_while_extending)
/* Three overlapping/extending writes followed by a read of the middle
 * region; see the r348931 regression analysis in the comment above. */
524 do_write(0x14530, 0x36ee6); /* [0x36ee6, 0x4b415] */
525 do_write(0x1507c, 0x33256); /* [0x33256, 0x482d1] */
526 do_write(0x175c, 0x4c03d); /* [0x4c03d, 0x4d798] */
527 do_read(0xe277, 0x3599c); /* [0x3599c, 0x43c12] */
/* Run the Io tests across every combination of async-read on/off, three
 * max_write sizes, and all four cache modes. */
531 INSTANTIATE_TEST_CASE_P(Io, Io,
532 Combine(Bool(), /* async read */
533 Values(0x1000, 0x10000, 0x20000), /* m_maxwrite */
534 Values(Uncached, Writethrough, Writeback, WritebackAsync)
/* The IoCacheable tests require a cache, so Uncached is excluded here. */
538 INSTANTIATE_TEST_CASE_P(Io, IoCacheable,
539 Combine(Bool(), /* async read */
540 Values(0x1000, 0x10000, 0x20000), /* m_maxwrite */
541 Values(Writethrough, Writeback, WritebackAsync)