2 * Copyright (c) 2004 Robert N. M. Watson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Regression test to do some very basic AIO exercising on several types of
31 * file descriptors. Currently, the tests consist of initializing a fixed
32 * size buffer with pseudo-random data, writing it to one fd using AIO, then
33 * reading it from a second descriptor using AIO. For some targets, the same
34 * fd is used for write and read (i.e., file, md device), but for others the
35 * operation is performed on a peer (pty, socket, fifo, etc). A timeout is
36 * initiated to detect undue blocking. This test does not attempt to exercise
37 * error cases or more subtle asynchronous behavior, just make sure that the
38 * basic operations work on some basic object types.
41 #include <sys/param.h>
42 #include <sys/module.h>
43 #include <sys/resource.h>
44 #include <sys/socket.h>
46 #include <sys/mdioctl.h>
63 #include "freebsd_test_suite/macros.h"
67 * GLOBAL_MAX sets the largest usable buffer size to be read and written, as
68 * it sizes ac_buffer in the aio_context structure. It is also the default
69 * size for file I/O. For other types, we use smaller blocks or we risk
70 * blocking (and we run in a single process/thread so that would be bad).
72 #define GLOBAL_MAX 16384
74 #define BUFFER_MAX GLOBAL_MAX
77 * A completion function will block until the aio has completed, then return
78 * the result of the aio. errno will be set appropriately.
80 typedef ssize_t (*completion)(struct aiocb*);
83 int ac_read_fd, ac_write_fd;
85 char ac_buffer[GLOBAL_MAX];
88 void (*ac_cleanup)(void *arg);
/* Flag polled by the completion routines; set when the test timeout fires. */
static int aio_timedout;

/*
 * Each test run specifies a timeout in seconds.  Use the somewhat obsoleted
 * signal(3) and alarm(3) APIs to set this up.
 */

/* SIGALRM handler; presumably sets aio_timedout (body elided in excerpt). */
aio_timeout_signal(int sig __unused)

/* Install the SIGALRM handler and arm a timeout of 'seconds'. */
aio_timeout_start(int seconds)

	ATF_REQUIRE_MSG(signal(SIGALRM, aio_timeout_signal) != SIG_ERR,
	    "failed to set SIGALRM handler: %s", strerror(errno));

/* Restore SIGALRM to its default disposition (NULL == SIG_DFL on FreeBSD). */
aio_timeout_stop(void)

	ATF_REQUIRE_MSG(signal(SIGALRM, NULL) != SIG_ERR,
	    "failed to reset SIGALRM handler to default: %s", strerror(errno));
/*
 * Fill a buffer given a seed that can be fed into srandom() to initialize
 * the PRNG in a repeatable manner.
 */
aio_fill_buffer(char *buffer, int len, long seed)

	for (i = 0; i < len; i++) {
		/* Keep only the low byte of each PRNG value. */
		ch = random() & 0xff;
/*
 * Test that a buffer matches a given seed.  See aio_fill_buffer().  Return
 * (1) on a match, (0) on a mismatch.
 */
aio_test_buffer(char *buffer, int len, long seed)

	for (i = 0; i < len; i++) {
		/* Regenerate the identical byte stream for comparison. */
		ch = random() & 0xff;
/*
 * Initialize a testing context given the file descriptors provided by the
 */
aio_context_init(struct aio_context *ac, int read_fd,
    int write_fd, int buflen, int seconds, void (*cleanup)(void *),

	/* ac_buffer is statically sized at BUFFER_MAX; refuse larger. */
	ATF_REQUIRE_MSG(buflen <= BUFFER_MAX,
	    "aio_context_init: buffer too large (%d > %d)",

	bzero(ac, sizeof(*ac));
	ac->ac_read_fd = read_fd;
	ac->ac_write_fd = write_fd;
	ac->ac_buflen = buflen;

	/* Seed the buffer with reproducible pseudo-random data... */
	ac->ac_seed = random();
	aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed);
	/* ...and sanity-check the fill/verify pair against itself. */
	ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen,
	    ac->ac_seed) != 0, "aio_test_buffer: internal error");
	ac->ac_seconds = seconds;
	ac->ac_cleanup = cleanup;
	ac->ac_cleanup_arg = cleanup_arg;
/*
 * Completion routine: busy-poll aio_error() until the request leaves
 * EINPROGRESS or the test timeout fires, then reap it via aio_return().
 * (Name shadows poll(2); used only through the 'completion' typedef.)
 */
poll(struct aiocb *aio)

	while ((error = aio_error(aio)) == EINPROGRESS && !aio_timedout)

	return (aio_return(aio));
/*
 * Completion routine: block in aio_suspend(2) until the request completes,
 * then reap its status with aio_return().
 */
suspend(struct aiocb *aio)

	const struct aiocb *const iocbs[] = {aio};

	error = aio_suspend(iocbs, 1, NULL);

	return (aio_return(aio));
/*
 * Completion routine: reap via aio_waitcomplete(2), requiring the kernel to
 * return the same control block that was queued.
 */
waitcomplete(struct aiocb *aio)

	ret = aio_waitcomplete(&aiop, NULL);
	ATF_REQUIRE_EQ(aio, aiop);
/*
 * Each tester can register a callback to clean up in the event the test
 * fails.  Preserve the value of errno so that subsequent calls to errx()
 */
aio_cleanup(struct aio_context *ac)

	/* No callback registered: nothing to do. */
	if (ac->ac_cleanup == NULL)

	(ac->ac_cleanup)(ac->ac_cleanup_arg);
/*
 * Perform a simple write test of our initialized data buffer to the provided
 * file descriptor.
 */
aio_write_test(struct aio_context *ac, completion comp)

	bzero(&aio, sizeof(aio));
	aio.aio_buf = ac->ac_buffer;
	aio.aio_nbytes = ac->ac_buflen;
	aio.aio_fildes = ac->ac_write_fd;

	/* Guard the whole operation with the context's SIGALRM timeout. */
	aio_timeout_start(ac->ac_seconds);

	if (aio_write(&aio) < 0) {
		/* EINTR is treated as the timeout alarm having fired. */
		if (errno == EINTR) {
			atf_tc_fail("aio_write timed out");
		atf_tc_fail("aio_write failed: %s", strerror(errno));

		/* Same convention for the completion routine's failure. */
		if (errno == EINTR) {
			atf_tc_fail("aio timed out");
		atf_tc_fail("aio failed: %s", strerror(errno));

	/* The completion routine must report the full buffer length. */
	if (len != ac->ac_buflen) {
		atf_tc_fail("aio short write (%jd)", (intmax_t)len);
/*
 * Perform a simple read test of our initialized data buffer from the
 * provided file descriptor.
 */
aio_read_test(struct aio_context *ac, completion comp)

	/* Clear the buffer so stale contents cannot fake a successful read. */
	bzero(ac->ac_buffer, ac->ac_buflen);
	bzero(&aio, sizeof(aio));
	aio.aio_buf = ac->ac_buffer;
	aio.aio_nbytes = ac->ac_buflen;
	aio.aio_fildes = ac->ac_read_fd;

	/* Guard the whole operation with the context's SIGALRM timeout. */
	aio_timeout_start(ac->ac_seconds);

	if (aio_read(&aio) < 0) {
		/* EINTR is treated as the timeout alarm having fired. */
		if (errno == EINTR) {
			atf_tc_fail("aio_read timed out");
		atf_tc_fail("aio_read failed: %s", strerror(errno));

		if (errno == EINTR) {
			atf_tc_fail("aio timed out");
		atf_tc_fail("aio failed: %s", strerror(errno));

	if (len != ac->ac_buflen) {
		atf_tc_fail("aio short read (%jd)",

	/* Verify that the bytes read match the seeded write data. */
	if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0) {
		atf_tc_fail("buffer mismatched");
352 * Series of type-specific tests for AIO. For now, we just make sure we can
353 * issue a write and then a read to each type. We assume that once a write
354 * is issued, a read can follow.
358 * Test with a classic file. Assumes we can create a moderate size temporary
361 #define FILE_LEN GLOBAL_MAX
362 #define FILE_PATHNAME "testfile"
363 #define FILE_TIMEOUT 30
/* Argument block for the file-test cleanup handler (fields elided here). */
struct aio_file_arg {

/* Cleanup handler: remove the temporary test file. */
aio_file_cleanup(void *arg)

	struct aio_file_arg *afa;

	unlink(FILE_PATHNAME);
/*
 * Create/open the temporary test file and run the write/read pair over the
 * same descriptor with the given completion routine.
 */
aio_file_test(completion comp)

	struct aio_file_arg arg;
	struct aio_context ac;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	/*
	 * NOTE(review): open(2) with O_CREAT requires a third (mode)
	 * argument; omitting it passes indeterminate permission bits.
	 * Should be open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600).
	 */
	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	/* Same fd is used for both directions (regular file). */
	aio_context_init(&ac, fd, fd, FILE_LEN,
	    FILE_TIMEOUT, aio_file_cleanup, &arg);
	aio_write_test(&ac, comp);
	aio_read_test(&ac, comp);

	aio_file_cleanup(&arg);
401 ATF_TC_WITHOUT_HEAD(file_poll);
402 ATF_TC_BODY(file_poll, tc)
407 ATF_TC_WITHOUT_HEAD(file_suspend);
408 ATF_TC_BODY(file_suspend, tc)
410 aio_file_test(suspend);
413 ATF_TC_WITHOUT_HEAD(file_waitcomplete);
414 ATF_TC_BODY(file_waitcomplete, tc)
416 aio_file_test(waitcomplete);
420 #define FIFO_PATHNAME "testfifo"
421 #define FIFO_TIMEOUT 30
/* Argument block for the fifo cleanup handler (fields elided here). */
struct aio_fifo_arg {

/* Cleanup handler: close whichever fifo ends were opened, remove the node. */
aio_fifo_cleanup(void *arg)

	struct aio_fifo_arg *afa;

	/* -1 marks an endpoint that was never opened. */
	if (afa->afa_read_fd != -1)
		close(afa->afa_read_fd);
	if (afa->afa_write_fd != -1)
		close(afa->afa_write_fd);
	unlink(FIFO_PATHNAME);
/*
 * Create a fifo and open both endpoints -- the read side first with
 * O_NONBLOCK (no writer exists yet), then the write side -- and run the
 * write/read pair across the two descriptors.
 */
aio_fifo_test(completion comp)

	int error, read_fd = -1, write_fd = -1;
	struct aio_fifo_arg arg;
	struct aio_context ac;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(mkfifo(FIFO_PATHNAME, 0600) != -1,
	    "mkfifo failed: %s", strerror(errno));
	/* Mark both fds unopened so the cleanup handler knows what to close. */
	arg.afa_read_fd = -1;
	arg.afa_write_fd = -1;

	read_fd = open(FIFO_PATHNAME, O_RDONLY | O_NONBLOCK);
		aio_fifo_cleanup(&arg);
		atf_tc_fail("read_fd open failed: %s",
	arg.afa_read_fd = read_fd;

	write_fd = open(FIFO_PATHNAME, O_WRONLY);
	if (write_fd == -1) {
		aio_fifo_cleanup(&arg);
		atf_tc_fail("write_fd open failed: %s",
	arg.afa_write_fd = write_fd;

	aio_context_init(&ac, read_fd, write_fd, FIFO_LEN,
	    FIFO_TIMEOUT, aio_fifo_cleanup, &arg);
	aio_write_test(&ac, comp);
	aio_read_test(&ac, comp);

	aio_fifo_cleanup(&arg);
483 ATF_TC_WITHOUT_HEAD(fifo_poll);
484 ATF_TC_BODY(fifo_poll, tc)
489 ATF_TC_WITHOUT_HEAD(fifo_suspend);
490 ATF_TC_BODY(fifo_suspend, tc)
492 aio_fifo_test(waitcomplete);
495 ATF_TC_WITHOUT_HEAD(fifo_waitcomplete);
496 ATF_TC_BODY(fifo_waitcomplete, tc)
498 aio_fifo_test(waitcomplete);
/* State handed to the socketpair cleanup handler. */
struct aio_unix_socketpair_arg {

/* Cleanup handler: close both ends of the socket pair. */
aio_unix_socketpair_cleanup(void *arg)

	struct aio_unix_socketpair_arg *asa;

	close(asa->asa_sockets[0]);
	close(asa->asa_sockets[1]);
515 #define UNIX_SOCKETPAIR_LEN 256
516 #define UNIX_SOCKETPAIR_TIMEOUT 30
/*
 * Run the write/read pair across a UNIX-domain socket pair, and verify via
 * getrusage(2) that each AIO operation is accounted as exactly one message
 * sent/received.
 */
aio_unix_socketpair_test(completion comp)

	struct aio_unix_socketpair_arg arg;
	struct aio_context ac;
	struct rusage ru_before, ru_after;

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE_MSG(socketpair(PF_UNIX, SOCK_STREAM, 0, sockets) != -1,
	    "socketpair failed: %s", strerror(errno));

	arg.asa_sockets[0] = sockets[0];
	arg.asa_sockets[1] = sockets[1];
	aio_context_init(&ac, sockets[0],
	    sockets[1], UNIX_SOCKETPAIR_LEN, UNIX_SOCKETPAIR_TIMEOUT,
	    aio_unix_socketpair_cleanup, &arg);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1,
	    "getrusage failed: %s", strerror(errno));
	aio_write_test(&ac, comp);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
	    "getrusage failed: %s", strerror(errno));
	/* The single aio_write must account exactly one message sent. */
	ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1);
	ru_before = ru_after;
	aio_read_test(&ac, comp);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
	    "getrusage failed: %s", strerror(errno));
	/* ...and the single aio_read exactly one message received. */
	ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1);

	aio_unix_socketpair_cleanup(&arg);
550 ATF_TC_WITHOUT_HEAD(socket_poll);
551 ATF_TC_BODY(socket_poll, tc)
553 aio_unix_socketpair_test(poll);
556 ATF_TC_WITHOUT_HEAD(socket_suspend);
557 ATF_TC_BODY(socket_suspend, tc)
559 aio_unix_socketpair_test(suspend);
562 ATF_TC_WITHOUT_HEAD(socket_waitcomplete);
563 ATF_TC_BODY(socket_waitcomplete, tc)
565 aio_unix_socketpair_test(waitcomplete);
/* Cleanup handler: close both pty descriptors. */
aio_pty_cleanup(void *arg)

	struct aio_pty_arg *apa;

	close(apa->apa_read_fd);
	close(apa->apa_write_fd);
584 #define PTY_TIMEOUT 30
/*
 * Run the write/read pair across a pseudo-terminal pair.  The terminal
 * attributes are reconfigured first (the modification of 'ts' between
 * tcgetattr and tcsetattr is elided in this excerpt -- presumably raw
 * mode; TODO confirm against the full source).
 */
aio_pty_test(completion comp)

	struct aio_pty_arg arg;
	struct aio_context ac;
	int read_fd, write_fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(openpty(&read_fd, &write_fd, NULL, NULL, NULL) == 0,
	    "openpty failed: %s", strerror(errno));

	arg.apa_read_fd = read_fd;
	arg.apa_write_fd = write_fd;

	if (tcgetattr(write_fd, &ts) < 0) {
		aio_pty_cleanup(&arg);
		atf_tc_fail("tcgetattr failed: %s", strerror(errno));

	if (tcsetattr(write_fd, TCSANOW, &ts) < 0) {
		aio_pty_cleanup(&arg);
		atf_tc_fail("tcsetattr failed: %s", strerror(errno));

	aio_context_init(&ac, read_fd, write_fd, PTY_LEN,
	    PTY_TIMEOUT, aio_pty_cleanup, &arg);

	aio_write_test(&ac, comp);
	aio_read_test(&ac, comp);

	aio_pty_cleanup(&arg);
625 ATF_TC_WITHOUT_HEAD(pty_poll);
626 ATF_TC_BODY(pty_poll, tc)
631 ATF_TC_WITHOUT_HEAD(pty_suspend);
632 ATF_TC_BODY(pty_suspend, tc)
634 aio_pty_test(suspend);
637 ATF_TC_WITHOUT_HEAD(pty_waitcomplete);
638 ATF_TC_BODY(pty_waitcomplete, tc)
640 aio_pty_test(waitcomplete);
/* Cleanup handler; presumably closes both pipe fds (body elided here). */
aio_pipe_cleanup(void *arg)

#define PIPE_TIMEOUT 30

/*
 * Run the write/read pair across an anonymous pipe; the int[2] from pipe(2)
 * itself serves as the cleanup-handler argument.
 */
aio_pipe_test(completion comp)

	struct aio_context ac;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(pipe(pipes) != -1,
	    "pipe failed: %s", strerror(errno));

	aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN,
	    PIPE_TIMEOUT, aio_pipe_cleanup, pipes);
	aio_write_test(&ac, comp);
	aio_read_test(&ac, comp);

	aio_pipe_cleanup(pipes);
674 ATF_TC_WITHOUT_HEAD(pipe_poll);
675 ATF_TC_BODY(pipe_poll, tc)
680 ATF_TC_WITHOUT_HEAD(pipe_suspend);
681 ATF_TC_BODY(pipe_suspend, tc)
683 aio_pipe_test(suspend);
686 ATF_TC_WITHOUT_HEAD(pipe_waitcomplete);
687 ATF_TC_BODY(pipe_waitcomplete, tc)
689 aio_pipe_test(waitcomplete);
/*
 * Cleanup handler: close the md device fd (if open), detach the md(4) unit
 * we attached via the control device, then close the control descriptor.
 */
aio_md_cleanup(void *arg)

	struct aio_md_arg *ama;
	struct md_ioctl mdio;

	/* -1 marks "never opened" / "never attached". */
	if (ama->ama_fd != -1)

	if (ama->ama_unit != -1) {
		bzero(&mdio, sizeof(mdio));
		mdio.md_version = MDIOVERSION;
		mdio.md_unit = ama->ama_unit;
		if (ioctl(ama->ama_mdctl_fd, MDIOCDETACH, &mdio) == -1) {
			close(ama->ama_mdctl_fd);
			atf_tc_fail("ioctl MDIOCDETACH failed: %s",
	close(ama->ama_mdctl_fd);
726 #define MD_LEN GLOBAL_MAX
727 #define MD_TIMEOUT 30
/*
 * Attach a malloc-backed md(4) device (requires root; see the md_* test
 * heads), open /dev/mdN, and run the write/read pair against it.
 */
aio_md_test(completion comp)

	int error, fd, mdctl_fd, unit;
	char pathname[PATH_MAX];
	struct aio_md_arg arg;
	struct aio_context ac;
	struct md_ioctl mdio;

	ATF_REQUIRE_KERNEL_MODULE("aio");

	mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
	ATF_REQUIRE_MSG(mdctl_fd != -1,
	    "opening /dev/%s failed: %s", MDCTL_NAME, strerror(errno));

	/* Request an auto-assigned unit of a GLOBAL_MAX-sized malloc disk. */
	bzero(&mdio, sizeof(mdio));
	mdio.md_version = MDIOVERSION;
	mdio.md_type = MD_MALLOC;
	mdio.md_options = MD_AUTOUNIT | MD_COMPRESS;
	mdio.md_mediasize = GLOBAL_MAX;
	mdio.md_sectorsize = 512;

	arg.ama_mdctl_fd = mdctl_fd;

	if (ioctl(mdctl_fd, MDIOCATTACH, &mdio) < 0) {
		aio_md_cleanup(&arg);
		atf_tc_fail("ioctl MDIOCATTACH failed: %s", strerror(errno));

	/* With MD_AUTOUNIT the kernel returns the chosen unit in md_unit. */
	arg.ama_unit = unit = mdio.md_unit;
	snprintf(pathname, PATH_MAX, "/dev/md%d", unit);
	fd = open(pathname, O_RDWR);
	ATF_REQUIRE_MSG(fd != -1,
	    "opening %s failed: %s", pathname, strerror(errno));

	/* Same fd for both directions (block device). */
	aio_context_init(&ac, fd, fd, MD_LEN, MD_TIMEOUT,
	    aio_md_cleanup, &arg);
	aio_write_test(&ac, comp);
	aio_read_test(&ac, comp);

	aio_md_cleanup(&arg);
776 ATF_TC_HEAD(md_poll, tc)
779 atf_tc_set_md_var(tc, "require.user", "root");
781 ATF_TC_BODY(md_poll, tc)
787 ATF_TC_HEAD(md_suspend, tc)
790 atf_tc_set_md_var(tc, "require.user", "root");
792 ATF_TC_BODY(md_suspend, tc)
794 aio_md_test(suspend);
797 ATF_TC(md_waitcomplete);
798 ATF_TC_HEAD(md_waitcomplete, tc)
801 atf_tc_set_md_var(tc, "require.user", "root");
803 ATF_TC_BODY(md_waitcomplete, tc)
805 aio_md_test(waitcomplete);
/*
 * Check boundary behavior of very large aio_read() sizes: a read of the
 * maximum supported size must be accepted, while one byte more must be
 * rejected.
 */
ATF_TC_WITHOUT_HEAD(aio_large_read_test);
ATF_TC_BODY(aio_large_read_test, tc)

	struct aiocb cb, *cbp;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	/* Does this kernel clamp I/O sizes (debug.iosize_max_clamp)? */
	len = sizeof(clamped);
	if (sysctlbyname("debug.iosize_max_clamp", &clamped, &len, NULL, 0) ==
		atf_libc_error(errno, "Failed to read debug.iosize_max_clamp");

	/* Determine the maximum supported read(2) size. */

	/*
	 * NOTE(review): open(2) with O_CREAT requires a third (mode)
	 * argument; omitting it passes indeterminate permission bits.
	 */
	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	unlink(FILE_PATHNAME);

	/* A maximum-sized read must be accepted (returns 0: file is empty). */
	memset(&cb, 0, sizeof(cb));

	if (aio_read(&cb) == -1)
		atf_tc_fail("aio_read() of maximum read size failed: %s",

	nread = aio_waitcomplete(&cbp, NULL);
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
		atf_tc_fail("aio_read() from empty file returned data: %zd",

	/* One byte beyond the maximum is expected to be rejected. */
	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len + 1;

	if (aio_read(&cb) == -1) {
		atf_tc_fail("aio_read() of too large read size failed: %s",

	nread = aio_waitcomplete(&cbp, NULL);
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	atf_tc_fail("aio_read() of too large read size returned: %zd", nread);
/*
 * This tests for a bug where arriving socket data can wakeup multiple
 * AIO read requests resulting in an uncancellable request.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_two_reads);
ATF_TC_BODY(aio_socket_two_reads, tc)

	ATF_REQUIRE_KERNEL_MODULE("aio");
#if __FreeBSD_version < 1100101
	/*
	 * NOTE(review): "aft_tc_skip" is a typo for atf_tc_skip(); this
	 * branch cannot compile when __FreeBSD_version < 1100101.
	 */
	aft_tc_skip("kernel version %d is too old (%d required)",
	    __FreeBSD_version, 1100101);

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	/* Queue two read requests. */
	memset(&ioreq, 0, sizeof(ioreq));
	for (i = 0; i < nitems(ioreq); i++) {
		ioreq[i].iocb.aio_nbytes = sizeof(ioreq[i].buffer);
		ioreq[i].iocb.aio_fildes = s[0];
		ioreq[i].iocb.aio_buf = ioreq[i].buffer;
		ATF_REQUIRE(aio_read(&ioreq[i].iocb) == 0);
	}

	/* Send a single byte. This should complete one request. */
	ATF_REQUIRE(write(s[1], &c, sizeof(c)) == 1);

	ATF_REQUIRE(aio_waitcomplete(&iocb, NULL) == 1);

	/* Determine which request completed and verify the data was read. */
	if (iocb == &ioreq[0].iocb)

	ATF_REQUIRE(ioreq[i].buffer[0] == c);

	/*
	 * Try to cancel the other request. On broken systems this
	 * will fail and the process will hang on exit.
	 */
	ATF_REQUIRE(aio_error(&ioreq[i].iocb) == EINPROGRESS);
	ATF_REQUIRE(aio_cancel(s[0], &ioreq[i].iocb) == AIO_CANCELED);
/*
 * This test ensures that aio_write() on a blocking socket of a "large"
 * buffer does not return a short completion.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write);
ATF_TC_BODY(aio_socket_blocking_short_write, tc)

	struct aiocb iocb, *iocbp;
	int buffer_size, sb_size;

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	/* buffer_size starts as MAX(receive buffer, send buffer). */
	len = sizeof(sb_size);
	ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
	ATF_REQUIRE(len == sizeof(sb_size));
	buffer_size = sb_size;

	ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
	ATF_REQUIRE(len == sizeof(sb_size));
	if (sb_size > buffer_size)
		buffer_size = sb_size;

	/*
	 * Use twice the size of the MAX(receive buffer, send buffer)
	 * to ensure that the write is split up into multiple writes
	 */
	buffer[0] = malloc(buffer_size);
	ATF_REQUIRE(buffer[0] != NULL);
	buffer[1] = malloc(buffer_size);
	ATF_REQUIRE(buffer[1] != NULL);

	aio_fill_buffer(buffer[1], buffer_size, random());

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	iocb.aio_buf = buffer[1];
	iocb.aio_nbytes = buffer_size;
	ATF_REQUIRE(aio_write(&iocb) == 0);

	/* Drain the receive side so the blocked write can make progress. */
	done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
	ATF_REQUIRE(done == buffer_size);

	/* The completion must report the full length, not a short count. */
	done = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp == &iocb);
	ATF_REQUIRE(done == buffer_size);

	ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);
/*
 * This test verifies that cancelling a partially completed socket write
 * returns a short write rather than ECANCELED.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_short_write_cancel);
ATF_TC_BODY(aio_socket_short_write_cancel, tc)

	struct aiocb iocb, *iocbp;
	int buffer_size, sb_size;

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	/* buffer_size starts as MAX(receive buffer, send buffer). */
	len = sizeof(sb_size);
	ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
	ATF_REQUIRE(len == sizeof(sb_size));
	buffer_size = sb_size;

	ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
	ATF_REQUIRE(len == sizeof(sb_size));
	if (sb_size > buffer_size)
		buffer_size = sb_size;

	/*
	 * Use three times the size of the MAX(receive buffer, send
	 * buffer) for the write to ensure that the write is split up
	 * into multiple writes internally. The recv() ensures that
	 * the write has partially completed, but a remaining size of
	 * two buffers should ensure that the write has not completed
	 * fully when it is cancelled.
	 */
	buffer[0] = malloc(buffer_size);
	ATF_REQUIRE(buffer[0] != NULL);
	buffer[1] = malloc(buffer_size * 3);
	ATF_REQUIRE(buffer[1] != NULL);

	aio_fill_buffer(buffer[1], buffer_size * 3, random());

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	iocb.aio_buf = buffer[1];
	iocb.aio_nbytes = buffer_size * 3;
	ATF_REQUIRE(aio_write(&iocb) == 0);

	done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
	ATF_REQUIRE(done == buffer_size);

	/* Cancel mid-flight: the request must already be partially done. */
	ATF_REQUIRE(aio_error(&iocb) == EINPROGRESS);
	ATF_REQUIRE(aio_cancel(s[1], &iocb) == AIO_NOTCANCELED);

	/* The completion reports the partial byte count, not ECANCELED. */
	done = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp == &iocb);
	ATF_REQUIRE(done >= buffer_size && done <= buffer_size * 2);

	ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);
/*
 * This test just performs a basic test of aio_fsync().
 */
ATF_TC_WITHOUT_HEAD(aio_fsync_test);
ATF_TC_BODY(aio_fsync_test, tc)

	struct aiocb synccb, *iocbp;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	/*
	 * NOTE(review): open(2) with O_CREAT requires a third (mode)
	 * argument; omitting it passes indeterminate permission bits.
	 */
	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
	unlink(FILE_PATHNAME);

	/* Size the file to one filesystem block per queued write. */
	ATF_REQUIRE(fstat(fd, &sb) == 0);
	ATF_REQUIRE(sb.st_blksize != 0);
	ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0);

	/*
	 * Queue several asynchronous write requests. Hopefully this
	 * forces the aio_fsync() request to be deferred. There is no
	 * reliable way to guarantee that however.
	 */
	for (i = 0; i < nitems(buffers); i++) {
		buffers[i].done = false;
		memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb));
		/* NOTE(review): malloc() result is not checked here. */
		buffers[i].buffer = malloc(sb.st_blksize);
		aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random());
		buffers[i].iocb.aio_fildes = fd;
		buffers[i].iocb.aio_buf = buffers[i].buffer;
		buffers[i].iocb.aio_nbytes = sb.st_blksize;
		buffers[i].iocb.aio_offset = sb.st_blksize * i;
		ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0);
	}

	/* Queue the aio_fsync request. */
	memset(&synccb, 0, sizeof(synccb));
	synccb.aio_fildes = fd;
	ATF_REQUIRE(aio_fsync(O_SYNC, &synccb) == 0);

	/* Wait for requests to complete. */
	rval = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp != NULL);
	/* The fsync request completes with a zero return value. */
	if (iocbp == &synccb) {
		ATF_REQUIRE(rval == 0);

	/* Match each completion against a queued write, at most once. */
	for (i = 0; i < nitems(buffers); i++) {
		if (iocbp == &buffers[i].iocb) {
			ATF_REQUIRE(buffers[i].done == false);
			ATF_REQUIRE(rval == sb.st_blksize);
			buffers[i].done = true;

	ATF_REQUIRE_MSG(false, "unmatched AIO request");

	/* Every queued write must have been seen exactly once. */
	for (i = 0; i < nitems(buffers); i++)
		ATF_REQUIRE_MSG(buffers[i].done,
		    "AIO request %u did not complete", i);
1153 ATF_TP_ADD_TC(tp, file_poll);
1154 ATF_TP_ADD_TC(tp, file_suspend);
1155 ATF_TP_ADD_TC(tp, file_waitcomplete);
1156 ATF_TP_ADD_TC(tp, fifo_poll);
1157 ATF_TP_ADD_TC(tp, fifo_suspend);
1158 ATF_TP_ADD_TC(tp, fifo_waitcomplete);
1159 ATF_TP_ADD_TC(tp, socket_poll);
1160 ATF_TP_ADD_TC(tp, socket_suspend);
1161 ATF_TP_ADD_TC(tp, socket_waitcomplete);
1162 ATF_TP_ADD_TC(tp, pty_poll);
1163 ATF_TP_ADD_TC(tp, pty_suspend);
1164 ATF_TP_ADD_TC(tp, pty_waitcomplete);
1165 ATF_TP_ADD_TC(tp, pipe_poll);
1166 ATF_TP_ADD_TC(tp, pipe_suspend);
1167 ATF_TP_ADD_TC(tp, pipe_waitcomplete);
1168 ATF_TP_ADD_TC(tp, md_poll);
1169 ATF_TP_ADD_TC(tp, md_suspend);
1170 ATF_TP_ADD_TC(tp, md_waitcomplete);
1171 ATF_TP_ADD_TC(tp, aio_large_read_test);
1172 ATF_TP_ADD_TC(tp, aio_socket_two_reads);
1173 ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write);
1174 ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel);
1175 ATF_TP_ADD_TC(tp, aio_fsync_test);
1177 return (atf_no_error());