 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/msdos_fs.h>	/* for SECTOR_* */
#include <linux/bio.h>

#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_SET
static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_flag_set(flag, q);
}
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_flag_clear(flag, q);
}
#endif

/*
 * The blk_queue_write_cache() interface has replaced the blk_queue_flush()
 * interface. However, the new interface is GPL-only, thus we implement our
 * own trivial wrapper when the GPL-only version is detected.
 *
 * Earlier, the blk_queue_flush() interface had replaced the
 * blk_queue_ordered() interface. However, while the old interface was
 * available to all, the new one is GPL-only. Thus if the GPL-only version
 * is detected we implement our own trivial helper.
 */
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
	blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
	if (wc)
		q->flush_flags |= REQ_FLUSH;
	if (fua)
		q->flush_flags |= REQ_FUA;
#elif defined(HAVE_BLK_QUEUE_FLUSH)
	blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
#else
#error "Unsupported kernel"
#endif
}
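
/*
 * Illustrative usage (not part of the upstream header; the function name is
 * hypothetical): a driver that honors cache flush and FUA requests would
 * typically advertise both on its request queue.
 */
static inline void
example_enable_write_cache(struct request_queue *q)
{
	/* Advertise a volatile write cache (wc) with FUA support (fua). */
	blk_queue_set_write_cache(q, true, true);
}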

static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
#if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
	!defined(HAVE_DISK_UPDATE_READAHEAD)
#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
	q->backing_dev_info->ra_pages = ra_pages;
#else
	q->backing_dev_info.ra_pages = ra_pages;
#endif
#endif
}

#ifdef HAVE_BIO_BVEC_ITER
#define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
#define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_idx
#define	BIO_BI_SKIP(bio)	(0)
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif
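
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream
 * header): the BIO_BI_* accessors let callers read a bio's position and
 * length without caring whether the kernel keeps them in bi_iter or
 * directly in the bio.
 */
static inline void
example_bio_extent(struct bio *bio, uint64_t *offset, uint64_t *size)
{
	*offset = BIO_BI_SECTOR(bio) << 9;	/* 512-byte sectors to bytes */
	*size = BIO_BI_SIZE(bio);
}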

static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags, bool dev,
    bool transport, bool driver)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

	if (dev)
		*flags |= REQ_FAILFAST_DEV;
	if (transport)
		*flags |= REQ_FAILFAST_TRANSPORT;
	if (driver)
		*flags |= REQ_FAILFAST_DRIVER;
}

/*
 * Maximum disk label length; it may be undefined for some kernels.
 */
#if !defined(DISK_NAME_LEN)
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

#ifdef HAVE_BIO_BI_STATUS
static inline int
bi_status_to_errno(blk_status_t status)
{
	switch (status) {
	case BLK_STS_OK:
		return (0);
	case BLK_STS_NOTSUPP:
		return (EOPNOTSUPP);
	case BLK_STS_TIMEOUT:
		return (ETIMEDOUT);
	case BLK_STS_NOSPC:
		return (ENOSPC);
	case BLK_STS_TRANSPORT:
		return (ENOLINK);
	case BLK_STS_TARGET:
		return (EREMOTEIO);
#ifdef HAVE_BLK_STS_RESV_CONFLICT
	case BLK_STS_RESV_CONFLICT:
#else
	case BLK_STS_NEXUS:
#endif
		return (EBADE);
	case BLK_STS_MEDIUM:
		return (ENODATA);
	case BLK_STS_PROTECTION:
		return (EILSEQ);
	case BLK_STS_RESOURCE:
		return (ENOMEM);
	case BLK_STS_AGAIN:
		return (EAGAIN);
	case BLK_STS_IOERR:
		return (EIO);
	default:
		return (EIO);
	}
}

static inline blk_status_t
errno_to_bi_status(int error)
{
	switch (error) {
	case 0:
		return (BLK_STS_OK);
	case EOPNOTSUPP:
		return (BLK_STS_NOTSUPP);
	case ETIMEDOUT:
		return (BLK_STS_TIMEOUT);
	case ENOSPC:
		return (BLK_STS_NOSPC);
	case ENOLINK:
		return (BLK_STS_TRANSPORT);
	case EREMOTEIO:
		return (BLK_STS_TARGET);
	case EBADE:
#ifdef HAVE_BLK_STS_RESV_CONFLICT
		return (BLK_STS_RESV_CONFLICT);
#else
		return (BLK_STS_NEXUS);
#endif
	case ENODATA:
		return (BLK_STS_MEDIUM);
	case EILSEQ:
		return (BLK_STS_PROTECTION);
	case ENOMEM:
		return (BLK_STS_RESOURCE);
	case EAGAIN:
		return (BLK_STS_AGAIN);
	case EIO:
		return (BLK_STS_IOERR);
	default:
		return (BLK_STS_IOERR);
	}
}
#endif /* HAVE_BIO_BI_STATUS */
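
/*
 * Example round trip (illustrative only): the two converters are intended
 * to be inverses for the enumerated values, e.g.
 *
 *	int error = bi_status_to_errno(bio->bi_status);
 *	bio->bi_status = errno_to_bi_status(error);
 */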

/*
 * The bio_endio() prototype changed slightly. These are helper
 * macros to ensure the prototype and invocation are handled.
 */
#ifdef HAVE_1ARG_BIO_END_IO_T
#ifdef HAVE_BIO_BI_STATUS
#define	BIO_END_IO_ERROR(bio)		bi_status_to_errno(bio->bi_status)
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_status(bio, error)
static inline void
bio_set_bi_status(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_status = errno_to_bi_status(-error);
	bio_endio(bio);
}
#else
#define	BIO_END_IO_ERROR(bio)		(-(bio->bi_error))
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_error(bio, error)
static inline void
bio_set_bi_error(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_error = error;
	bio_endio(bio);
}
#endif /* HAVE_BIO_BI_STATUS */
#else
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
#define	BIO_END_IO(bio, error)		bio_endio(bio, error);
#endif /* HAVE_1ARG_BIO_END_IO_T */
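
/*
 * Illustrative usage (assumption, not part of this header): a driver
 * completing a bio it was handed calls BIO_END_IO() with zero for success
 * or a negative errno for failure, e.g.
 *
 *	BIO_END_IO(bio, 0);
 *	BIO_END_IO(bio, -EIO);
 */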

/*
 * 5.15 MACRO,
 *   GD_DEAD
 *
 * 2.6.36 - 5.14 MACRO,
 *   GENHD_FL_UP
 *
 * Check the disk status and return B_TRUE if alive,
 * otherwise B_FALSE.
 */
static inline boolean_t
zfs_check_disk_status(struct block_device *bdev)
{
#if defined(GENHD_FL_UP)
	return (!!(bdev->bd_disk->flags & GENHD_FL_UP));
#elif defined(GD_DEAD)
	return (!test_bit(GD_DEAD, &bdev->bd_disk->state));
#else
/*
 * This is encountered if neither GENHD_FL_UP nor GD_DEAD is available in
 * the kernel - likely due to a macro change that needs to be chased down.
 */
#error "Unsupported kernel: no usable disk status check"
#endif
}

/*
 * 3.10.0 CentOS 7.x API,
 *   blkdev_reread_part()
 *
 * For older kernels, trigger a re-reading of the partition table by calling
 * check_disk_change(), which calls flush_disk() to invalidate the device.
 *
 * For newer kernels (as of 5.10), bdev_check_media_change() is used in favor
 * of check_disk_change(), with the modification that invalidation is no
 * longer forced.
 */
#ifdef HAVE_CHECK_DISK_CHANGE
#define	zfs_check_media_change(bdev)	check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define	vdev_bdev_reread_part(bdev)	blkdev_reread_part(bdev)
#else
#define	vdev_bdev_reread_part(bdev)	check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
#else
#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
static inline int
zfs_check_media_change(struct block_device *bdev)
{
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	struct gendisk *gd = bdev->bd_disk;
	const struct block_device_operations *bdo = gd->fops;
#endif

	if (!bdev_check_media_change(bdev))
		return (0);

#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	/*
	 * Force revalidation, to mimic the old behavior of
	 * check_disk_change().
	 */
	if (bdo->revalidate_disk)
		bdo->revalidate_disk(gd);
#endif

	return (0);
}
#define	vdev_bdev_reread_part(bdev)	zfs_check_media_change(bdev)
#elif defined(HAVE_DISK_CHECK_MEDIA_CHANGE)
#define	vdev_bdev_reread_part(bdev)	disk_check_media_change(bdev->bd_disk)
#define	zfs_check_media_change(bdev)	disk_check_media_change(bdev->bd_disk)
#else
/*
 * This is encountered if check_disk_change() and bdev_check_media_change()
 * are not available in the kernel - likely due to an API change that needs
 * to be chased down.
 */
#error "Unsupported kernel: no usable disk change check"
#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
#endif /* HAVE_CHECK_DISK_CHANGE */
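
/*
 * Illustrative usage (assumption, not part of this header): after a device
 * is reopened or replaced, callers can ask the kernel to re-read the
 * partition table with
 *
 *	(void) vdev_bdev_reread_part(bdev);
 *
 * which maps onto whichever of the interfaces above the kernel provides.
 */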

/*
 * lookup_bdev() has long been exported for use; prior to that it existed
 * but the symbol was not exported.
 *
 * 4.4.0-6.21 API change for Ubuntu
 * lookup_bdev() gained a second argument, FMODE_*, to check inode
 * permissions.
 *
 * More recently it was changed to take a dev_t argument which is set on
 * success and to return a non-zero error code on failure.
 */
static inline int
vdev_lookup_bdev(const char *path, dev_t *dev)
{
#if defined(HAVE_DEVT_LOOKUP_BDEV)
	return (lookup_bdev(path, dev));
#elif defined(HAVE_1ARG_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#elif defined(HAVE_MODE_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path, FMODE_READ);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#else
#error "Unsupported kernel"
#endif
}
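
/*
 * Illustrative usage (assumption, not part of this header):
 *
 *	dev_t dev;
 *	if (vdev_lookup_bdev("/dev/disk/by-id/example", &dev) == 0)
 *		the path resolved and dev holds the device number.
 */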

#if defined(HAVE_BLK_MODE_T)
#define	blk_mode_is_open_write(flag)	((flag) & BLK_OPEN_WRITE)
#else
#define	blk_mode_is_open_write(flag)	((flag) & FMODE_WRITE)
#endif

/*
 * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
 */
#if !defined(HAVE_BIO_SET_OP_ATTRS)
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
#if defined(HAVE_BIO_BI_OPF)
	bio->bi_opf = rw | flags;
#else
	bio->bi_rw |= rw | flags;
#endif /* HAVE_BIO_BI_OPF */
}
#endif

/*
 * bio_set_flush - Set the appropriate flags in a bio to guarantee
 * data are on non-volatile media on completion.
 *
 * 2.6.37 - 4.9 API,
 *   Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
 *   replacement for WRITE_BARRIER to allow expressing richer semantics
 *   to the block layer. It's up to the block layer to implement the
 *   semantics correctly. Use the WRITE_FLUSH_FUA flag combination.
 *
 *   REQ_FLUSH was renamed to REQ_PREFLUSH. For consistency with previous
 *   OpenZFS releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
 *
 * 4.10 API,
 *   The read/write flags and their modifiers, including WRITE_FLUSH,
 *   WRITE_FUA and WRITE_FLUSH_FUA were removed from fs.h in
 *   torvalds/linux@70fd7614 and replaced by direct flag modification
 *   of the REQ_ flags in bio->bi_opf. Use REQ_PREFLUSH.
 */
static inline void
bio_set_flush(struct bio *bio)
{
#if defined(HAVE_REQ_PREFLUSH)	/* >= 4.10 */
	bio_set_op_attrs(bio, 0, REQ_PREFLUSH | REQ_OP_WRITE);
#elif defined(WRITE_FLUSH_FUA)	/* >= 2.6.37 and <= 4.9 */
	bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#else
#error "Allowing the build will cause bio_set_flush requests to be ignored."
#endif
}
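
/*
 * Illustrative usage (assumption, not part of this header): a zero-length
 * write bio marked with bio_set_flush() asks the device to flush its
 * volatile write cache, e.g.
 *
 *	bio_set_flush(bio);
 *	submit_bio(bio);
 *
 * on kernels where submit_bio() takes only the bio.
 */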

/*
 * Detect a cache flush request. Checking for all of REQ_OP_FLUSH,
 * REQ_PREFLUSH, and REQ_FLUSH, as appropriate for the kernel, is correct
 * in all cases but may have a performance impact for some kernels. It
 * has the advantage of minimizing kernel specific changes in the zvol code.
 */
static inline boolean_t
bio_is_flush(struct bio *bio)
{
#if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
	return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
#elif defined(HAVE_REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_PREFLUSH);
#elif defined(HAVE_REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
	return (bio->bi_rw & REQ_PREFLUSH);
#elif defined(HAVE_REQ_FLUSH)
	return (bio->bi_rw & REQ_FLUSH);
#else
#error "Unsupported kernel"
#endif
}

/*
 * REQ_FUA flag moved to bio->bi_opf on newer kernels; older kernels check
 * it in bio->bi_rw.
 */
static inline boolean_t
bio_is_fua(struct bio *bio)
{
#if defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_FUA);
#elif defined(REQ_FUA)
	return (bio->bi_rw & REQ_FUA);
#else
#error "Allowing the build will cause fua requests to be ignored."
#endif
}

/*
 * In all cases the normal I/O path is used for discards. The only
 * difference is how the kernel tags individual I/Os as discards.
 */
static inline boolean_t
bio_is_discard(struct bio *bio)
{
#if defined(HAVE_REQ_OP_DISCARD)
	return (bio_op(bio) == REQ_OP_DISCARD);
#elif defined(HAVE_REQ_DISCARD)
	return (bio->bi_rw & REQ_DISCARD);
#else
#error "Unsupported kernel"
#endif
}

/*
 * Newer kernels tag secure erase requests with REQ_OP_SECURE_ERASE; older
 * kernels used the REQ_SECURE flag.
 */
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
#if defined(HAVE_REQ_OP_SECURE_ERASE)
	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
#elif defined(REQ_SECURE)
	return (bio->bi_rw & REQ_SECURE);
#else
	return (0);
#endif
}

/*
 * Discard granularity and alignment restrictions may now be set. For
 * older kernels which do not support this it is safe to skip it.
 */
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}

/*
 * Newer kernels:
 *   bdev_max_discard_sectors()
 *
 * Older kernels:
 *   blk_queue_discard()
 */
static inline boolean_t
bdev_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_DISCARD_SECTORS)
	return (bdev_max_discard_sectors(bdev) > 0 &&
	    bdev_discard_granularity(bdev) > 0);
#elif defined(HAVE_BLK_QUEUE_DISCARD)
	return (blk_queue_discard(bdev_get_queue(bdev)) > 0 &&
	    bdev_get_queue(bdev)->limits.discard_granularity > 0);
#else
#error "Unsupported kernel"
#endif
}

/*
 * Newer kernels:
 *   bdev_max_secure_erase_sectors()
 *
 * Older kernels:
 *   blk_queue_secure_erase()
 *   blk_queue_secdiscard()
 */
static inline boolean_t
bdev_secure_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_SECURE_ERASE_SECTORS)
	return (!!bdev_max_secure_erase_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_SECURE_ERASE)
	return (!!blk_queue_secure_erase(bdev_get_queue(bdev)));
#elif defined(HAVE_BLK_QUEUE_SECDISCARD)
	return (!!blk_queue_secdiscard(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}
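
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream
 * header): deciding whether TRIM can be issued to a device, optionally
 * requiring secure erase support.
 */
static inline boolean_t
example_bdev_can_trim(struct block_device *bdev, boolean_t secure)
{
	if (secure)
		return (bdev_secure_discard_supported(bdev));

	return (bdev_discard_supported(bdev));
}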

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly. Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times. Other kernel callers and
 * user space processes which don't pass this value will get EBUSY. This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER			((void *)0x2401de7)

static inline unsigned long
blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio)
{
#if defined(HAVE_BDEV_IO_ACCT_63)
	return (bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
	    jiffies));
#elif defined(HAVE_BDEV_IO_ACCT_OLD)
	return (bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
	    bio_op(bio), jiffies));
#elif defined(HAVE_DISK_IO_ACCT)
	return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
#elif defined(HAVE_BIO_IO_ACCT)
	return (bio_start_io_acct(bio));
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#else
	return (0);
#endif
}

static inline void
blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
{
#if defined(HAVE_BDEV_IO_ACCT_63)
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), bio_sectors(bio),
	    start_time);
#elif defined(HAVE_BDEV_IO_ACCT_OLD)
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
#elif defined(HAVE_DISK_IO_ACCT)
	disk_end_io_acct(disk, bio_op(bio), start_time);
#elif defined(HAVE_BIO_IO_ACCT)
	bio_end_io_acct(bio, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
	generic_end_io_acct(rw, &disk->part0, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_end_io_acct(q, rw, &disk->part0, start_time);
#endif
}
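
/*
 * Illustrative usage (assumption, not part of this header): the two helpers
 * above are used as a bracketing pair around servicing a bio, e.g.
 *
 *	unsigned long start = blk_generic_start_io_acct(q, disk, rw, bio);
 *	... service the I/O ...
 *	blk_generic_end_io_acct(q, disk, rw, bio, start);
 */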

#ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static inline struct request_queue *
blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
	return (blk_alloc_queue(make_request, node_id));
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
	return (blk_alloc_queue_rh(make_request, node_id));
#else
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
	if (q != NULL)
		blk_queue_make_request(q, make_request);

	return (q);
#endif
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */

/*
 * All the io_*() helper functions below can operate on a bio, or a rq, but
 * not both. The older submit_bio() codepath will pass a bio, and the
 * newer blk-mq codepath will pass a rq.
 */
static inline int
io_data_dir(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL) {
		if (op_is_write(req_op(rq))) {
			return (WRITE);
		} else {
			return (READ);
		}
	}
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_data_dir(bio));
}

static inline boolean_t
io_is_flush(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_FLUSH);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_flush(bio));
}

static inline boolean_t
io_is_discard(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_DISCARD);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_discard(bio));
}

static inline boolean_t
io_is_secure_erase(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_SECURE_ERASE);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_secure_erase(bio));
}

static inline boolean_t
io_is_fua(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (rq->cmd_flags & REQ_FUA);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_fua(bio));
}

static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (blk_rq_pos(rq) << 9);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (BIO_BI_SECTOR(bio) << 9);
}

static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (blk_rq_bytes(rq));
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (BIO_BI_SIZE(bio));
}

static inline boolean_t
io_has_data(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (bio_has_data(rq->bio));
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_has_data(bio));
}
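
/*
 * Illustrative usage (assumption, not part of this header): a request
 * handler entered with either a bio or a blk-mq request passes the one it
 * has and NULL for the other, e.g.
 *
 *	uint64_t offset = io_offset(bio, NULL);	(bio codepath)
 *	uint64_t size = io_size(NULL, rq);	(blk-mq codepath)
 */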

#endif /* _ZFS_BLKDEV_H */