/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/refcount.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
#include <sys/efi_partition.h>
#include <sys/fm/fs/zfs.h>

/*
 * Virtual device vector for disks.
 */

extern ldi_ident_t zfs_li;
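
/*
 * Wrapper for an in-flight buf: vdb_buf must remain the first member so
 * that the buf_t handed to the completion callback can be cast back to a
 * vdev_disk_buf_t to recover the originating zio (see vdev_disk_io_intr()).
 */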
typedef struct vdev_disk_buf {
	buf_t		vdb_buf;
	zio_t		*vdb_io;
} vdev_disk_buf_t;
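
/*
 * Prefetch vnode references for the device path and devid before open;
 * vdev_disk_rele() drops them again.  Callers hold the SCL_STATE config
 * lock as writer (see the ASSERT below).
 */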
static void
vdev_disk_hold(vdev_t *vd)
{
	ddi_devid_t devid;
	char *minor;

	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	if (vd->vdev_wholedisk == -1ULL) {
		size_t len = strlen(vd->vdev_path) + 3;
		char *buf = kmem_alloc(len, KM_SLEEP);

		(void) snprintf(buf, len, "%ss0", vd->vdev_path);

		(void) ldi_vp_from_name(buf, &vd->vdev_name_vp);
		kmem_free(buf, len);
	}

	if (vd->vdev_name_vp == NULL)
		(void) ldi_vp_from_name(vd->vdev_path, &vd->vdev_name_vp);

	if (vd->vdev_devid != NULL &&
	    ddi_devid_str_decode(vd->vdev_devid, &devid, &minor) == 0) {
		(void) ldi_vp_from_devid(devid, minor, &vd->vdev_devid_vp);
		ddi_devid_str_free(minor);
		ddi_devid_free(devid);
	}
}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	if (vd->vdev_name_vp) {
		VN_RELE_ASYNC(vd->vdev_name_vp,
		    dsl_pool_vnrele_taskq(vd->vdev_spa->spa_dsl_pool));
		vd->vdev_name_vp = NULL;
	}
	if (vd->vdev_devid_vp) {
		VN_RELE_ASYNC(vd->vdev_devid_vp,
		    dsl_pool_vnrele_taskq(vd->vdev_spa->spa_dsl_pool));
		vd->vdev_devid_vp = NULL;
	}
}
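
/*
 * Returns the additional space, in bytes, between the EFI alternate LBA
 * (the backup GPT header location recorded at label time) and the device's
 * current capacity; used by vdev_disk_open() to compute max_psize for
 * whole-disk vdevs.
 */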
static uint64_t
vdev_disk_get_space(vdev_t *vd, uint64_t capacity, uint_t blksz)
{
	ASSERT(vd->vdev_wholedisk);

	vdev_disk_t *dvd = vd->vdev_tsd;
	dk_efi_t dk_ioc;
	efi_gpt_t *efi;
	uint64_t avail_space = 0;
	int efisize = EFI_LABEL_SIZE * 2;

	dk_ioc.dki_data = kmem_alloc(efisize, KM_SLEEP);
	dk_ioc.dki_lba = 1;
	dk_ioc.dki_length = efisize;
	dk_ioc.dki_data_64 = (uint64_t)(uintptr_t)dk_ioc.dki_data;
	efi = dk_ioc.dki_data;

	if (ldi_ioctl(dvd->vd_lh, DKIOCGETEFI, (intptr_t)&dk_ioc,
	    FKIOCTL, kcred, NULL) == 0) {
		uint64_t efi_altern_lba = LE_64(efi->efi_gpt_AlternateLBA);

		zfs_dbgmsg("vdev %s, capacity %llu, altern lba %llu",
		    vd->vdev_path, capacity, efi_altern_lba);
		if (capacity > efi_altern_lba)
			avail_space = (capacity - efi_altern_lba) * blksz;
	}
	kmem_free(dk_ioc.dki_data, efisize);
	return (avail_space);
}
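
/*
 * Open entry point for the disk vdev ops vector.  On success, fills in the
 * current device size (*psize), the expandable size (*max_psize), and the
 * device's minimum transfer shift (*ashift).
 */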
static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	spa_t *spa = vd->vdev_spa;
	vdev_disk_t *dvd;
	struct dk_minfo_ext dkmext;
	int error;
	dev_t dev;
	int otyp;
	boolean_t validate_devid = B_FALSE;
	ddi_devid_t devid;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
	if (vd->vdev_tsd != NULL) {
		ASSERT(vd->vdev_reopening);
		dvd = vd->vdev_tsd;
		goto skip_open;
	}

	dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

	/*
	 * When opening a disk device, we want to preserve the user's original
	 * intent.  We always want to open the device by the path the user gave
	 * us, even if it is one of multiple paths to the same device.  But we
	 * also want to be able to survive disks being removed/recabled.
	 * Therefore the sequence of opening devices is:
	 *
	 * 1. Try opening the device by path.  For legacy pools without the
	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
	 *
	 * 2. If the devid of the device matches the stored value, return
	 *    success.
	 *
	 * 3. Otherwise, the device may have moved.  Try opening the device
	 *    by the devid instead.
	 */
	if (vd->vdev_devid != NULL) {
		if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
		    &dvd->vd_minor) != 0) {
			vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
			return (SET_ERROR(EINVAL));
		}
	}

	error = EINVAL;		/* presume failure */

	if (vd->vdev_path != NULL) {

		if (vd->vdev_wholedisk == -1ULL) {
			size_t len = strlen(vd->vdev_path) + 3;
			char *buf = kmem_alloc(len, KM_SLEEP);
			ldi_handle_t lh;

			(void) snprintf(buf, len, "%ss0", vd->vdev_path);

			if (ldi_open_by_name(buf, spa_mode(spa), kcred,
			    &lh, zfs_li) == 0) {
				spa_strfree(vd->vdev_path);
				vd->vdev_path = buf;
				vd->vdev_wholedisk = 1ULL;
				(void) ldi_close(lh, spa_mode(spa), kcred);
			} else {
				kmem_free(buf, len);
			}
		}

		error = ldi_open_by_name(vd->vdev_path, spa_mode(spa), kcred,
		    &dvd->vd_lh, zfs_li);

		/*
		 * Compare the devid to the stored value.
		 */
		if (error == 0 && vd->vdev_devid != NULL &&
		    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
			if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
				error = SET_ERROR(EINVAL);
				(void) ldi_close(dvd->vd_lh, spa_mode(spa),
				    kcred);
				dvd->vd_lh = NULL;
			}
			ddi_devid_free(devid);
		}

		/*
		 * If we succeeded in opening the device, but 'vdev_wholedisk'
		 * is not yet set, then this must be a slice.
		 */
		if (error == 0 && vd->vdev_wholedisk == -1ULL)
			vd->vdev_wholedisk = 0;
	}

	/*
	 * If we were unable to open by path, or the devid check fails, open by
	 * devid instead.
	 */
	if (error != 0 && vd->vdev_devid != NULL) {
		error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor,
		    spa_mode(spa), kcred, &dvd->vd_lh, zfs_li);
	}

	/*
	 * If all else fails, then try opening by physical path (if available)
	 * or the logical path (if we failed due to the devid check).  While
	 * not as reliable as the devid, this will give us something, and the
	 * higher level vdev validation will prevent us from opening the wrong
	 * device.
	 */
	if (error) {
		if (vd->vdev_devid != NULL)
			validate_devid = B_TRUE;

		if (vd->vdev_physpath != NULL &&
		    (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != NODEV)
			error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);

		/*
		 * Note that we don't support the legacy auto-wholedisk support
		 * as above.  This hasn't been used in a very long time and we
		 * don't need to propagate its oddities to this edge condition.
		 */
		if (error && vd->vdev_path != NULL)
			error = ldi_open_by_name(vd->vdev_path, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);
	}

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	/*
	 * Now that the device has been successfully opened, update the devid
	 * if necessary.
	 */
	if (validate_devid && spa_writeable(spa) &&
	    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
		if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
			char *vd_devid;

			vd_devid = ddi_devid_str_encode(devid, dvd->vd_minor);
			zfs_dbgmsg("vdev %s: update devid from %s, "
			    "to %s", vd->vdev_path, vd->vdev_devid, vd_devid);
			spa_strfree(vd->vdev_devid);
			vd->vdev_devid = spa_strdup(vd_devid);
			ddi_devid_str_free(vd_devid);
		}
		ddi_devid_free(devid);
	}

	/*
	 * Once a device is opened, verify that the physical device path (if
	 * available) is up to date.
	 */
	if (ldi_get_dev(dvd->vd_lh, &dev) == 0 &&
	    ldi_get_otyp(dvd->vd_lh, &otyp) == 0) {
		char *physpath, *minorname;

		physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		minorname = NULL;
		if (ddi_dev_pathname(dev, otyp, physpath) == 0 &&
		    ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 &&
		    (vd->vdev_physpath == NULL ||
		    strcmp(vd->vdev_physpath, physpath) != 0)) {
			if (vd->vdev_physpath)
				spa_strfree(vd->vdev_physpath);
			(void) strlcat(physpath, ":", MAXPATHLEN);
			(void) strlcat(physpath, minorname, MAXPATHLEN);
			vd->vdev_physpath = spa_strdup(physpath);
		}
		if (minorname)
			kmem_free(minorname, strlen(minorname) + 1);
		kmem_free(physpath, MAXPATHLEN);
	}

skip_open:
	/*
	 * Determine the actual size of the device.
	 */
	if (ldi_get_size(dvd->vd_lh, psize) != 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Determine the device's minimum transfer size.
	 * If the ioctl isn't supported, assume DEV_BSIZE.
	 */
	if (ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFOEXT, (intptr_t)&dkmext,
	    FKIOCTL, kcred, NULL) != 0)
		dkmext.dki_pbsize = DEV_BSIZE;
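
	/*
	 * highbit() returns the 1-based index of the highest set bit, so
	 * this computes log2 of the physical block size (e.g. 9 for
	 * 512-byte sectors), with SPA_MINBLOCKSIZE as the floor.
	 */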
	*ashift = highbit(MAX(dkmext.dki_pbsize, SPA_MINBLOCKSIZE)) - 1;

	if (vd->vdev_wholedisk == 1) {
		uint64_t capacity = dkmext.dki_capacity - 1;
		uint64_t blksz = dkmext.dki_lbsize;
		int wce = 1;

		/*
		 * If we own the whole disk, try to enable disk write caching.
		 * We ignore errors because it's OK if we can't do it.
		 */
		(void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
		    FKIOCTL, kcred, NULL);

		*max_psize = *psize + vdev_disk_get_space(vd, capacity, blksz);
		zfs_dbgmsg("capacity change: vdev %s, psize %llu, "
		    "max_psize %llu", vd->vdev_path, *psize, *max_psize);
	} else {
		*max_psize = *psize;
	}

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	return (0);
}

static void
vdev_disk_close(vdev_t *vd)
{
	vdev_disk_t *dvd = vd->vdev_tsd;

	if (vd->vdev_reopening || dvd == NULL)
		return;

	if (dvd->vd_minor != NULL)
		ddi_devid_str_free(dvd->vd_minor);

	if (dvd->vd_devid != NULL)
		ddi_devid_free(dvd->vd_devid);

	if (dvd->vd_lh != NULL)
		(void) ldi_close(dvd->vd_lh, spa_mode(vd->vdev_spa), kcred);

	vd->vdev_delayed_close = B_FALSE;
	kmem_free(dvd, sizeof (vdev_disk_t));
	vd->vdev_tsd = NULL;
}
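
/*
 * Synchronous read/write against a raw LDI handle; used below by
 * vdev_disk_read_rootlabel() to read vdev labels directly off a device.
 */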
int
vdev_disk_physio(ldi_handle_t vd_lh, caddr_t data, size_t size,
    uint64_t offset, int flags)
{
	buf_t *bp;
	int error = 0;

	if (vd_lh == NULL)
		return (SET_ERROR(EINVAL));

	ASSERT(flags & B_READ || flags & B_WRITE);

	bp = getrbuf(KM_SLEEP);
	bp->b_flags = flags | B_BUSY | B_NOCACHE | B_FAILFAST;
	bp->b_bcount = size;
	bp->b_un.b_addr = (void *)data;
	bp->b_lblkno = lbtodb(offset);
	bp->b_bufsize = size;

	error = ldi_strategy(vd_lh, bp);
	ASSERT(error == 0);
	if ((error = biowait(bp)) == 0 && bp->b_resid != 0)
		error = SET_ERROR(EIO);
	freerbuf(bp);

	return (error);
}
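
/*
 * Completion callback for asynchronous reads and writes: b_iodone is set
 * to this function in vdev_disk_io_start(), so it is invoked via biodone()
 * when the disk driver finishes the buf.
 */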
static void
vdev_disk_io_intr(buf_t *bp)
{
	vdev_disk_buf_t *vdb = (vdev_disk_buf_t *)bp;
	zio_t *zio = vdb->vdb_io;

	/*
	 * The rest of the zio stack only deals with EIO, ECKSUM, and ENXIO.
	 * Rather than teach the rest of the stack about other error
	 * possibilities (EFAULT, etc), we normalize the error value here.
	 */
	zio->io_error = (geterror(bp) != 0 ? EIO : 0);

	if (zio->io_error == 0 && bp->b_resid != 0)
		zio->io_error = SET_ERROR(EIO);

	kmem_free(vdb, sizeof (vdev_disk_buf_t));

	zio_interrupt(zio);
}

static void
vdev_disk_ioctl_free(zio_t *zio)
{
	kmem_free(zio->io_vsd, sizeof (struct dk_callback));
}

static const zio_vsd_ops_t vdev_disk_vsd_ops = {
	vdev_disk_ioctl_free,
	zio_vsd_default_cksum_report
};
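
/*
 * Completion callback for the asynchronous DKIOCFLUSHWRITECACHE ioctl,
 * wired up through the dk_callback structure in vdev_disk_io_start().
 */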
static void
vdev_disk_ioctl_done(void *zio_arg, int error)
{
	zio_t *zio = zio_arg;

	zio->io_error = error;

	zio_interrupt(zio);
}
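
/*
 * I/O start entry point.  Cache-flush ioctls are issued here and may
 * complete asynchronously through vdev_disk_ioctl_done(); reads and writes
 * are dispatched to the LDI layer via ldi_strategy() and complete in
 * vdev_disk_io_intr().
 */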
static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_disk_t *dvd = vd->vdev_tsd;
	vdev_disk_buf_t *vdb;
	struct dk_callback *dkc;
	buf_t *bp;
	int error;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {

		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (vd->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			zio->io_vsd = dkc = kmem_alloc(sizeof (*dkc), KM_SLEEP);
			zio->io_vsd_ops = &vdev_disk_vsd_ops;

			dkc->dkc_callback = vdev_disk_ioctl_done;
			dkc->dkc_flag = FLUSH_VOLATILE;
			dkc->dkc_cookie = zio;

			error = ldi_ioctl(dvd->vd_lh, zio->io_cmd,
			    (uintptr_t)dkc, FKIOCTL, kcred, NULL);

			if (error == 0) {
				/*
				 * The ioctl will be done asynchronously,
				 * and will call vdev_disk_ioctl_done()
				 * upon completion.
				 */
				return (ZIO_PIPELINE_STOP);
			}

			if (error == ENOTSUP || error == ENOTTY) {
				/*
				 * If we get ENOTSUP or ENOTTY, we know that
				 * no future attempts will ever succeed.
				 * In this case we set a persistent bit so
				 * that we don't bother with the ioctl in the
				 * future.
				 */
				vd->vdev_nowritecache = B_TRUE;
			}
			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		return (ZIO_PIPELINE_CONTINUE);
	}
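
	/*
	 * Read/write path: wrap the zio in a vdev_disk_buf_t so the
	 * completion handler can recover it, then hand the buf to the LDI
	 * layer; vdev_disk_io_intr() finishes the zio asynchronously.
	 */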
	vdb = kmem_alloc(sizeof (vdev_disk_buf_t), KM_SLEEP);

	vdb->vdb_io = zio;
	bp = &vdb->vdb_buf;

	bioinit(bp);
	bp->b_flags = B_BUSY | B_NOCACHE |
	    (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
	if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bp->b_flags |= B_FAILFAST;
	bp->b_bcount = zio->io_size;
	bp->b_un.b_addr = zio->io_data;
	bp->b_lblkno = lbtodb(zio->io_offset);
	bp->b_bufsize = zio->io_size;
	bp->b_iodone = (int (*)())vdev_disk_io_intr;

	/* ldi_strategy() will return non-zero only on programming errors */
	VERIFY(ldi_strategy(dvd->vd_lh, bp) == 0);

	return (ZIO_PIPELINE_STOP);
}

static void
vdev_disk_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	/*
	 * If the device returned EIO, then attempt a DKIOCSTATE ioctl to see
	 * if the device has been removed.  If this is the case, then we
	 * trigger an asynchronous removal of the device.  Otherwise, probe
	 * the device and make sure it's still accessible.
	 */
	if (zio->io_error == EIO && !vd->vdev_remove_wanted) {
		vdev_disk_t *dvd = vd->vdev_tsd;
		int state = DKIO_NONE;

		if (ldi_ioctl(dvd->vd_lh, DKIOCSTATE, (intptr_t)&state,
		    FKIOCTL, kcred, NULL) == 0 && state != DKIO_INSERTED) {
			/*
			 * We post the resource as soon as possible, instead of
			 * when the async removal actually happens, because the
			 * DE is using this information to discard previous I/O
			 * errors.
			 */
			zfs_post_remove(zio->io_spa, vd);
			vd->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		} else if (!vd->vdev_delayed_close) {
			vd->vdev_delayed_close = B_TRUE;
		}
	}
}
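
/*
 * Operations vector for disk vdevs; the generic vdev layer dispatches
 * open/close, I/O, and hold/rele requests for leaf disk vdevs through
 * this table.
 */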
vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,			/* vdev_op_state_change */
	vdev_disk_hold,
	vdev_disk_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};

/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
	ldi_handle_t vd_lh;
	vdev_label_t *label;
	uint64_t s, size;
	int l;
	ddi_devid_t tmpdevid;
	int error = -1;
	char *minor_name;

	/*
	 * Read the device label and build the nvlist.
	 */
	if (devid != NULL && ddi_devid_str_decode(devid, &tmpdevid,
	    &minor_name) == 0) {
		error = ldi_open_by_devid(tmpdevid, minor_name,
		    FREAD, kcred, &vd_lh, zfs_li);
		ddi_devid_free(tmpdevid);
		ddi_devid_str_free(minor_name);
	}

	if (error && (error = ldi_open_by_name(devpath, FREAD, kcred, &vd_lh,
	    zfs_li)))
		return (error);

	if (ldi_get_size(vd_lh, &s)) {
		(void) ldi_close(vd_lh, FREAD, kcred);
		return (SET_ERROR(EIO));
	}

	size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t);
	label = kmem_alloc(sizeof (vdev_label_t), KM_SLEEP);

	*config = NULL;
	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, l, 0);
		if (vdev_disk_physio(vd_lh, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, B_READ) != 0)
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	kmem_free(label, sizeof (vdev_label_t));
	(void) ldi_close(vd_lh, FREAD, kcred);
	if (*config == NULL)
		error = SET_ERROR(EIDRM);

	return (error);
}