FreeBSD stable/9: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

/*
 * Virtual device vector for GEOM.
 */

struct g_class zfs_vdev_class = {
        .name = "ZFS::VDEV",
        .version = G_VERSION,
};

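/*
 * Register the class with GEOM; DECLARE_GEOM_CLASS() hooks it into
 * the GEOM module event system at load time.
 */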
DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_delete_disable", &vdev_geom_bio_delete_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RW,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
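/*
 * Both knobs are loader tunables as well as run-time sysctls, e.g.
 * "sysctl vfs.zfs.vdev.bio_delete_disable=1" from userland, or
 * vfs.zfs.vdev.bio_flush_disable="1" in /boot/loader.conf.
 */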

static void
vdev_geom_orphan(struct g_consumer *cp)
{
        vdev_t *vd;

        g_topology_assert();

        vd = cp->private;

        /*
         * Orphan callbacks occur from the GEOM event thread.
         * Concurrent with this call, new I/O requests may be
         * working their way through GEOM about to find out
         * (only once executed by the g_down thread) that we've
         * been orphaned from our disk provider.  These I/Os
         * must be retired before we can detach our consumer.
         * This is most easily achieved by acquiring the
         * SPA ZIO configuration lock as a writer, but doing
         * so with the GEOM topology lock held would cause
         * a lock order reversal.  Instead, rely on the SPA's
         * async removal support to invoke a close on this
         * vdev once it is safe to do so.
         */
        zfs_post_remove(vd->vdev_spa, vd);
        vd->vdev_remove_wanted = B_TRUE;
        spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}

static struct g_consumer *
vdev_geom_attach(struct g_provider *pp)
{
        struct g_geom *gp;
        struct g_consumer *cp;

        g_topology_assert();

        ZFS_LOG(1, "Attaching to %s.", pp->name);
        /* Do we already have a geom? If not, create one. */
        LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
                if (gp->flags & G_GEOM_WITHER)
                        continue;
                if (strcmp(gp->name, "zfs::vdev") != 0)
                        continue;
                break;
        }
        if (gp == NULL) {
                gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
                gp->orphan = vdev_geom_orphan;
                cp = g_new_consumer(gp);
                if (g_attach(cp, pp) != 0) {
                        g_wither_geom(gp, ENXIO);
                        return (NULL);
                }
                if (g_access(cp, 1, 0, 1) != 0) {
                        g_wither_geom(gp, ENXIO);
                        return (NULL);
                }
                ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
        } else {
                /* Check if we are already connected to this provider. */
                LIST_FOREACH(cp, &gp->consumer, consumer) {
                        if (cp->provider == pp) {
                                ZFS_LOG(1, "Found consumer for %s.", pp->name);
                                break;
                        }
                }
                if (cp == NULL) {
                        cp = g_new_consumer(gp);
                        if (g_attach(cp, pp) != 0) {
                                g_destroy_consumer(cp);
                                return (NULL);
                        }
                        if (g_access(cp, 1, 0, 1) != 0) {
                                g_detach(cp);
                                g_destroy_consumer(cp);
                                return (NULL);
                        }
                        ZFS_LOG(1, "Created consumer for %s.", pp->name);
                } else {
                        if (g_access(cp, 1, 0, 1) != 0)
                                return (NULL);
                        ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
                }
        }
        return (cp);
}

static void
vdev_geom_detach(void *arg, int flag __unused)
{
        struct g_geom *gp;
        struct g_consumer *cp;

        g_topology_assert();
        cp = arg;
        gp = cp->geom;

        ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
        g_access(cp, -1, 0, -1);
        /* Destroy consumer on last close. */
        if (cp->acr == 0 && cp->ace == 0) {
                ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
                if (cp->acw > 0)
                        g_access(cp, 0, -cp->acw, 0);
                g_detach(cp);
                g_destroy_consumer(cp);
        }
        /* Destroy geom if there are no consumers left. */
        if (LIST_EMPTY(&gp->consumer)) {
                ZFS_LOG(1, "Destroyed geom %s.", gp->name);
                g_wither_geom(gp, ENXIO);
        }
}

static uint64_t
nvlist_get_guid(nvlist_t *list)
{
        uint64_t value;

        value = 0;
        nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, &value);
        return (value);
}

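/*
 * Synchronously issue one or more bios for the given command, splitting
 * the request into MAXPHYS-sized, sector-aligned chunks and waiting for
 * each chunk to complete before sending the next.
 */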
static int
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset, off_t size)
{
        struct bio *bp;
        u_char *p;
        off_t off, maxio;
        int error;

        ASSERT((offset % cp->provider->sectorsize) == 0);
        ASSERT((size % cp->provider->sectorsize) == 0);

        bp = g_alloc_bio();
        off = offset;
        offset += size;
        p = data;
        maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
        error = 0;

        for (; off < offset; off += maxio, p += maxio, size -= maxio) {
                bzero(bp, sizeof(*bp));
                bp->bio_cmd = cmd;
                bp->bio_done = NULL;
                bp->bio_offset = off;
                bp->bio_length = MIN(size, maxio);
                bp->bio_data = p;
                g_io_request(bp, cp);
                error = biowait(bp, "vdev_geom_io");
                if (error != 0)
                        break;
        }

        g_destroy_bio(bp);
        return (error);
}

static void
vdev_geom_taste_orphan(struct g_consumer *cp)
{

        KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
            cp->provider->name));
}

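/*
 * Try each of the vdev labels in turn and return the first config whose
 * pool state and TXG look valid; ENOENT if none can be read.
 */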
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
{
        struct g_provider *pp;
        vdev_label_t *label;
        char *p, *buf;
        size_t buflen;
        uint64_t psize;
        off_t offset, size;
        uint64_t guid, state, txg;
        int error, l, len;

        g_topology_assert_not();

        pp = cp->provider;
        ZFS_LOG(1, "Reading config from %s...", pp->name);

        psize = pp->mediasize;
        psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

        size = sizeof(*label) + pp->sectorsize -
            ((sizeof(*label) - 1) % pp->sectorsize) - 1;
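        /* I.e., sizeof(*label) rounded up to a multiple of pp->sectorsize. */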

        guid = 0;
        label = kmem_alloc(size, KM_SLEEP);
        buflen = sizeof(label->vl_vdev_phys.vp_nvlist);

        *config = NULL;
        for (l = 0; l < VDEV_LABELS; l++) {

                offset = vdev_label_offset(psize, l, 0);
                if ((offset % pp->sectorsize) != 0)
                        continue;

                if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
                        continue;
                buf = label->vl_vdev_phys.vp_nvlist;

                if (nvlist_unpack(buf, buflen, config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state == POOL_STATE_DESTROYED ||
                    state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                break;
        }

        kmem_free(label, size);
        return (*config == NULL ? ENOENT : 0);
}

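/*
 * Grow the config array so that index "id" is valid, preserving any
 * entries already collected.
 */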
static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
        nvlist_t **new_configs;
        uint64_t i;

        if (id < *count)
                return;
        new_configs = kmem_zalloc((id + 1) * sizeof(nvlist_t *),
            KM_SLEEP);
        for (i = 0; i < *count; i++)
                new_configs[i] = (*configs)[i];
        if (*configs != NULL)
                kmem_free(*configs, *count * sizeof(void *));
        *configs = new_configs;
        *count = id + 1;
}

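/*
 * Merge a freshly tasted config into the per-pool array: keep only
 * configs for the named pool (and, once known, the same pool guid),
 * and for each top-level vdev id keep the copy with the highest TXG.
 */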
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t *known_pool_guid)
{
        nvlist_t *vdev_tree;
        uint64_t pool_guid;
        uint64_t vdev_guid, known_guid;
        uint64_t id, txg, known_txg;
        char *pname;
        int i;

        if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
            strcmp(pname, name) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
                goto ignore;

        if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
                goto ignore;

        VERIFY(nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

        if (*known_pool_guid != 0) {
                if (pool_guid != *known_pool_guid)
                        goto ignore;
        } else
                *known_pool_guid = pool_guid;

        resize_configs(configs, count, id);

        if ((*configs)[id] != NULL) {
                VERIFY(nvlist_lookup_uint64((*configs)[id],
                    ZPOOL_CONFIG_POOL_TXG, &known_txg) == 0);
                if (txg <= known_txg)
                        goto ignore;
                nvlist_free((*configs)[id]);
        }

        (*configs)[id] = cfg;
        return;

ignore:
        nvlist_free(cfg);
}

static int
vdev_geom_attach_taster(struct g_consumer *cp, struct g_provider *pp)
{
        int error;

        if (pp->flags & G_PF_WITHER)
                return (EINVAL);
        if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize))
                return (EINVAL);
        g_attach(cp, pp);
        error = g_access(cp, 1, 0, 0);
        if (error != 0)
                g_detach(cp);
        return (error);
}

static void
vdev_geom_detach_taster(struct g_consumer *cp)
{
        g_access(cp, -1, 0, 0);
        g_detach(cp);
}

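/*
 * Taste every provider in the system for labels that belong to the
 * pool "name"; the surviving configs are returned indexed by
 * top-level vdev id.
 */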
int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
        struct g_class *mp;
        struct g_geom *gp, *zgp;
        struct g_provider *pp;
        struct g_consumer *zcp;
        nvlist_t *vdev_cfg;
        uint64_t pool_guid;
        int error;

        DROP_GIANT();
        g_topology_lock();

        zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
        /* This orphan function should never be called. */
        zgp->orphan = vdev_geom_taste_orphan;
        zcp = g_new_consumer(zgp);

        *configs = NULL;
        *count = 0;
        pool_guid = 0;
        LIST_FOREACH(mp, &g_classes, class) {
                if (mp == &zfs_vdev_class)
                        continue;
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (gp->flags & G_GEOM_WITHER)
                                continue;
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                if (pp->flags & G_PF_WITHER)
                                        continue;
                                if (vdev_geom_attach_taster(zcp, pp) != 0)
                                        continue;
                                g_topology_unlock();
                                error = vdev_geom_read_config(zcp, &vdev_cfg);
                                g_topology_lock();
                                vdev_geom_detach_taster(zcp);
                                if (error)
                                        continue;
                                ZFS_LOG(1, "successfully read vdev config");

                                process_vdev_config(configs, count,
                                    vdev_cfg, name, &pool_guid);
                        }
                }
        }

        g_destroy_consumer(zcp);
        g_destroy_geom(zgp);
        g_topology_unlock();
        PICKUP_GIANT();

        return (*count > 0 ? 0 : ENOENT);
}

static uint64_t
vdev_geom_read_guid(struct g_consumer *cp)
{
        nvlist_t *config;
        uint64_t guid;

        g_topology_assert_not();

        guid = 0;
        if (vdev_geom_read_config(cp, &config) == 0) {
                guid = nvlist_get_guid(config);
                nvlist_free(config);
        }
        return (guid);
}

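/*
 * Walk every provider of every class except our own, taste each one
 * for a matching vdev guid, and attach to the first provider found.
 */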
static struct g_consumer *
vdev_geom_attach_by_guid(uint64_t guid)
{
        struct g_class *mp;
        struct g_geom *gp, *zgp;
        struct g_provider *pp;
        struct g_consumer *cp, *zcp;
        uint64_t pguid;

        g_topology_assert();

        zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
        /* This orphan function should never be called. */
        zgp->orphan = vdev_geom_taste_orphan;
        zcp = g_new_consumer(zgp);

        cp = NULL;
        LIST_FOREACH(mp, &g_classes, class) {
                if (mp == &zfs_vdev_class)
                        continue;
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (gp->flags & G_GEOM_WITHER)
                                continue;
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                if (vdev_geom_attach_taster(zcp, pp) != 0)
                                        continue;
                                g_topology_unlock();
                                pguid = vdev_geom_read_guid(zcp);
                                g_topology_lock();
                                vdev_geom_detach_taster(zcp);
                                if (pguid != guid)
                                        continue;
                                cp = vdev_geom_attach(pp);
                                if (cp == NULL) {
                                        printf("ZFS WARNING: Unable to attach to %s.\n",
                                            pp->name);
                                        continue;
                                }
                                break;
                        }
                        if (cp != NULL)
                                break;
                }
                if (cp != NULL)
                        break;
        }
end:
        g_destroy_consumer(zcp);
        g_destroy_geom(zgp);
        return (cp);
}

static struct g_consumer *
vdev_geom_open_by_guid(vdev_t *vd)
{
        struct g_consumer *cp;
        char *buf;
        size_t len;

        g_topology_assert();

        ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
        cp = vdev_geom_attach_by_guid(vd->vdev_guid);
        if (cp != NULL) {
                len = strlen(cp->provider->name) + strlen("/dev/") + 1;
                buf = kmem_alloc(len, KM_SLEEP);

                snprintf(buf, len, "/dev/%s", cp->provider->name);
                spa_strfree(vd->vdev_path);
                vd->vdev_path = buf;

                ZFS_LOG(1, "Attach by guid [%ju] succeeded, provider %s.",
                    (uintmax_t)vd->vdev_guid, vd->vdev_path);
        } else {
                ZFS_LOG(1, "Search by guid [%ju] failed.",
                    (uintmax_t)vd->vdev_guid);
        }

        return (cp);
}

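/*
 * Look the provider up by the vdev's cached /dev path and, if
 * check_guid is set, verify that its label guid matches the vdev
 * before trusting it.
 */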
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
        struct g_provider *pp;
        struct g_consumer *cp;
        uint64_t guid;

        g_topology_assert();

        cp = NULL;
        pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
        if (pp != NULL) {
                ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
                cp = vdev_geom_attach(pp);
                if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
                    pp->sectorsize <= VDEV_PAD_SIZE) {
                        g_topology_unlock();
                        guid = vdev_geom_read_guid(cp);
                        g_topology_lock();
                        if (guid != vd->vdev_guid) {
                                vdev_geom_detach(cp, 0);
                                cp = NULL;
                                ZFS_LOG(1, "guid mismatch for provider %s: "
                                    "%ju != %ju.", vd->vdev_path,
                                    (uintmax_t)vd->vdev_guid, (uintmax_t)guid);
                        } else {
                                ZFS_LOG(1, "guid match for provider %s.",
                                    vd->vdev_path);
                        }
                }
        }

        return (cp);
}

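/*
 * Open the vdev's backing provider: first by path, then, if the guid
 * check fails, by searching all providers for the matching guid.
 */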
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
        struct g_provider *pp;
        struct g_consumer *cp;
        size_t bufsize;
        int error;

        /*
         * We must have a pathname, and it must be absolute.
         */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (EINVAL);
        }

        vd->vdev_tsd = NULL;

        DROP_GIANT();
        g_topology_lock();
        error = 0;

        /*
         * If we're creating or splitting a pool, just find the GEOM provider
         * by its name and ignore GUID mismatches.
         */
        if (vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
            vd->vdev_spa->spa_splitting_newspa == B_TRUE) {
                cp = vdev_geom_open_by_path(vd, 0);
        } else {
                cp = vdev_geom_open_by_path(vd, 1);
                if (cp == NULL) {
                        /*
                         * The device at vd->vdev_path doesn't have the
                         * expected guid.  The disks might have merely
                         * moved around, so try all other GEOM providers
                         * to find one with the right guid.
                         */
                        cp = vdev_geom_open_by_guid(vd);
                }
        }

        if (cp == NULL) {
                ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
                error = ENOENT;
        } else if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
            !ISP2(cp->provider->sectorsize)) {
                ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
                    vd->vdev_path);
                vdev_geom_detach(cp, 0);
                error = EINVAL;
                cp = NULL;
        } else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {
                int i;

                for (i = 0; i < 5; i++) {
                        error = g_access(cp, 0, 1, 0);
                        if (error == 0)
                                break;
                        g_topology_unlock();
                        tsleep(vd, 0, "vdev", hz / 2);
                        g_topology_lock();
                }
                if (error != 0) {
                        printf("ZFS WARNING: Unable to open %s for writing (error=%d).\n",
                            vd->vdev_path, error);
                        vdev_geom_detach(cp, 0);
                        cp = NULL;
                }
        }
        g_topology_unlock();
        PICKUP_GIANT();
        if (cp == NULL) {
                vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
                return (error);
        }

        cp->private = vd;
        vd->vdev_tsd = cp;
        pp = cp->provider;

        /*
         * Determine the actual size of the device.
         */
        *max_psize = *psize = pp->mediasize;

        /*
         * Determine the device's minimum transfer size.
         */
        *ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
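        /* E.g. 512-byte sectors yield ashift 9; 4096-byte sectors yield 12. */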

        /*
         * Clear the nowritecache settings, so that on a vdev_reopen()
         * we will try again.
         */
        vd->vdev_nowritecache = B_FALSE;

        if (vd->vdev_physpath != NULL)
                spa_strfree(vd->vdev_physpath);
        bufsize = sizeof("/dev/") + strlen(pp->name);
        vd->vdev_physpath = kmem_alloc(bufsize, KM_SLEEP);
        snprintf(vd->vdev_physpath, bufsize, "/dev/%s", pp->name);

        return (0);
}

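/*
 * vdev_geom_detach() asserts the topology lock, so rather than taking
 * it here the close is deferred to the GEOM event thread.
 */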
static void
vdev_geom_close(vdev_t *vd)
{
        struct g_consumer *cp;

        cp = vd->vdev_tsd;
        if (cp == NULL)
                return;
        vd->vdev_tsd = NULL;
        vd->vdev_delayed_close = B_FALSE;
        g_post_event(vdev_geom_detach, cp, M_WAITOK, NULL);
}

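/*
 * Completion callback, run from GEOM's g_up thread: propagate the bio
 * status to the zio, latch ENOTSUP for BIO_FLUSH/BIO_DELETE, and hand
 * the zio back to the ZIO pipeline.
 */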
static void
vdev_geom_io_intr(struct bio *bp)
{
        vdev_t *vd;
        zio_t *zio;

        zio = bp->bio_caller1;
        vd = zio->io_vd;
        zio->io_error = bp->bio_error;
        if (zio->io_error == 0 && bp->bio_resid != 0)
                zio->io_error = EIO;
        if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == ENOTSUP) {
                /*
                 * If we get ENOTSUP, we know that no future
                 * attempts will ever succeed.  In this case we
                 * set a persistent bit so that we don't bother
                 * with the ioctl in the future.
                 */
                vd->vdev_nowritecache = B_TRUE;
        }
        if (bp->bio_cmd == BIO_DELETE && bp->bio_error == ENOTSUP) {
                /*
                 * If we get ENOTSUP, we know that no future
                 * attempts will ever succeed.  In this case we
                 * set a persistent bit so that we don't bother
                 * with the ioctl in the future.
                 */
                vd->vdev_notrim = B_TRUE;
        }
        if (zio->io_error == EIO && !vd->vdev_remove_wanted) {
                /*
                 * If the provider's error is set we assume it is being
                 * removed.
                 */
                if (bp->bio_to->error != 0) {
                        /*
                         * We post the resource as soon as possible, instead of
                         * when the async removal actually happens, because the
                         * DE is using this information to discard previous I/O
                         * errors.
                         */
                        /* XXX: zfs_post_remove() can sleep. */
                        zfs_post_remove(zio->io_spa, vd);
                        vd->vdev_remove_wanted = B_TRUE;
                        spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
                } else if (!vd->vdev_delayed_close) {
                        vd->vdev_delayed_close = B_TRUE;
                }
        }
        g_destroy_bio(bp);
        zio_interrupt(zio);
}

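/*
 * Translate a zio into a bio for our consumer: reads and writes map
 * directly, DKIOCFLUSHWRITECACHE becomes BIO_FLUSH, and DKIOCTRIM
 * becomes BIO_DELETE.
 */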
static int
vdev_geom_io_start(zio_t *zio)
{
        vdev_t *vd;
        struct g_consumer *cp;
        struct bio *bp;
        int error;

        vd = zio->io_vd;

        if (zio->io_type == ZIO_TYPE_IOCTL) {
                /* XXPOLICY */
                if (!vdev_readable(vd)) {
                        zio->io_error = ENXIO;
                        return (ZIO_PIPELINE_CONTINUE);
                }

                switch (zio->io_cmd) {
                case DKIOCFLUSHWRITECACHE:
                        if (zfs_nocacheflush || vdev_geom_bio_flush_disable)
                                break;
                        if (vd->vdev_nowritecache) {
                                zio->io_error = ENOTSUP;
                                break;
                        }
                        goto sendreq;
                case DKIOCTRIM:
                        if (vdev_geom_bio_delete_disable)
                                break;
                        if (vd->vdev_notrim) {
                                zio->io_error = ENOTSUP;
                                break;
                        }
                        goto sendreq;
                default:
                        zio->io_error = ENOTSUP;
                }

                return (ZIO_PIPELINE_CONTINUE);
        }
sendreq:
        cp = vd->vdev_tsd;
        if (cp == NULL) {
                zio->io_error = ENXIO;
                return (ZIO_PIPELINE_CONTINUE);
        }
        bp = g_alloc_bio();
        bp->bio_caller1 = zio;
        switch (zio->io_type) {
        case ZIO_TYPE_READ:
        case ZIO_TYPE_WRITE:
                bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ : BIO_WRITE;
                bp->bio_data = zio->io_data;
                bp->bio_offset = zio->io_offset;
                bp->bio_length = zio->io_size;
                break;
        case ZIO_TYPE_IOCTL:
                switch (zio->io_cmd) {
                case DKIOCFLUSHWRITECACHE:
                        bp->bio_cmd = BIO_FLUSH;
                        bp->bio_flags |= BIO_ORDERED;
                        bp->bio_data = NULL;
                        bp->bio_offset = cp->provider->mediasize;
                        bp->bio_length = 0;
                        break;
                case DKIOCTRIM:
                        bp->bio_cmd = BIO_DELETE;
                        bp->bio_data = NULL;
                        bp->bio_offset = zio->io_offset;
                        bp->bio_length = zio->io_size;
                        break;
                }
                break;
        }
        bp->bio_done = vdev_geom_io_intr;

        g_io_request(bp, cp);

        return (ZIO_PIPELINE_STOP);
}

static void
vdev_geom_io_done(zio_t *zio)
{
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}

vdev_ops_t vdev_geom_ops = {
        vdev_geom_open,
        vdev_geom_close,
        vdev_default_asize,
        vdev_geom_io_start,
        vdev_geom_io_done,
        NULL,                   /* vdev_op_state_change */
        vdev_geom_hold,
        vdev_geom_rele,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};