/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

/*
 * Virtual device vector for GEOM.
 */

static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
        .name = "ZFS::VDEV",
        .version = G_VERSION,
        .attrchanged = vdev_geom_attrchanged,
};

struct consumer_vdev_elem {
        SLIST_ENTRY(consumer_vdev_elem) elems;
        vdev_t                          *vd;
};

SLIST_HEAD(consumer_priv_t, consumer_vdev_elem);
_Static_assert(sizeof(((struct g_consumer*)NULL)->private)
    == sizeof(struct consumer_priv_t*),
    "consumer_priv_t* can't be stored in g_consumer.private");
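/*
 * A single GEOM consumer may be referenced by more than one vdev at once,
 * so g_consumer.private holds the head of a singly-linked list of
 * consumer_vdev_elem entries rather than a bare vdev_t pointer; the
 * assertion above guarantees that the list head fits in the field.
 */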

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
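/*
 * Both knobs surface as vfs.zfs.vdev.* sysctls and, being CTLFLAG_RWTUN,
 * may also be set as loader tunables, e.g.:
 *
 *	sysctl vfs.zfs.vdev.bio_flush_disable=1
 *	echo 'vfs.zfs.vdev.bio_delete_disable="1"' >> /boot/loader.conf
 */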

/* Declare local functions */
static void vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read);

/*
 * Thread local storage used to indicate when a thread is probing geoms
 * for their guids.  If NULL, this thread is not tasting geoms.  If non-NULL,
 * it is looking for a replacement for the vdev_t* that is its value.
 */
uint_t zfs_geom_probe_vdev_key;
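/*
 * A consumer of this key would look roughly like the sketch below (the
 * zvol-open check is illustrative only; see vdev_geom_open() for the
 * producer side, which stores the vdev being probed via tsd_set()):
 *
 *	vdev_t *probing = tsd_get(zfs_geom_probe_vdev_key);
 *	if (probing != NULL)
 *		return (EOPNOTSUPP);	// don't let a probe open a zvol
 */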

static void
vdev_geom_set_rotation_rate(vdev_t *vd, struct g_consumer *cp)
{
        int error;
        uint16_t rate;

        error = g_getattr("GEOM::rotation_rate", cp, &rate);
        if (error == 0)
                vd->vdev_rotation_rate = rate;
        else
                vd->vdev_rotation_rate = VDEV_RATE_UNKNOWN;
}

static void
vdev_geom_set_physpath(vdev_t *vd, struct g_consumer *cp,
                       boolean_t do_null_update)
{
        boolean_t needs_update = B_FALSE;
        char *physpath;
        int error, physpath_len;

        physpath_len = MAXPATHLEN;
        physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
        error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
        if (error == 0) {
                char *old_physpath;

                /* g_topology lock ensures that vdev has not been closed */
                g_topology_assert();
                old_physpath = vd->vdev_physpath;
                vd->vdev_physpath = spa_strdup(physpath);

                if (old_physpath != NULL) {
                        needs_update = (strcmp(old_physpath,
                                                vd->vdev_physpath) != 0);
                        spa_strfree(old_physpath);
                } else
                        needs_update = do_null_update;
        }
        g_free(physpath);

        /*
         * If the physical path changed, update the config.
         * Only request an update for previously unset physpaths if
         * requested by the caller.
         */
        if (needs_update)
                spa_async_request(vd->vdev_spa, SPA_ASYNC_CONFIG_UPDATE);
}

static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
        struct consumer_priv_t *priv;
        struct consumer_vdev_elem *elem;

        priv = (struct consumer_priv_t*)&cp->private;
        if (SLIST_EMPTY(priv))
                return;

        SLIST_FOREACH(elem, priv, elems) {
                vdev_t *vd = elem->vd;
                if (strcmp(attr, "GEOM::rotation_rate") == 0) {
                        vdev_geom_set_rotation_rate(vd, cp);
                        return;
                }
                if (strcmp(attr, "GEOM::physpath") == 0) {
                        vdev_geom_set_physpath(vd, cp, /*null_update*/B_TRUE);
                        return;
                }
        }
}

static void
vdev_geom_orphan(struct g_consumer *cp)
{
        struct consumer_priv_t *priv;
        struct consumer_vdev_elem *elem;

        g_topology_assert();

        priv = (struct consumer_priv_t*)&cp->private;
        if (SLIST_EMPTY(priv))
                /* Vdev close in progress.  Ignore the event. */
                return;

        /*
         * Orphan callbacks occur from the GEOM event thread.
         * Concurrent with this call, new I/O requests may be
         * working their way through GEOM, only to find out
         * (once executed by the g_down thread) that we've
         * been orphaned from our disk provider.  These I/Os
         * must be retired before we can detach our consumer.
         * This is most easily achieved by acquiring the
         * SPA ZIO configuration lock as a writer, but doing
         * so with the GEOM topology lock held would cause
         * a lock order reversal.  Instead, rely on the SPA's
         * async removal support to invoke a close on this
         * vdev once it is safe to do so.
         */
        SLIST_FOREACH(elem, priv, elems) {
                vdev_t *vd = elem->vd;

                vd->vdev_remove_wanted = B_TRUE;
                spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
        }
}

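/*
 * Attach a consumer to the given provider, creating the "zfs::vdev" geom
 * on first use.  Access is acquired as g_access(cp, 1, 0, 1): one read
 * reference plus one exclusive reference, but no write reference; write
 * access is opened later, in vdev_geom_open(), only if the pool needs it.
 * With sanity set, providers that are too small to hold a vdev or that
 * have a non-power-of-2 sector size are rejected up front.
 */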
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, vdev_t *vd, boolean_t sanity)
{
        struct g_geom *gp;
        struct g_consumer *cp;
        int error;

        g_topology_assert();

        ZFS_LOG(1, "Attaching to %s.", pp->name);

        if (sanity) {
                if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize)) {
                        ZFS_LOG(1, "Failing attach of %s. "
                                   "Incompatible sectorsize %d\n",
                            pp->name, pp->sectorsize);
                        return (NULL);
                } else if (pp->mediasize < SPA_MINDEVSIZE) {
                        ZFS_LOG(1, "Failing attach of %s. "
                                   "Incompatible mediasize %ju\n",
                            pp->name, pp->mediasize);
                        return (NULL);
                }
        }

        /* Do we have geom already? No? Create one. */
        LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
                if (gp->flags & G_GEOM_WITHER)
                        continue;
                if (strcmp(gp->name, "zfs::vdev") != 0)
                        continue;
                break;
        }
        if (gp == NULL) {
                gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
                gp->orphan = vdev_geom_orphan;
                gp->attrchanged = vdev_geom_attrchanged;
                cp = g_new_consumer(gp);
                error = g_attach(cp, pp);
                if (error != 0) {
                        ZFS_LOG(1, "%s(%d): g_attach failed: %d\n", __func__,
                            __LINE__, error);
                        vdev_geom_detach(cp, B_FALSE);
                        return (NULL);
                }
                error = g_access(cp, 1, 0, 1);
                if (error != 0) {
                        ZFS_LOG(1, "%s(%d): g_access failed: %d\n", __func__,
                            __LINE__, error);
                        vdev_geom_detach(cp, B_FALSE);
                        return (NULL);
                }
                ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
        } else {
                /* Check if we are already connected to this provider. */
                LIST_FOREACH(cp, &gp->consumer, consumer) {
                        if (cp->provider == pp) {
                                ZFS_LOG(1, "Found consumer for %s.", pp->name);
                                break;
                        }
                }
                if (cp == NULL) {
                        cp = g_new_consumer(gp);
                        error = g_attach(cp, pp);
                        if (error != 0) {
                                ZFS_LOG(1, "%s(%d): g_attach failed: %d\n",
                                    __func__, __LINE__, error);
                                vdev_geom_detach(cp, B_FALSE);
                                return (NULL);
                        }
                        error = g_access(cp, 1, 0, 1);
                        if (error != 0) {
                                ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
                                    __func__, __LINE__, error);
                                vdev_geom_detach(cp, B_FALSE);
                                return (NULL);
                        }
                        ZFS_LOG(1, "Created consumer for %s.", pp->name);
                } else {
                        error = g_access(cp, 1, 0, 1);
                        if (error != 0) {
                                ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
                                    __func__, __LINE__, error);
                                return (NULL);
                        }
                        ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
                }
        }

        if (vd != NULL)
                vd->vdev_tsd = cp;

        cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
        return (cp);
}

static void
vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read)
{
        struct g_geom *gp;

        g_topology_assert();

        ZFS_LOG(1, "Detaching from %s.",
            cp->provider && cp->provider->name ? cp->provider->name : "NULL");

        gp = cp->geom;
        if (open_for_read)
                g_access(cp, -1, 0, -1);
        /* Destroy consumer on last close. */
        if (cp->acr == 0 && cp->ace == 0) {
                if (cp->acw > 0)
                        g_access(cp, 0, -cp->acw, 0);
                if (cp->provider != NULL) {
                        ZFS_LOG(1, "Destroying consumer for %s.",
                            cp->provider->name ? cp->provider->name : "NULL");
                        g_detach(cp);
                }
                g_destroy_consumer(cp);
        }
        /* Destroy geom if there are no consumers left. */
        if (LIST_EMPTY(&gp->consumer)) {
                ZFS_LOG(1, "Destroyed geom %s.", gp->name);
                g_wither_geom(gp, ENXIO);
        }
}

static void
vdev_geom_close_locked(vdev_t *vd)
{
        struct g_consumer *cp;
        struct consumer_priv_t *priv;
        struct consumer_vdev_elem *elem, *elem_temp;

        g_topology_assert();

        cp = vd->vdev_tsd;
        vd->vdev_delayed_close = B_FALSE;
        if (cp == NULL)
                return;

        ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
        KASSERT(cp->private != NULL, ("%s: cp->private is NULL", __func__));
        priv = (struct consumer_priv_t*)&cp->private;
        vd->vdev_tsd = NULL;
        SLIST_FOREACH_SAFE(elem, priv, elems, elem_temp) {
                if (elem->vd == vd) {
                        SLIST_REMOVE(priv, elem, consumer_vdev_elem, elems);
                        g_free(elem);
                }
        }

        vdev_geom_detach(cp, B_TRUE);
}

/*
 * Issue one or more bios to the vdev in parallel.
 * cmds, datas, offsets, errors, and sizes are arrays of length ncmds.  Each IO
 * operation is described by parallel entries from each array.  There may be
 * more bios actually issued than entries in the array.
 */
static void
vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
    off_t *sizes, int *errors, int ncmds)
{
        struct bio **bios;
        u_char *p;
        off_t off, maxio, s, end;
        int i, n_bios, j;
        size_t bios_size;

        maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
        n_bios = 0;

        /* How many bios are required for all commands? */
        for (i = 0; i < ncmds; i++)
                n_bios += (sizes[i] + maxio - 1) / maxio;

        /* Allocate memory for the bios */
        bios_size = n_bios * sizeof(struct bio*);
        bios = kmem_zalloc(bios_size, KM_SLEEP);

        /* Prepare and issue all of the bios */
        for (i = j = 0; i < ncmds; i++) {
                off = offsets[i];
                p = datas[i];
                s = sizes[i];
                end = off + s;
                ASSERT((off % cp->provider->sectorsize) == 0);
                ASSERT((s % cp->provider->sectorsize) == 0);

                for (; off < end; off += maxio, p += maxio, s -= maxio, j++) {
                        bios[j] = g_alloc_bio();
                        bios[j]->bio_cmd = cmds[i];
                        bios[j]->bio_done = NULL;
                        bios[j]->bio_offset = off;
                        bios[j]->bio_length = MIN(s, maxio);
                        bios[j]->bio_data = p;
                        g_io_request(bios[j], cp);
                }
        }
        ASSERT(j == n_bios);

        /* Wait for all of the bios to complete, and clean them up */
        for (i = j = 0; i < ncmds; i++) {
                off = offsets[i];
                s = sizes[i];
                end = off + s;

                for (; off < end; off += maxio, s -= maxio, j++) {
                        errors[i] = biowait(bios[j], "vdev_geom_io") ||
                            errors[i];
                        g_destroy_bio(bios[j]);
                }
        }
        kmem_free(bios, bios_size);
}
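/*
 * A minimal single-request use of the interface above might look like the
 * sketch below (hypothetical caller; buf and len are assumed to already be
 * sector-aligned, as the ASSERTs require):
 *
 *	int cmd = BIO_READ, error = 0;
 *	off_t offset = 0, size = len;
 *	void *data = buf;
 *
 *	vdev_geom_io(cp, &cmd, &data, &offset, &size, &error, 1);
 *
 * vdev_geom_read_config() below uses the same pattern with ncmds ==
 * VDEV_LABELS to read all four labels in parallel.
 */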

/*
 * Read the vdev config from a device.  Return the number of valid labels that
 * were found.  The vdev config will be returned in *configp if and only if at
 * least one valid label was found.
 */
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **configp)
{
        struct g_provider *pp;
        nvlist_t *config;
        vdev_phys_t *vdev_lists[VDEV_LABELS];
        char *buf;
        size_t buflen;
        uint64_t psize, state, txg;
        off_t offsets[VDEV_LABELS];
        off_t size;
        off_t sizes[VDEV_LABELS];
        int cmds[VDEV_LABELS];
        int errors[VDEV_LABELS];
        int l, nlabels;

        g_topology_assert_not();

        pp = cp->provider;
        ZFS_LOG(1, "Reading config from %s...", pp->name);

        psize = pp->mediasize;
        psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

        size = sizeof(*vdev_lists[0]) + pp->sectorsize -
            ((sizeof(*vdev_lists[0]) - 1) % pp->sectorsize) - 1;
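        /*
         * The expression above rounds sizeof(vdev_phys_t) up to a whole
         * number of sectors.  For example, with a hypothetical 1000-byte
         * payload and 512-byte sectors: 1000 + 512 - (999 % 512) - 1 =
         * 1000 + 512 - 487 - 1 = 1024, i.e. two full sectors.
         */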

        buflen = sizeof(vdev_lists[0]->vp_nvlist);

        /* Create all of the IO requests */
        for (l = 0; l < VDEV_LABELS; l++) {
                cmds[l] = BIO_READ;
                vdev_lists[l] = kmem_alloc(size, KM_SLEEP);
                offsets[l] = vdev_label_offset(psize, l, 0) + VDEV_SKIP_SIZE;
                sizes[l] = size;
                errors[l] = 0;
                ASSERT(offsets[l] % pp->sectorsize == 0);
        }

        /* Issue the IO requests */
        vdev_geom_io(cp, cmds, (void**)vdev_lists, offsets, sizes, errors,
            VDEV_LABELS);

        /* Parse the labels */
        config = *configp = NULL;
        nlabels = 0;
        for (l = 0; l < VDEV_LABELS; l++) {
                if (errors[l] != 0)
                        continue;

                buf = vdev_lists[l]->vp_nvlist;

                if (nvlist_unpack(buf, buflen, &config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(config);
                        continue;
                }

                if (state != POOL_STATE_SPARE &&
                    state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(config);
                        continue;
                }

                if (*configp != NULL)
                        nvlist_free(*configp);
                *configp = config;

                nlabels++;
        }

        /* Free the label storage */
        for (l = 0; l < VDEV_LABELS; l++)
                kmem_free(vdev_lists[l], size);

        return (nlabels);
}

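/*
 * Grow the *configs array (if necessary) so that index id is valid,
 * preserving existing entries and updating *count to id + 1.
 */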
static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
        nvlist_t **new_configs;
        uint64_t i;

        if (id < *count)
                return;
        new_configs = kmem_zalloc((id + 1) * sizeof(nvlist_t *),
            KM_SLEEP);
        for (i = 0; i < *count; i++)
                new_configs[i] = (*configs)[i];
        if (*configs != NULL)
                kmem_free(*configs, *count * sizeof(void *));
        *configs = new_configs;
        *count = id + 1;
}

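/*
 * File cfg into the *configs array, indexed by top-level vdev id, keeping
 * only the config with the highest txg for each slot.  Configs for other
 * pool names or pool guids are freed and ignored; the first matching pool
 * guid seen is latched into *known_pool_guid.  Consumes cfg either way.
 */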
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t* known_pool_guid)
{
        nvlist_t *vdev_tree;
        uint64_t pool_guid;
        uint64_t vdev_guid;
        uint64_t id, txg, known_txg;
        char *pname;

        if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
            strcmp(pname, name) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
                goto ignore;

        if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
                goto ignore;

        VERIFY(nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

        if (*known_pool_guid != 0) {
                if (pool_guid != *known_pool_guid)
                        goto ignore;
        } else
                *known_pool_guid = pool_guid;

        resize_configs(configs, count, id);

        if ((*configs)[id] != NULL) {
                VERIFY(nvlist_lookup_uint64((*configs)[id],
                    ZPOOL_CONFIG_POOL_TXG, &known_txg) == 0);
                if (txg <= known_txg)
                        goto ignore;
                nvlist_free((*configs)[id]);
        }

        (*configs)[id] = cfg;
        return;

ignore:
        nvlist_free(cfg);
}

int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
        struct g_class *mp;
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_consumer *zcp;
        nvlist_t *vdev_cfg;
        uint64_t pool_guid;
        int nlabels;

        DROP_GIANT();
        g_topology_lock();

        *configs = NULL;
        *count = 0;
        pool_guid = 0;
        LIST_FOREACH(mp, &g_classes, class) {
                if (mp == &zfs_vdev_class)
                        continue;
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (gp->flags & G_GEOM_WITHER)
                                continue;
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                if (pp->flags & G_PF_WITHER)
                                        continue;
                                zcp = vdev_geom_attach(pp, NULL, B_TRUE);
                                if (zcp == NULL)
                                        continue;
                                g_topology_unlock();
                                nlabels = vdev_geom_read_config(zcp, &vdev_cfg);
                                g_topology_lock();
                                vdev_geom_detach(zcp, B_TRUE);
                                if (nlabels == 0)
                                        continue;
                                ZFS_LOG(1, "successfully read vdev config");

                                process_vdev_config(configs, count,
                                    vdev_cfg, name, &pool_guid);
                        }
                }
        }
        g_topology_unlock();
        PICKUP_GIANT();

        return (*count > 0 ? 0 : ENOENT);
}

enum match {
        NO_MATCH = 0,           /* No matching labels found */
        TOPGUID_MATCH = 1,      /* Labels match top guid, not vdev guid */
        ZERO_MATCH = 1,         /* Should never be returned */
        ONE_MATCH = 2,          /* 1 label matching the vdev_guid */
        TWO_MATCH = 3,          /* 2 labels matching the vdev_guid */
        THREE_MATCH = 4,        /* 3 labels matching the vdev_guid */
        FULL_MATCH = 5          /* all labels match the vdev_guid */
};
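/*
 * The values are ordered by match quality so that callers can compare them
 * directly: vdev_attach_ok() returns ZERO_MATCH + nlabels for a vdev guid
 * match, and vdev_geom_attach_by_guids() keeps the provider with the
 * numerically greatest result.
 */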

static enum match
vdev_attach_ok(vdev_t *vd, struct g_provider *pp)
{
        nvlist_t *config;
        uint64_t pool_guid, top_guid, vdev_guid;
        struct g_consumer *cp;
        int nlabels;

        cp = vdev_geom_attach(pp, NULL, B_TRUE);
        if (cp == NULL) {
                ZFS_LOG(1, "Unable to attach tasting instance to %s.",
                    pp->name);
                return (NO_MATCH);
        }
        g_topology_unlock();
        nlabels = vdev_geom_read_config(cp, &config);
        g_topology_lock();
        vdev_geom_detach(cp, B_TRUE);
        if (nlabels == 0) {
                ZFS_LOG(1, "Unable to read config from %s.", pp->name);
                return (NO_MATCH);
        }

        pool_guid = 0;
        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid);
        top_guid = 0;
        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, &top_guid);
        vdev_guid = 0;
        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid);
        nvlist_free(config);

        /*
         * Check that the label's pool guid matches the desired guid.
         * Inactive spares and L2ARCs do not have any pool guid in the label.
         */
        if (pool_guid != 0 && pool_guid != spa_guid(vd->vdev_spa)) {
                ZFS_LOG(1, "pool guid mismatch for provider %s: %ju != %ju.",
                    pp->name,
                    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)pool_guid);
                return (NO_MATCH);
        }

        /*
         * Check that the label's vdev guid matches the desired guid.
         * The second condition handles a possible race on vdev detach, when
         * the remaining vdev receives the GUID of the destroyed top-level
         * mirror vdev.
         */
        if (vdev_guid == vd->vdev_guid) {
                ZFS_LOG(1, "guids match for provider %s.", pp->name);
                return (ZERO_MATCH + nlabels);
        } else if (top_guid == vd->vdev_guid && vd == vd->vdev_top) {
                ZFS_LOG(1, "top vdev guid match for provider %s.", pp->name);
                return (TOPGUID_MATCH);
        }
        ZFS_LOG(1, "vdev guid mismatch for provider %s: %ju != %ju.",
            pp->name, (uintmax_t)vd->vdev_guid, (uintmax_t)vdev_guid);
        return (NO_MATCH);
}

static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
        struct g_class *mp;
        struct g_geom *gp;
        struct g_provider *pp, *best_pp;
        struct g_consumer *cp;
        const char *vdpath;
        enum match match, best_match;

        g_topology_assert();

        vdpath = vd->vdev_path + sizeof("/dev/") - 1;
        cp = NULL;
        best_pp = NULL;
        best_match = NO_MATCH;
        LIST_FOREACH(mp, &g_classes, class) {
                if (mp == &zfs_vdev_class)
                        continue;
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (gp->flags & G_GEOM_WITHER)
                                continue;
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                match = vdev_attach_ok(vd, pp);
                                if (match > best_match) {
                                        best_match = match;
                                        best_pp = pp;
                                } else if (match == best_match) {
                                        if (strcmp(pp->name, vdpath) == 0) {
                                                best_pp = pp;
                                        }
                                }
                                if (match == FULL_MATCH)
                                        goto out;
                        }
                }
        }

out:
        if (best_pp) {
                cp = vdev_geom_attach(best_pp, vd, B_TRUE);
                if (cp == NULL) {
                        printf("ZFS WARNING: Unable to attach to %s.\n",
                            best_pp->name);
                }
        }
        return (cp);
}

static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
        struct g_consumer *cp;
        char *buf;
        size_t len;

        g_topology_assert();

        ZFS_LOG(1, "Searching by guids [%ju:%ju].",
                (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)vd->vdev_guid);
        cp = vdev_geom_attach_by_guids(vd);
        if (cp != NULL) {
                len = strlen(cp->provider->name) + strlen("/dev/") + 1;
                buf = kmem_alloc(len, KM_SLEEP);

                snprintf(buf, len, "/dev/%s", cp->provider->name);
                spa_strfree(vd->vdev_path);
                vd->vdev_path = buf;

                ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
                    (uintmax_t)spa_guid(vd->vdev_spa),
                    (uintmax_t)vd->vdev_guid, cp->provider->name);
        } else {
                ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
                    (uintmax_t)spa_guid(vd->vdev_spa),
                    (uintmax_t)vd->vdev_guid);
        }

        return (cp);
}

static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
        struct g_provider *pp;
        struct g_consumer *cp;

        g_topology_assert();

        cp = NULL;
        pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
        if (pp != NULL) {
                ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
                if (!check_guid || vdev_attach_ok(vd, pp) == FULL_MATCH)
                        cp = vdev_geom_attach(pp, vd, B_FALSE);
        }

        return (cp);
}

static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
        struct g_provider *pp;
        struct g_consumer *cp;
        int error;

        /* Set the TLS to indicate downstack that we should not access zvols */
        VERIFY(tsd_set(zfs_geom_probe_vdev_key, vd) == 0);

        /*
         * We must have a pathname, and it must be absolute.
         */
        if (vd->vdev_path == NULL || strncmp(vd->vdev_path, "/dev/", 5) != 0) {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (EINVAL);
        }

        /*
         * Reopen the device if it's not currently open. Otherwise,
         * just update the physical size of the device.
         */
        if ((cp = vd->vdev_tsd) != NULL) {
                ASSERT(vd->vdev_reopening);
                goto skip_open;
        }

        DROP_GIANT();
        g_topology_lock();
        error = 0;

        if (vd->vdev_spa->spa_splitting_newspa ||
            (vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
             vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
             vd->vdev_spa->spa_load_state == SPA_LOAD_CREATE)) {
                /*
                 * We are dealing with a vdev that hasn't been previously
                 * opened (since boot), and we are not loading an
                 * existing pool configuration.  This looks like a
                 * vdev add operation to a new or existing pool.
                 * Assume the user knows what he/she is doing and find
                 * GEOM provider by its name, ignoring GUID mismatches.
                 *
                 * XXPOLICY: It would be safer to only allow a device
                 *           that is unlabeled or labeled but missing
                 *           GUID information to be opened in this fashion,
                 *           unless we are doing a split, in which case we
                 *           should allow any guid.
                 */
                cp = vdev_geom_open_by_path(vd, 0);
        } else {
                /*
                 * Try using the recorded path for this device, but only
                 * accept it if its label data contains the expected GUIDs.
                 */
                cp = vdev_geom_open_by_path(vd, 1);
                if (cp == NULL) {
                        /*
                         * The device at vd->vdev_path doesn't have the
                         * expected GUIDs. The disks might have merely
                         * moved around so try all other GEOM providers
                         * to find one with the right GUIDs.
                         */
                        cp = vdev_geom_open_by_guids(vd);
                }
        }

        /* Clear the TLS now that tasting is done */
        VERIFY(tsd_set(zfs_geom_probe_vdev_key, NULL) == 0);

        if (cp == NULL) {
                ZFS_LOG(1, "Vdev %s not found.", vd->vdev_path);
                error = ENOENT;
        } else {
                struct consumer_priv_t *priv;
                struct consumer_vdev_elem *elem;
                int spamode;

                priv = (struct consumer_priv_t*)&cp->private;
                if (cp->private == NULL)
                        SLIST_INIT(priv);
                elem = g_malloc(sizeof(*elem), M_WAITOK|M_ZERO);
                elem->vd = vd;
                SLIST_INSERT_HEAD(priv, elem, elems);

                spamode = spa_mode(vd->vdev_spa);
                if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
                    !ISP2(cp->provider->sectorsize)) {
                        ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
                            cp->provider->name);

                        vdev_geom_close_locked(vd);
                        error = EINVAL;
                        cp = NULL;
                } else if (cp->acw == 0 && (spamode & FWRITE) != 0) {
                        int i;

                        for (i = 0; i < 5; i++) {
                                error = g_access(cp, 0, 1, 0);
                                if (error == 0)
                                        break;
                                g_topology_unlock();
                                tsleep(vd, 0, "vdev", hz / 2);
                                g_topology_lock();
                        }
                        if (error != 0) {
                                printf("ZFS WARNING: Unable to open %s for "
                                    "writing (error=%d).\n",
                                    cp->provider->name, error);
                                vdev_geom_close_locked(vd);
                                cp = NULL;
                        }
                }
        }

        /* Fetch initial physical path information for this device. */
        if (cp != NULL) {
                vdev_geom_attrchanged(cp, "GEOM::physpath");

                /* Set other GEOM characteristics */
                vdev_geom_set_physpath(vd, cp, /*do_null_update*/B_FALSE);
                vdev_geom_set_rotation_rate(vd, cp);
        }

        g_topology_unlock();
        PICKUP_GIANT();
        if (cp == NULL) {
                vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
                vdev_dbgmsg(vd, "vdev_geom_open: failed to open [error=%d]",
                    error);
                return (error);
        }
skip_open:
        pp = cp->provider;

        /*
         * Determine the actual size of the device.
         */
        *max_psize = *psize = pp->mediasize;

        /*
         * Determine the device's minimum transfer size and preferred
         * transfer size.
         */
        *logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
        *physical_ashift = 0;
        if (pp->stripesize > (1 << *logical_ashift) && ISP2(pp->stripesize) &&
            pp->stripesize <= (1 << SPA_MAXASHIFT) && pp->stripeoffset == 0)
                *physical_ashift = highbit(pp->stripesize) - 1;
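        /*
         * For example, a 512e-style provider reporting sectorsize 512 and
         * stripesize 4096 yields *logical_ashift = 9 (highbit(512) - 1)
         * and *physical_ashift = 12 (highbit(4096) - 1).
         */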

        /*
         * Clear the nowritecache settings, so that on a vdev_reopen()
         * we will try again.
         */
        vd->vdev_nowritecache = B_FALSE;

        return (0);
}

static void
vdev_geom_close(vdev_t *vd)
{
        struct g_consumer *cp;

        cp = vd->vdev_tsd;

        DROP_GIANT();
        g_topology_lock();

        if (!vd->vdev_reopening ||
            (cp != NULL && ((cp->flags & G_CF_ORPHAN) != 0 ||
            (cp->provider != NULL && cp->provider->error != 0))))
                vdev_geom_close_locked(vd);

        g_topology_unlock();
        PICKUP_GIANT();
}

static void
vdev_geom_io_intr(struct bio *bp)
{
        vdev_t *vd;
        zio_t *zio;

        zio = bp->bio_caller1;
        vd = zio->io_vd;
        zio->io_error = bp->bio_error;
        if (zio->io_error == 0 && bp->bio_resid != 0)
                zio->io_error = SET_ERROR(EIO);

        switch (zio->io_error) {
        case ENOTSUP:
                /*
                 * If we get ENOTSUP for BIO_FLUSH or BIO_DELETE we know
                 * that future attempts will never succeed. In this case
                 * we set a persistent flag so that we don't bother with
                 * requests in the future.
                 */
                switch (bp->bio_cmd) {
                case BIO_FLUSH:
                        vd->vdev_nowritecache = B_TRUE;
                        break;
                case BIO_DELETE:
                        vd->vdev_notrim = B_TRUE;
                        break;
                }
                break;
        case ENXIO:
                if (!vd->vdev_remove_wanted) {
                        /*
                         * If the provider's error is set, we assume it is
                         * being removed.
                         */
                        if (bp->bio_to->error != 0) {
                                vd->vdev_remove_wanted = B_TRUE;
                                spa_async_request(zio->io_spa,
                                    SPA_ASYNC_REMOVE);
                        } else if (!vd->vdev_delayed_close) {
                                vd->vdev_delayed_close = B_TRUE;
                        }
                }
                break;
        }

        /*
         * We have to split bio freeing into two parts, because the ABD code
         * cannot be called in this context and vdev_op_io_done is not called
         * for ZIO_TYPE_IOCTL zio-s.
         */
        if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
                g_destroy_bio(bp);
                zio->io_bio = NULL;
        }
        zio_delay_interrupt(zio);
}

static void
vdev_geom_io_start(zio_t *zio)
{
        vdev_t *vd;
        struct g_consumer *cp;
        struct bio *bp;

        vd = zio->io_vd;

        switch (zio->io_type) {
        case ZIO_TYPE_IOCTL:
                /* XXPOLICY */
                if (!vdev_readable(vd)) {
                        zio->io_error = SET_ERROR(ENXIO);
                        zio_interrupt(zio);
                        return;
                } else {
                        switch (zio->io_cmd) {
                        case DKIOCFLUSHWRITECACHE:
                                if (zfs_nocacheflush ||
                                    vdev_geom_bio_flush_disable)
                                        break;
                                if (vd->vdev_nowritecache) {
                                        zio->io_error = SET_ERROR(ENOTSUP);
                                        break;
                                }
                                goto sendreq;
                        default:
                                zio->io_error = SET_ERROR(ENOTSUP);
                        }
                }

                zio_execute(zio);
                return;
        case ZIO_TYPE_FREE:
                if (vd->vdev_notrim) {
                        zio->io_error = SET_ERROR(ENOTSUP);
                } else if (!vdev_geom_bio_delete_disable) {
                        goto sendreq;
                }
                zio_execute(zio);
                return;
        }
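        /*
         * ZIO_TYPE_READ and ZIO_TYPE_WRITE have no case above, so they
         * fall out of the switch directly to the request path below.
         */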
sendreq:
        ASSERT(zio->io_type == ZIO_TYPE_READ ||
            zio->io_type == ZIO_TYPE_WRITE ||
            zio->io_type == ZIO_TYPE_FREE ||
            zio->io_type == ZIO_TYPE_IOCTL);

        cp = vd->vdev_tsd;
        if (cp == NULL) {
                zio->io_error = SET_ERROR(ENXIO);
                zio_interrupt(zio);
                return;
        }
        bp = g_alloc_bio();
        bp->bio_caller1 = zio;
        switch (zio->io_type) {
        case ZIO_TYPE_READ:
        case ZIO_TYPE_WRITE:
                zio->io_target_timestamp = zio_handle_io_delay(zio);
                bp->bio_offset = zio->io_offset;
                bp->bio_length = zio->io_size;
                if (zio->io_type == ZIO_TYPE_READ) {
                        bp->bio_cmd = BIO_READ;
                        bp->bio_data =
                            abd_borrow_buf(zio->io_abd, zio->io_size);
                } else {
                        bp->bio_cmd = BIO_WRITE;
                        bp->bio_data =
                            abd_borrow_buf_copy(zio->io_abd, zio->io_size);
                }
                break;
        case ZIO_TYPE_FREE:
                bp->bio_cmd = BIO_DELETE;
                bp->bio_data = NULL;
                bp->bio_offset = zio->io_offset;
                bp->bio_length = zio->io_size;
                break;
        case ZIO_TYPE_IOCTL:
                bp->bio_cmd = BIO_FLUSH;
                bp->bio_data = NULL;
                bp->bio_offset = cp->provider->mediasize;
                bp->bio_length = 0;
                break;
        }
        bp->bio_done = vdev_geom_io_intr;
        zio->io_bio = bp;

        g_io_request(bp, cp);
}

static void
vdev_geom_io_done(zio_t *zio)
{
        struct bio *bp = zio->io_bio;

        if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
                ASSERT(bp == NULL);
                return;
        }

        if (bp == NULL) {
                ASSERT3S(zio->io_error, ==, ENXIO);
                return;
        }

        if (zio->io_type == ZIO_TYPE_READ)
                abd_return_buf_copy(zio->io_abd, bp->bio_data, zio->io_size);
        else
                abd_return_buf(zio->io_abd, bp->bio_data, zio->io_size);

        g_destroy_bio(bp);
        zio->io_bio = NULL;
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}

vdev_ops_t vdev_geom_ops = {
        vdev_geom_open,
        vdev_geom_close,
        vdev_default_asize,
        vdev_geom_io_start,
        vdev_geom_io_done,
        NULL,                   /* vdev_op_state_change */
        NULL,                   /* vdev_op_need_resilver */
        vdev_geom_hold,
        vdev_geom_rele,
        NULL,                   /* vdev_op_remap */
        vdev_default_xlate,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};