/*-
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/diskmbr.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "mbr", G_PART_ALIAS_MBR }
};

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);

/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}

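/*
 * Given a disk size in blocks and a sectors-per-track value, find the
 * largest CHS capacity that still fits in 1023 cylinders and report it
 * together with the corresponding number of heads, preferring more than
 * one head when capacities tie.
 */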
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

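/*
 * Derive the CHS geometry for the table: use the firmware-reported
 * heads/sectors when the consumer provides them, otherwise search the
 * candidate sector counts for the geometry that covers the most blocks.
 */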
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}

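/*
 * Look up the entry with the given index, or allocate a new one and
 * insert it so that the entry list stays sorted by index.  In both
 * cases the start and end of the entry are (re)set.
 */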
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}

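/*
 * Create (or refresh) the GEOM provider that represents a partition
 * entry, sizing it from the entry's start/end and inheriting sector
 * size, stripe size and flags from the underlying provider.
 */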
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	g_error_provider(entry->gpe_pp, 0);
}

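/*
 * Parsers for the string-valued gctl request arguments.  Each one
 * converts its argument into the typed value used by the control
 * handlers and returns EINVAL when the argument cannot be parsed.
 */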
static int
g_part_parm_geom(const char *name, struct g_geom **v)
{
	struct g_geom *gp;

	if (strncmp(name, _PATH_DEV, strlen(_PATH_DEV)) == 0)
		name += strlen(_PATH_DEV);
	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if (!strcmp(name, gp->name))
			break;
	}
	if (gp == NULL)
		return (EINVAL);
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(const char *name, struct g_provider **v)
{
	struct g_provider *pp;

	if (strncmp(name, _PATH_DEV, strlen(_PATH_DEV)) == 0)
		name += strlen(_PATH_DEV);
	pp = g_provider_by_name(name);
	if (pp == NULL)
		return (EINVAL);
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(const char *p, quad_t *v)
{
	char *x;
	quad_t q;

	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0)
		return (EINVAL);
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(const char *p, struct g_part_scheme **v)
{
	struct g_part_scheme *s;

	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL)
		return (EINVAL);
	*v = s;
	return (0);
}

static int
g_part_parm_str(const char *p, const char **v)
{

	if (p[0] == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}

static int
g_part_parm_uint(const char *p, u_int *v)
{
	char *x;
	long l;

	l = strtol(p, &x, 0);
	if (*x != '\0' || l < 0 || l > INT_MAX)
		return (EINVAL);
	*v = (unsigned int)l;
	return (0);
}

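/*
 * Probe all registered partitioning schemes against the consumer and
 * attach the table of the scheme that reports the best (highest,
 * non-positive) probe priority.  Returns ENXIO if no scheme matches.
 */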
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

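/*
 * Handle the "add" verb: validate the requested start, size and index
 * against the existing entries, then create the entry (reusing a
 * deleted slot when possible) and expose it as a new provider.
 */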
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

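/*
 * Handle the "bootcode" verb: verify that the scheme accepts bootcode
 * of the given size and let it install the code.
 */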
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

 fail:
	gctl_error(req, "%d", error);
	return (error);
}

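/*
 * Handle the "commit" verb: scrub the sectors recorded in the head and
 * tail scrub maps, have the scheme write its metadata and then retire
 * all entries that were marked as deleted.
 */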
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

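/*
 * Handle the "create" verb: instantiate a table for the requested
 * scheme on the provider (reusing an existing null-scheme geom when
 * present) and let the scheme initialize it.
 */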
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

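/*
 * Handle the "delete" verb: refuse to delete an entry whose provider is
 * still open, otherwise wither the provider and mark (or free) the
 * entry.
 */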
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

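/*
 * Handle the "destroy" verb: fail if any partition provider is still
 * open (unless forced), ask the scheme to destroy its metadata and
 * replace the table with the null scheme so the geom can be reused.
 */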
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

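/*
 * Handle the "resize" verb: check that the new size keeps the entry
 * within the table and clear of its neighbours, let the scheme resize
 * it and update the provider's media size.
 */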
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
		pp->sectorsize;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_printf(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

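/*
 * Handle the "undo" verb: discard all uncommitted changes, re-probe the
 * on-disk metadata when the table itself was created in this session,
 * and re-read the entries from disk.
 */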
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

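/*
 * Tear down a partitioning geom: release the table's entries and the
 * scheme object before withering the geom itself.
 */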
static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		G_PART_DESTROY(table, NULL);
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}

/*
 * Class methods.
 */

static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	const char *p;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, len, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'g':
			if (!strcmp(ap->name, "geom"))
				parm = G_PART_PARM_GEOM;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 'p':
			if (!strcmp(ap->name, "provider"))
				parm = G_PART_PARM_PROVIDER;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		if (parm == G_PART_PARM_BOOTCODE)
			p = gctl_get_param(req, ap->name, &len);
		else
			p = gctl_get_asciiparam(req, ap->name);
		if (p == NULL) {
			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(p, &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			gpp.gpp_codeptr = p;
			gpp.gpp_codesize = len;
			error = 0;
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_uint(p, &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			if (p[0] == '\0')
				continue;
			error = g_part_parm_str(p, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint(p, &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(p, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_uint(p, &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			/* An empty label is always valid. */
			gpp.gpp_label = p;
			error = 0;
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(p, &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(p, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(p, &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(p, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint(p, &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			break;
		}
		if (error) {
			gctl_error(req, "%d %s '%s'", error, ap->name, p);
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
1569                 error = g_part_ctl_create(req, &gpp);
1570                 break;
1571         case G_PART_CTL_DELETE:
1572                 error = g_part_ctl_delete(req, &gpp);
1573                 break;
1574         case G_PART_CTL_DESTROY:
1575                 error = g_part_ctl_destroy(req, &gpp);
1576                 break;
1577         case G_PART_CTL_MODIFY:
1578                 error = g_part_ctl_modify(req, &gpp);
1579                 break;
1580         case G_PART_CTL_MOVE:
1581                 error = g_part_ctl_move(req, &gpp);
1582                 break;
1583         case G_PART_CTL_RECOVER:
1584                 error = g_part_ctl_recover(req, &gpp);
1585                 break;
1586         case G_PART_CTL_RESIZE:
1587                 error = g_part_ctl_resize(req, &gpp);
1588                 break;
1589         case G_PART_CTL_SET:
1590                 error = g_part_ctl_setunset(req, &gpp, 1);
1591                 break;
1592         case G_PART_CTL_UNDO:
1593                 error = g_part_ctl_undo(req, &gpp);
1594                 break;
1595         case G_PART_CTL_UNSET:
1596                 error = g_part_ctl_setunset(req, &gpp, 0);
1597                 break;
1598         }
1599
1600         /* Implement automatic commit. */
1601         if (!error) {
1602                 auto_commit = (modifies &&
1603                     (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1604                     strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1605                 if (auto_commit) {
1606                         KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
1607                         error = g_part_ctl_commit(req, &gpp);
1608                 }
1609         }
1610
1611  out:
1612         if (error && close_on_error) {
1613                 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1614                 table->gpt_opened = 0;
1615         }
1616 }
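
/*
 * Illustration (not part of the original file): the verb handler above
 * gathers the validated request arguments into a struct g_part_parms and
 * lets the active scheme veto or adjust them through G_PART_PRECHECK before
 * dispatching.  A minimal, hypothetical scheme method could look like the
 * sketch below; "foo" and FOO_MAX_ENTRIES are invented, while the gpp_*
 * fields and G_PART_* constants are the ones used in the handler above.
 */
#if 0   /* hedged sketch only -- not compiled */
static int
g_part_foo_precheck(struct g_part_table *table, enum g_part_ctl req,
    struct g_part_parms *gpp)
{

        switch (req) {
        case G_PART_CTL_CREATE:
                /* Refuse tables with more slots than the on-disk format. */
                if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
                    gpp->gpp_entries > FOO_MAX_ENTRIES)
                        return (EINVAL);
                break;
        case G_PART_CTL_ADD:
                /* This scheme insists on an explicit partition type. */
                if (!(gpp->gpp_parms & G_PART_PARM_TYPE))
                        return (ENOATTR);
                break;
        default:
                break;
        }
        return (0);
}
#endif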
1617
1618 static int
1619 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1620     struct g_geom *gp)
1621 {
1622
1623         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1624         g_topology_assert();
1625
1626         g_part_wither(gp, EINVAL);
1627         return (0);
1628 }
1629
1630 static struct g_geom *
1631 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1632 {
1633         struct g_consumer *cp;
1634         struct g_geom *gp;
1635         struct g_part_entry *entry;
1636         struct g_part_table *table;
1637         struct root_hold_token *rht;
1638         int attr, depth;
1639         int error;
1640
1641         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
1642         g_topology_assert();
1643
1644         /* Skip providers that are already open for writing. */
1645         if (pp->acw > 0)
1646                 return (NULL);
1647
1648         /*
1649          * Create a GEOM with consumer and hook it up to the provider.
1650          * With that we become part of the topology. Obtain read access
1651          * to the provider.
1652          */
1653         gp = g_new_geomf(mp, "%s", pp->name);
1654         cp = g_new_consumer(gp);
1655         error = g_attach(cp, pp);
1656         if (error == 0)
1657                 error = g_access(cp, 1, 0, 0);
1658         if (error != 0) {
1659                 g_part_wither(gp, error);
1660                 return (NULL);
1661         }
1662
1663         rht = root_mount_hold(mp->name);
1664         g_topology_unlock();
1665
1666         /*
1667          * Short-circuit the whole probing exercise when there's no
1668          * media present.
1669          */
1670         if (pp->mediasize == 0 || pp->sectorsize == 0) {
1671                 error = ENODEV;
1672                 goto fail;
1673         }
1674
1675         /* Make sure we can nest and if so, determine our depth. */
1676         error = g_getattr("PART::isleaf", cp, &attr);
1677         if (!error && attr) {
1678                 error = ENODEV;
1679                 goto fail;
1680         }
1681         error = g_getattr("PART::depth", cp, &attr);
1682         depth = (!error) ? attr + 1 : 0;
1683
1684         error = g_part_probe(gp, cp, depth);
1685         if (error)
1686                 goto fail;
1687
1688         table = gp->softc;
1689
1690         /*
1691          * Synthesize a disk geometry. Some partitioning schemes
1692          * depend on it and since some file systems need it even
1693          * when the partition scheme doesn't, we do it here in
1694          * scheme-independent code.
1695          */
1696         g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1697
1698         error = G_PART_READ(table, cp);
1699         if (error)
1700                 goto fail;
1701
1702         g_topology_lock();
1703         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1704                 if (!entry->gpe_internal)
1705                         g_part_new_provider(gp, table, entry);
1706         }
1707
1708         root_mount_rel(rht);
1709         g_access(cp, -1, 0, 0);
1710         return (gp);
1711
1712  fail:
1713         g_topology_lock();
1714         root_mount_rel(rht);
1715         g_access(cp, -1, 0, 0);
1716         g_part_wither(gp, error);
1717         return (NULL);
1718 }
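
/*
 * Illustration (not part of the original file): g_part_geometry(), defined
 * earlier in this file, synthesizes the fwheads/fwsectors values that the
 * BIO_GETATTR handler below reports.  The sketch here shows only the general
 * idea of such a synthesis -- pick a conventional head/sector pair and derive
 * the cylinder count from the block count -- and is NOT the algorithm this
 * file actually uses.
 */
#if 0   /* hedged sketch only -- not compiled */
static void
example_fake_geometry(uint64_t blocks, u_int *heads, u_int *sectors,
    uint64_t *cylinders)
{

        /* 255 heads x 63 sectors/track is the classic BIOS-era convention. */
        *heads = 255;
        *sectors = 63;
        *cylinders = blocks / (255 * 63);
}
#endif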
1719
1720 /*
1721  * Geom methods.
1722  */
1723
1724 static int
1725 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1726 {
1727         struct g_consumer *cp;
1728
1729         G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1730             dw, de));
1731
1732         cp = LIST_FIRST(&pp->geom->consumer);
1733
1734         /* We always gain write-exclusive access. */
1735         return (g_access(cp, dr, dw, dw + de));
1736 }
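
/*
 * Illustration (not part of the original file): the access method above
 * folds the caller's write count into the exclusive count on the consumer
 * of the underlying provider, so a writer of any partition also holds
 * exclusive access to the parent device.  Example mappings (values
 * hypothetical):
 *
 *      read-only open       dr=1 dw=0 de=0  ->  g_access(cp, 1, 0, 0)
 *      read-write open      dr=1 dw=1 de=0  ->  g_access(cp, 1, 1, 1)
 *      exclusive r/w open   dr=1 dw=1 de=1  ->  g_access(cp, 1, 1, 2)
 */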
1737
1738 static void
1739 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1740     struct g_consumer *cp, struct g_provider *pp)
1741 {
1742         char buf[64];
1743         struct g_part_entry *entry;
1744         struct g_part_table *table;
1745
1746         KASSERT(sb != NULL && gp != NULL, (__func__));
1747         table = gp->softc;
1748
1749         if (indent == NULL) {
1750                 KASSERT(cp == NULL && pp != NULL, (__func__));
1751                 entry = pp->private;
1752                 if (entry == NULL)
1753                         return;
1754                 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
1755                     (uintmax_t)entry->gpe_offset,
1756                     G_PART_TYPE(table, entry, buf, sizeof(buf)));
1757                 /*
1758                  * libdisk compatibility quirk - the scheme dumps the
1759                  * slicer name and partition type in a way that is
1760                  * compatible with libdisk. When libdisk is not used
1761                  * anymore, this should go away.
1762                  */
1763                 G_PART_DUMPCONF(table, entry, sb, indent);
1764         } else if (cp != NULL) {        /* Consumer configuration. */
1765                 KASSERT(pp == NULL, (__func__));
1766                 /* none */
1767         } else if (pp != NULL) {        /* Provider configuration. */
1768                 entry = pp->private;
1769                 if (entry == NULL)
1770                         return;
1771                 sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
1772                     (uintmax_t)entry->gpe_start);
1773                 sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
1774                     (uintmax_t)entry->gpe_end);
1775                 sbuf_printf(sb, "%s<index>%u</index>\n", indent,
1776                     entry->gpe_index);
1777                 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
1778                     G_PART_TYPE(table, entry, buf, sizeof(buf)));
1779                 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
1780                     (uintmax_t)entry->gpe_offset);
1781                 sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
1782                     (uintmax_t)pp->mediasize);
1783                 G_PART_DUMPCONF(table, entry, sb, indent);
1784         } else {                        /* Geom configuration. */
1785                 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
1786                     table->gpt_scheme->name);
1787                 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
1788                     table->gpt_entries);
1789                 sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
1790                     (uintmax_t)table->gpt_first);
1791                 sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
1792                     (uintmax_t)table->gpt_last);
1793                 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
1794                     table->gpt_sectors);
1795                 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
1796                     table->gpt_heads);
1797                 sbuf_printf(sb, "%s<state>%s</state>\n", indent,
1798                     table->gpt_corrupt ? "CORRUPT" : "OK");
1799                 G_PART_DUMPCONF(table, NULL, sb, indent);
1800         }
1801 }
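
/*
 * Illustration (not part of the original file): inside kern.geom.confxml the
 * provider branch above yields a <config> fragment roughly like the
 * following (values hypothetical; scheme-specific elements appended by
 * G_PART_DUMPCONF are omitted):
 *
 *      <config>
 *        <start>34</start>
 *        <end>1048609</end>
 *        <index>1</index>
 *        <type>freebsd-ufs</type>
 *        <offset>17408</offset>
 *        <length>536870912</length>
 *      </config>
 *
 * The numbers are consistent with a 512-byte-sector disk: the entry spans
 * sectors 34-1048609, i.e. 1048576 sectors, so offset = 34 * 512 and
 * length = 1048576 * 512.
 */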
1802
1803 static void
1804 g_part_orphan(struct g_consumer *cp)
1805 {
1806         struct g_provider *pp;
1807         struct g_part_table *table;
1808
1809         pp = cp->provider;
1810         KASSERT(pp != NULL, (__func__));
1811         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
1812         g_topology_assert();
1813
1814         KASSERT(pp->error != 0, (__func__));
1815         table = cp->geom->softc;
1816         if (table != NULL && table->gpt_opened)
1817                 g_access(cp, -1, -1, -1);
1818         g_part_wither(cp->geom, pp->error);
1819 }
1820
1821 static void
1822 g_part_spoiled(struct g_consumer *cp)
1823 {
1824
1825         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
1826         g_topology_assert();
1827
1828         g_part_wither(cp->geom, ENXIO);
1829 }
1830
1831 static void
1832 g_part_start(struct bio *bp)
1833 {
1834         struct bio *bp2;
1835         struct g_consumer *cp;
1836         struct g_geom *gp;
1837         struct g_part_entry *entry;
1838         struct g_part_table *table;
1839         struct g_kerneldump *gkd;
1840         struct g_provider *pp;
1841
1842         pp = bp->bio_to;
1843         gp = pp->geom;
1844         table = gp->softc;
1845         cp = LIST_FIRST(&gp->consumer);
1846
1847         G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
1848             pp->name));
1849
1850         entry = pp->private;
1851         if (entry == NULL) {
1852                 g_io_deliver(bp, ENXIO);
1853                 return;
1854         }
1855
1856         switch (bp->bio_cmd) {
1857         case BIO_DELETE:
1858         case BIO_READ:
1859         case BIO_WRITE:
1860                 if (bp->bio_offset >= pp->mediasize) {
1861                         g_io_deliver(bp, EIO);
1862                         return;
1863                 }
1864                 bp2 = g_clone_bio(bp);
1865                 if (bp2 == NULL) {
1866                         g_io_deliver(bp, ENOMEM);
1867                         return;
1868                 }
1869                 if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
1870                         bp2->bio_length = pp->mediasize - bp2->bio_offset;
1871                 bp2->bio_done = g_std_done;
1872                 bp2->bio_offset += entry->gpe_offset;
1873                 g_io_request(bp2, cp);
1874                 return;
1875         case BIO_FLUSH:
1876                 break;
1877         case BIO_GETATTR:
1878                 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
1879                         return;
1880                 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
1881                         return;
1882                 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
1883                         return;
1884                 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
1885                         return;
1886                 if (g_handleattr_str(bp, "PART::scheme",
1887                     table->gpt_scheme->name))
1888                         return;
1889                 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
1890                         /*
1891                          * Check that the partition is suitable for kernel
1892                          * dumps. Typically only swap partitions should be
1893                          * used.
1894                          */
1895                         if (!G_PART_DUMPTO(table, entry)) {
1896                                 g_io_deliver(bp, ENODEV);
1897                                 printf("GEOM_PART: Partition '%s' not suitable"
1898                                     " for kernel dumps (wrong type?)\n",
1899                                     pp->name);
1900                                 return;
1901                         }
1902                         gkd = (struct g_kerneldump *)bp->bio_data;
1903                         if (gkd->offset >= pp->mediasize) {
1904                                 g_io_deliver(bp, EIO);
1905                                 return;
1906                         }
1907                         if (gkd->offset + gkd->length > pp->mediasize)
1908                                 gkd->length = pp->mediasize - gkd->offset;
1909                         gkd->offset += entry->gpe_offset;
1910                 }
1911                 break;
1912         default:
1913                 g_io_deliver(bp, EOPNOTSUPP);
1914                 return;
1915         }
1916
1917         bp2 = g_clone_bio(bp);
1918         if (bp2 == NULL) {
1919                 g_io_deliver(bp, ENOMEM);
1920                 return;
1921         }
1922         bp2->bio_done = g_std_done;
1923         g_io_request(bp2, cp);
1924 }
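
/*
 * Illustration (not part of the original file): for BIO_READ/BIO_WRITE/
 * BIO_DELETE the start method above clips the request to the partition and
 * then shifts it by the partition's byte offset.  Worked example (values
 * hypothetical): with gpe_offset = 34 * 512 = 17408, a 16 KB read at
 * partition-relative offset 4096 is forwarded as a clone with
 * bio_offset = 4096 + 17408 = 21504; a request that would run past the
 * partition end first has its bio_length trimmed to
 * pp->mediasize - bio_offset.
 */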
1925
1926 static void
1927 g_part_init(struct g_class *mp)
1928 {
1929
1930         TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
1931 }
1932
1933 static void
1934 g_part_fini(struct g_class *mp)
1935 {
1936
1937         TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
1938 }
1939
1940 static void
1941 g_part_unload_event(void *arg, int flag)
1942 {
1943         struct g_consumer *cp;
1944         struct g_geom *gp;
1945         struct g_provider *pp;
1946         struct g_part_scheme *scheme;
1947         struct g_part_table *table;
1948         uintptr_t *xchg;
1949         int acc, error;
1950
1951         if (flag == EV_CANCEL)
1952                 return;
1953
1954         xchg = arg;
1955         error = 0;
1956         scheme = (void *)(*xchg);
1957
1958         g_topology_assert();
1959
1960         LIST_FOREACH(gp, &g_part_class.geom, geom) {
1961                 table = gp->softc;
1962                 if (table->gpt_scheme != scheme)
1963                         continue;
1964
1965                 acc = 0;
1966                 LIST_FOREACH(pp, &gp->provider, provider)
1967                         acc += pp->acr + pp->acw + pp->ace;
1968                 LIST_FOREACH(cp, &gp->consumer, consumer)
1969                         acc += cp->acr + cp->acw + cp->ace;
1970
1971                 if (!acc)
1972                         g_part_wither(gp, ENOSYS);
1973                 else
1974                         error = EBUSY;
1975         }
1976
1977         if (!error)
1978                 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
1979
1980         *xchg = error;
1981 }
1982
1983 int
1984 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
1985 {
1986         uintptr_t arg;
1987         int error;
1988
1989         switch (type) {
1990         case MOD_LOAD:
1991                 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list);
1992
1993                 error = g_retaste(&g_part_class);
1994                 if (error)
1995                         TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
1996                 break;
1997         case MOD_UNLOAD:
1998                 arg = (uintptr_t)scheme;
1999                 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
2000                     NULL);
2001                 if (!error)
2002                         error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg;
2003                 break;
2004         default:
2005                 error = EOPNOTSUPP;
2006                 break;
2007         }
2008
2009         return (error);
2010 }
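
/*
 * Illustration (not part of the original file): a partitioning scheme module
 * normally funnels its module events into g_part_modevent() so that MOD_LOAD
 * registers the scheme and retastes, and MOD_UNLOAD is refused while tables
 * of that scheme are still in use.  A hedged sketch for a hypothetical "foo"
 * scheme follows; in the tree this boilerplate is typically hidden behind a
 * declaration macro in g_part.h.
 */
#if 0   /* hedged sketch only -- not compiled */
static int
g_part_foo_modevent(module_t mod, int type, void *data)
{

        return (g_part_modevent(mod, type, &g_part_foo_scheme));
}

static moduledata_t g_part_foo_mod = {
        "g_part_foo",
        g_part_foo_modevent,
        &g_part_foo_scheme
};
DECLARE_MODULE(g_part_foo, g_part_foo_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_DEPEND(g_part_foo, g_part, 0, 0, 0);
#endif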