]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/geom/part/g_part.c
When IPv6 packet is handled by O_REJECT opcode, convert ICMP code
[FreeBSD/FreeBSD.git] / sys / geom / part / g_part.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/bio.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/kobj.h>
37 #include <sys/limits.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/mutex.h>
41 #include <sys/queue.h>
42 #include <sys/sbuf.h>
43 #include <sys/sysctl.h>
44 #include <sys/systm.h>
45 #include <sys/uuid.h>
46 #include <geom/geom.h>
47 #include <geom/geom_ctl.h>
48 #include <geom/geom_int.h>
49 #include <geom/part/g_part.h>
50
51 #include "g_part_if.h"
52
/*
 * Userland defines _PATH_DEV in <paths.h>; provide a fallback here so
 * "/dev/" prefixes can be stripped from geom/provider names passed in
 * from userland (see g_part_parm_geom() and g_part_parm_provider()).
 */
#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif

/* Empty kobj method table backing the placeholder "null" scheme. */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

/*
 * Placeholder scheme for tables without a real partitioning scheme;
 * it is explicitly skipped wherever the scheme list is iterated.
 */
static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

/* List of all registered partitioning schemes (GPT, MBR, BSD, ...). */
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
69
/*
 * Mapping between the user-visible partition type aliases (as used by
 * gpart(8)) and their scheme-independent identifiers.  Kept sorted by
 * lexeme; the array is sized to hold exactly G_PART_ALIAS_COUNT entries.
 */
struct g_part_alias_list {
	const char *lexeme;		/* Canonical alias name. */
	enum g_part_alias alias;	/* Scheme-independent identifier. */
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
	{ "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
	{ "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
	{ "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
	{ "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
	{ "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
	{ "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
	{ "dragonfly-label32", G_PART_ALIAS_DFBSD },
	{ "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
	{ "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
	{ "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
	{ "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
	{ "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat16", G_PART_ALIAS_MS_FAT16 },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ms-spaces", G_PART_ALIAS_MS_SPACES },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
	{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
	{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
	{ "vmware-vmfs", G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
	{ "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
};
132
SYSCTL_DECL(_kern_geom);
/* kern.geom.part: tunables controlling the partitioning class. */
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
/* When set, corrupt tables are rejected; otherwise only flagged. */
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");
/* When set, tables are resized automatically when the media grows. */
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");
144
/*
 * The GEOM partitioning class.
 */
/* Class-level method implementations (defined later in this file). */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

/* Per-geom method implementations (defined later in this file). */
static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};

/* Register the class with GEOM and tag the kernel module version. */
DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);
183
184 /*
185  * Support functions.
186  */
187
188 static void g_part_wither(struct g_geom *, int);
189
190 const char *
191 g_part_alias_name(enum g_part_alias alias)
192 {
193         int i;
194
195         for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
196                 if (g_part_alias_list[i].alias != alias)
197                         continue;
198                 return (g_part_alias_list[i].lexeme);
199         }
200
201         return (NULL);
202 }
203
/*
 * For a given sector count, find the head count that yields the largest
 * CHS-addressable capacity for a disk of 'blocks' sectors.  The winning
 * capacity (in sectors) and head count are returned via *bestchs and
 * *bestheads; both are zero when no candidate geometry fits.
 */
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	/* Conventional head counts, increasing; the list is 0-terminated. */
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t capacity, cylinders;
	u_int heads;
	int i;

	*bestchs = 0;
	*bestheads = 0;
	for (i = 0; (heads = candidate_heads[i]) != 0; i++) {
		cylinders = blocks / heads / sectors;
		/* Growing the head count only shrinks the cylinder count. */
		if (cylinders < heads || cylinders < sectors)
			break;
		/* Cylinder numbers are limited to 10 bits in CHS. */
		if (cylinders > 1023)
			continue;
		capacity = cylinders * heads * sectors;
		/* Prefer more capacity; break ties away from a single head. */
		if (capacity > *bestchs ||
		    (capacity == *bestchs && *bestheads == 1)) {
			*bestchs = capacity;
			*bestheads = heads;
		}
	}
}
229
/*
 * Establish the CHS geometry recorded in the partition table.  Firmware
 * values (GEOM::fwsectors/fwheads attributes) are used verbatim when
 * both are available and non-zero; otherwise the best synthetic
 * geometry is searched among conventional sector counts.
 */
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		/* No usable firmware geometry; synthesize one. */
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		/* Trust the firmware-reported geometry as-is. */
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
276
/*
 * Debug printf used by the integrity checker below; only emits output
 * during verbose boots.  Wrapped in do/while(0) so the macro behaves as
 * a single statement and is safe in unbraced if/else bodies (the bare
 * "if (bootverbose) { ... }" form had a dangling-else hazard).
 */
#define	DPRINTF(...)	do {					\
	if (bootverbose) {					\
		printf("GEOM_PART: " __VA_ARGS__);		\
	}							\
} while (0)
280
/*
 * Validate the partition table produced by a scheme's read method.
 * The table's LBA window must fit on the provider, every live entry
 * must lie within that window, and no two live entries may overlap.
 * When problems are found the outcome depends on the
 * kern.geom.part.check_integrity tunable: if set, EINVAL is returned;
 * otherwise the table is accepted but marked corrupt.
 */
static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		/* Deleted and internal entries are exempt from checking. */
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		/* Entry must lie within [gpt_first, gpt_last]. */
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		/* Warn (only) about entries not aligned to the stripe. */
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %u bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		/* Check e1 for overlap against every entry after it. */
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		/* Tolerate the problems but remember the table is bad. */
		table->gpt_corrupt = 1;
	}
	return (0);
}
394 #undef  DPRINTF
395
/*
 * Return the table entry with the given index, (re)setting its start
 * and end LBAs.  If no such entry exists, a new zeroed entry is
 * allocated and inserted so the gpt_entry list stays sorted by index.
 */
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			/* Passed the slot; insert after 'last' below. */
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		/* Existing entry: reset offset so it gets recomputed. */
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}
426
/*
 * Create (or refresh) the GEOM provider exposing a partition entry as a
 * disk-like device.  The provider itself is created only once; the
 * size and stripe-related fields are recomputed on every call.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	struct g_geom_alias *gap;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* The entry's byte offset is at least its start LBA in bytes. */
	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		/*
		 * Add aliases to the geom before we create the provider so that
		 * geom_dev can taste it with all the aliases in place so all
		 * the aliased dev_t instances get created for each partition
		 * (eg foo5p7 gets created for bar5p7 when foo is an alias of bar).
		 */
		LIST_FOREACH(gap, &table->gpt_gp->aliases, ga_next) {
			sb = sbuf_new_auto();
			G_PART_FULLNAME(table, entry, sb, gap->ga_alias);
			sbuf_finish(sb);
			g_geom_add_alias(gp, sbuf_data(sb));
			sbuf_delete(sb);
		}
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	/* Subtract any extra metadata offset inside the partition. */
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}
478
479 static struct g_geom*
480 g_part_find_geom(const char *name)
481 {
482         struct g_geom *gp;
483         LIST_FOREACH(gp, &g_part_class.geom, geom) {
484                 if ((gp->flags & G_GEOM_WITHER) == 0 &&
485                     strcmp(name, gp->name) == 0)
486                         break;
487         }
488         return (gp);
489 }
490
491 static int
492 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
493 {
494         struct g_geom *gp;
495         const char *gname;
496
497         gname = gctl_get_asciiparam(req, name);
498         if (gname == NULL)
499                 return (ENOATTR);
500         if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
501                 gname += sizeof(_PATH_DEV) - 1;
502         gp = g_part_find_geom(gname);
503         if (gp == NULL) {
504                 gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
505                 return (EINVAL);
506         }
507         *v = gp;
508         return (0);
509 }
510
511 static int
512 g_part_parm_provider(struct gctl_req *req, const char *name,
513     struct g_provider **v)
514 {
515         struct g_provider *pp;
516         const char *pname;
517
518         pname = gctl_get_asciiparam(req, name);
519         if (pname == NULL)
520                 return (ENOATTR);
521         if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
522                 pname += sizeof(_PATH_DEV) - 1;
523         pp = g_provider_by_name(pname);
524         if (pp == NULL) {
525                 gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
526                 return (EINVAL);
527         }
528         *v = pp;
529         return (0);
530 }
531
532 static int
533 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
534 {
535         const char *p;
536         char *x;
537         quad_t q;
538
539         p = gctl_get_asciiparam(req, name);
540         if (p == NULL)
541                 return (ENOATTR);
542         q = strtoq(p, &x, 0);
543         if (*x != '\0' || q < 0) {
544                 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
545                 return (EINVAL);
546         }
547         *v = q;
548         return (0);
549 }
550
551 static int
552 g_part_parm_scheme(struct gctl_req *req, const char *name,
553     struct g_part_scheme **v)
554 {
555         struct g_part_scheme *s;
556         const char *p;
557
558         p = gctl_get_asciiparam(req, name);
559         if (p == NULL)
560                 return (ENOATTR);
561         TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
562                 if (s == &g_part_null_scheme)
563                         continue;
564                 if (!strcasecmp(s->name, p))
565                         break;
566         }
567         if (s == NULL) {
568                 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
569                 return (EINVAL);
570         }
571         *v = s;
572         return (0);
573 }
574
575 static int
576 g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
577 {
578         const char *p;
579
580         p = gctl_get_asciiparam(req, name);
581         if (p == NULL)
582                 return (ENOATTR);
583         /* An empty label is always valid. */
584         if (strcmp(name, "label") != 0 && p[0] == '\0') {
585                 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
586                 return (EINVAL);
587         }
588         *v = p;
589         return (0);
590 }
591
592 static int
593 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
594 {
595         const intmax_t *p;
596         int size;
597
598         p = gctl_get_param(req, name, &size);
599         if (p == NULL)
600                 return (ENOATTR);
601         if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
602                 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
603                 return (EINVAL);
604         }
605         *v = (u_int)*p;
606         return (0);
607 }
608
609 static int
610 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
611 {
612         const uint32_t *p;
613         int size;
614
615         p = gctl_get_param(req, name, &size);
616         if (p == NULL)
617                 return (ENOATTR);
618         if (size != sizeof(*p) || *p > INT_MAX) {
619                 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
620                 return (EINVAL);
621         }
622         *v = (u_int)*p;
623         return (0);
624 }
625
626 static int
627 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
628     unsigned int *s)
629 {
630         const void *p;
631         int size;
632
633         p = gctl_get_param(req, name, &size);
634         if (p == NULL)
635                 return (ENOATTR);
636         *v = p;
637         *s = size;
638         return (0);
639 }
640
/*
 * Find the partitioning scheme that best recognizes the media behind
 * consumer 'cp'.  A scheme's probe method returns a priority: 0 means
 * a perfect match, negative values are weaker matches, and positive
 * values mean error/no match.  The winning scheme's table is left in
 * gp->softc.  Returns ENXIO when no scheme matched at all.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Re-probe an already-attached scheme first, if any. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		/* Each scheme probes with its own freshly created table. */
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Best match so far; its table replaces gp->softc. */
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}
682
683 /*
684  * Control request functions.
685  */
686
/*
 * Handle the "add" verb: create a partition entry covering the LBA
 * range [gpp_start, gpp_start + gpp_size - 1] and expose it as a
 * provider.  The range must lie inside the table's usable window and
 * must not overlap any existing live entry.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* Range checks against the table's usable LBA window. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the (index-sorted) entry list: pick the requested index or
	 * the first free one, remember a deleted entry to reuse (delent),
	 * track the insertion point (last), and reject any overlap with
	 * a live, non-internal entry.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	/* Reuse the matching deleted entry if any, else allocate afresh. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Insert so the entry list stays sorted by index. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %u bytes\n", pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
801
802 static int
803 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
804 {
805         struct g_geom *gp;
806         struct g_part_table *table;
807         struct sbuf *sb;
808         int error, sz;
809
810         gp = gpp->gpp_geom;
811         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
812         g_topology_assert();
813
814         table = gp->softc;
815         sz = table->gpt_scheme->gps_bootcodesz;
816         if (sz == 0) {
817                 error = ENODEV;
818                 goto fail;
819         }
820         if (gpp->gpp_codesize > sz) {
821                 error = EFBIG;
822                 goto fail;
823         }
824
825         error = G_PART_BOOTCODE(table, gpp);
826         if (error)
827                 goto fail;
828
829         /* Provide feedback if so requested. */
830         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
831                 sb = sbuf_new_auto();
832                 sbuf_printf(sb, "bootcode written to %s\n", gp->name);
833                 sbuf_finish(sb);
834                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
835                 sbuf_delete(sb);
836         }
837         return (0);
838
839  fail:
840         gctl_error(req, "%d", error);
841         return (error);
842 }
843
/*
 * Handle the 'commit' verb: flush all pending changes of an opened
 * table to the media — zero any sectors scheduled for scrubbing, let
 * the scheme write its metadata, then retire deleted entries and clear
 * the created/modified flags.  Returns 0 or an errno.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Only an opened (i.e. modified) table can be committed. */
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	/* Drop the topology lock around the actual disk I/O below. */
	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/*
		 * gpt_smhead/gpt_smtail are bitmaps of sectors at the
		 * start resp. end of the media that must be zeroed;
		 * write a zero-filled sector for every bit set.
		 */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			/* Bit i covers the (i+1)'th sector from the end. */
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	/*
	 * A null-scheme table means the table was destroyed; committing
	 * that finalizes the destruction by withering the geom.
	 */
	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* Metadata is on disk: purge deleted entries, reset the flags. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed. */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	/* Re-acquire the topology lock before reporting the error. */
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
931
/*
 * Handle the 'create' verb: put a new partition table of the requested
 * scheme on a provider.  If the provider already carries a null-scheme
 * placeholder table (left behind by 'destroy'), its geom and consumer
 * are reused; otherwise a new geom is created and the provider opened.
 * Returns 0 or an errno.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		/* A null-scheme table is only a placeholder: reuse it. */
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	/* Validate a user-supplied entry count against scheme limits. */
	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		/* Fresh geom: attach a consumer and open the provider. */
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Inherit state from the placeholder table. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partitition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* We created the geom above: tear it down again. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* We reused a placeholder: restore the null table. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
1059
1060 static int
1061 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
1062 {
1063         struct g_geom *gp;
1064         struct g_provider *pp;
1065         struct g_part_entry *entry;
1066         struct g_part_table *table;
1067         struct sbuf *sb;
1068
1069         gp = gpp->gpp_geom;
1070         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1071         g_topology_assert();
1072
1073         table = gp->softc;
1074
1075         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1076                 if (entry->gpe_deleted || entry->gpe_internal)
1077                         continue;
1078                 if (entry->gpe_index == gpp->gpp_index)
1079                         break;
1080         }
1081         if (entry == NULL) {
1082                 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1083                 return (ENOENT);
1084         }
1085
1086         pp = entry->gpe_pp;
1087         if (pp != NULL) {
1088                 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
1089                         gctl_error(req, "%d", EBUSY);
1090                         return (EBUSY);
1091                 }
1092
1093                 pp->private = NULL;
1094                 entry->gpe_pp = NULL;
1095         }
1096
1097         if (pp != NULL)
1098                 g_wither_provider(pp, ENXIO);
1099
1100         /* Provide feedback if so requested. */
1101         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1102                 sb = sbuf_new_auto();
1103                 G_PART_FULLNAME(table, entry, sb, gp->name);
1104                 sbuf_cat(sb, " deleted\n");
1105                 sbuf_finish(sb);
1106                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1107                 sbuf_delete(sb);
1108         }
1109
1110         if (entry->gpe_created) {
1111                 LIST_REMOVE(entry, gpe_entry);
1112                 g_free(entry);
1113         } else {
1114                 entry->gpe_modified = 0;
1115                 entry->gpe_deleted = 1;
1116         }
1117         return (0);
1118 }
1119
/*
 * Handle the 'destroy' verb: remove the partition table from the geom.
 * Without 'force' any live entry is a hard failure; with 'force' only
 * entries whose providers are open block the operation, and all other
 * providers are withered.  On success the scheme table is replaced by
 * a null-scheme placeholder so a later 'create' or 'commit' can reuse
 * the geom.  Returns 0 or an errno.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			/* With force, only an open provider is "busy". */
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Install a null-scheme placeholder table on the geom. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	/* Last addressable sector on the media. */
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	/* Carry over state the placeholder still needs at commit time. */
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	/* Dispose of the old table and its remaining entries. */
	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1202
1203 static int
1204 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
1205 {
1206         struct g_geom *gp;
1207         struct g_part_entry *entry;
1208         struct g_part_table *table;
1209         struct sbuf *sb;
1210         int error;
1211
1212         gp = gpp->gpp_geom;
1213         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1214         g_topology_assert();
1215
1216         table = gp->softc;
1217
1218         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1219                 if (entry->gpe_deleted || entry->gpe_internal)
1220                         continue;
1221                 if (entry->gpe_index == gpp->gpp_index)
1222                         break;
1223         }
1224         if (entry == NULL) {
1225                 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1226                 return (ENOENT);
1227         }
1228
1229         error = G_PART_MODIFY(table, entry, gpp);
1230         if (error) {
1231                 gctl_error(req, "%d", error);
1232                 return (error);
1233         }
1234
1235         if (!entry->gpe_created)
1236                 entry->gpe_modified = 1;
1237
1238         /* Provide feedback if so requested. */
1239         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1240                 sb = sbuf_new_auto();
1241                 G_PART_FULLNAME(table, entry, sb, gp->name);
1242                 sbuf_cat(sb, " modified\n");
1243                 sbuf_finish(sb);
1244                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1245                 sbuf_delete(sb);
1246         }
1247         return (0);
1248 }
1249
1250 static int
1251 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
1252 {
1253         gctl_error(req, "%d verb 'move'", ENOSYS);
1254         return (ENOSYS);
1255 }
1256
1257 static int
1258 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
1259 {
1260         struct g_part_table *table;
1261         struct g_geom *gp;
1262         struct sbuf *sb;
1263         int error, recovered;
1264
1265         gp = gpp->gpp_geom;
1266         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1267         g_topology_assert();
1268         table = gp->softc;
1269         error = recovered = 0;
1270
1271         if (table->gpt_corrupt) {
1272                 error = G_PART_RECOVER(table);
1273                 if (error == 0)
1274                         error = g_part_check_integrity(table,
1275                             LIST_FIRST(&gp->consumer));
1276                 if (error) {
1277                         gctl_error(req, "%d recovering '%s' failed",
1278                             error, gp->name);
1279                         return (error);
1280                 }
1281                 recovered = 1;
1282         }
1283         /* Provide feedback if so requested. */
1284         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1285                 sb = sbuf_new_auto();
1286                 if (recovered)
1287                         sbuf_printf(sb, "%s recovered\n", gp->name);
1288                 else
1289                         sbuf_printf(sb, "%s recovering is not needed\n",
1290                             gp->name);
1291                 sbuf_finish(sb);
1292                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1293                 sbuf_delete(sb);
1294         }
1295         return (0);
1296 }
1297
/*
 * Handle the 'resize' verb: change the size of the entry with the
 * given index, keeping its start sector fixed.  The new extent must
 * stay within the table and must not run into any other live entry.
 * Returns 0 or an errno.
 */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;
	off_t mediasize;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	/*
	 * Check that the grown extent does not collide with any other
	 * live entry: neither may the new end land inside another
	 * entry, nor may the new extent swallow one entirely.
	 */
	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	/*
	 * NOTE(review): bit 0x10 of g_debugflags bypasses this safety
	 * check (the "allow foot shooting" debug flag, presumably) —
	 * consider replacing the magic 16 with a named constant.
	 */
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		/* EBUSY from the scheme signals an alignment problem. */
		gctl_error(req, "%d%s", error, error != EBUSY ? "":
		    " resizing will lead to unexpected shrinking"
		    " due to alignment");
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
		pp->sectorsize;
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1387
1388 static int
1389 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1390     unsigned int set)
1391 {
1392         struct g_geom *gp;
1393         struct g_part_entry *entry;
1394         struct g_part_table *table;
1395         struct sbuf *sb;
1396         int error;
1397
1398         gp = gpp->gpp_geom;
1399         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1400         g_topology_assert();
1401
1402         table = gp->softc;
1403
1404         if (gpp->gpp_parms & G_PART_PARM_INDEX) {
1405                 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1406                         if (entry->gpe_deleted || entry->gpe_internal)
1407                                 continue;
1408                         if (entry->gpe_index == gpp->gpp_index)
1409                                 break;
1410                 }
1411                 if (entry == NULL) {
1412                         gctl_error(req, "%d index '%d'", ENOENT,
1413                             gpp->gpp_index);
1414                         return (ENOENT);
1415                 }
1416         } else
1417                 entry = NULL;
1418
1419         error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1420         if (error) {
1421                 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1422                 return (error);
1423         }
1424
1425         /* Provide feedback if so requested. */
1426         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1427                 sb = sbuf_new_auto();
1428                 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
1429                     (set) ? "" : "un");
1430                 if (entry)
1431                         G_PART_FULLNAME(table, entry, sb, gp->name);
1432                 else
1433                         sbuf_cat(sb, gp->name);
1434                 sbuf_cat(sb, "\n");
1435                 sbuf_finish(sb);
1436                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1437                 sbuf_delete(sb);
1438         }
1439         return (0);
1440 }
1441
/*
 * Handle the 'undo' verb: discard all uncommitted changes of an
 * opened table.  Created entries are destroyed together with their
 * providers, deleted entries are freed, and — when the on-disk state
 * cannot be restored from memory (null scheme or a table created in
 * this transaction) — the media is re-probed and re-read from disk.
 * Returns 0 or an errno.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* There is nothing to undo unless the table is opened. */
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			/* Created this transaction: wither its provider. */
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/*
	 * The in-memory table cannot be rolled back if the scheme was
	 * destroyed (null scheme) or created in this transaction; the
	 * media must be re-probed from scratch in that case.
	 */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		/* Any remaining non-internal entry blocks the reprobe. */
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			/*
			 * No recognizable table on the media: wither
			 * the geom.  The undo itself succeeded, so
			 * report success to the caller.
			 */
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		/* The probe installed a fresh table in gp->softc. */
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partitition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	/* Reload the entries from the on-disk metadata and verify. */
	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	/* Re-acquire the topology lock before reporting the error. */
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
1537
1538 static void
1539 g_part_wither(struct g_geom *gp, int error)
1540 {
1541         struct g_part_entry *entry;
1542         struct g_part_table *table;
1543
1544         table = gp->softc;
1545         if (table != NULL) {
1546                 G_PART_DESTROY(table, NULL);
1547                 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1548                         LIST_REMOVE(entry, gpe_entry);
1549                         g_free(entry);
1550                 }
1551                 if (gp->softc != NULL) {
1552                         kobj_delete((kobj_t)gp->softc, M_GEOM);
1553                         gp->softc = NULL;
1554                 }
1555         }
1556         g_wither_geom(gp, error);
1557 }
1558
1559 /*
1560  * Class methods.
1561  */
1562
1563 static void
1564 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
1565 {
1566         struct g_part_parms gpp;
1567         struct g_part_table *table;
1568         struct gctl_req_arg *ap;
1569         enum g_part_ctl ctlreq;
1570         unsigned int i, mparms, oparms, parm;
1571         int auto_commit, close_on_error;
1572         int error, modifies;
1573
1574         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1575         g_topology_assert();
1576
1577         ctlreq = G_PART_CTL_NONE;
1578         modifies = 1;
1579         mparms = 0;
1580         oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1581         switch (*verb) {
1582         case 'a':
1583                 if (!strcmp(verb, "add")) {
1584                         ctlreq = G_PART_CTL_ADD;
1585                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1586                             G_PART_PARM_START | G_PART_PARM_TYPE;
1587                         oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1588                 }
1589                 break;
1590         case 'b':
1591                 if (!strcmp(verb, "bootcode")) {
1592                         ctlreq = G_PART_CTL_BOOTCODE;
1593                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
1594                 }
1595                 break;
1596         case 'c':
1597                 if (!strcmp(verb, "commit")) {
1598                         ctlreq = G_PART_CTL_COMMIT;
1599                         mparms |= G_PART_PARM_GEOM;
1600                         modifies = 0;
1601                 } else if (!strcmp(verb, "create")) {
1602                         ctlreq = G_PART_CTL_CREATE;
1603                         mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1604                         oparms |= G_PART_PARM_ENTRIES;
1605                 }
1606                 break;
1607         case 'd':
1608                 if (!strcmp(verb, "delete")) {
1609                         ctlreq = G_PART_CTL_DELETE;
1610                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1611                 } else if (!strcmp(verb, "destroy")) {
1612                         ctlreq = G_PART_CTL_DESTROY;
1613                         mparms |= G_PART_PARM_GEOM;
1614                         oparms |= G_PART_PARM_FORCE;
1615                 }
1616                 break;
1617         case 'm':
1618                 if (!strcmp(verb, "modify")) {
1619                         ctlreq = G_PART_CTL_MODIFY;
1620                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1621                         oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1622                 } else if (!strcmp(verb, "move")) {
1623                         ctlreq = G_PART_CTL_MOVE;
1624                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1625                 }
1626                 break;
1627         case 'r':
1628                 if (!strcmp(verb, "recover")) {
1629                         ctlreq = G_PART_CTL_RECOVER;
1630                         mparms |= G_PART_PARM_GEOM;
1631                 } else if (!strcmp(verb, "resize")) {
1632                         ctlreq = G_PART_CTL_RESIZE;
1633                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
1634                             G_PART_PARM_SIZE;
1635                 }
1636                 break;
1637         case 's':
1638                 if (!strcmp(verb, "set")) {
1639                         ctlreq = G_PART_CTL_SET;
1640                         mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1641                         oparms |= G_PART_PARM_INDEX;
1642                 }
1643                 break;
1644         case 'u':
1645                 if (!strcmp(verb, "undo")) {
1646                         ctlreq = G_PART_CTL_UNDO;
1647                         mparms |= G_PART_PARM_GEOM;
1648                         modifies = 0;
1649                 } else if (!strcmp(verb, "unset")) {
1650                         ctlreq = G_PART_CTL_UNSET;
1651                         mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1652                         oparms |= G_PART_PARM_INDEX;
1653                 }
1654                 break;
1655         }
1656         if (ctlreq == G_PART_CTL_NONE) {
1657                 gctl_error(req, "%d verb '%s'", EINVAL, verb);
1658                 return;
1659         }
1660
1661         bzero(&gpp, sizeof(gpp));
1662         for (i = 0; i < req->narg; i++) {
1663                 ap = &req->arg[i];
1664                 parm = 0;
1665                 switch (ap->name[0]) {
1666                 case 'a':
1667                         if (!strcmp(ap->name, "arg0")) {
1668                                 parm = mparms &
1669                                     (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
1670                         }
1671                         if (!strcmp(ap->name, "attrib"))
1672                                 parm = G_PART_PARM_ATTRIB;
1673                         break;
1674                 case 'b':
1675                         if (!strcmp(ap->name, "bootcode"))
1676                                 parm = G_PART_PARM_BOOTCODE;
1677                         break;
1678                 case 'c':
1679                         if (!strcmp(ap->name, "class"))
1680                                 continue;
1681                         break;
1682                 case 'e':
1683                         if (!strcmp(ap->name, "entries"))
1684                                 parm = G_PART_PARM_ENTRIES;
1685                         break;
1686                 case 'f':
1687                         if (!strcmp(ap->name, "flags"))
1688                                 parm = G_PART_PARM_FLAGS;
1689                         else if (!strcmp(ap->name, "force"))
1690                                 parm = G_PART_PARM_FORCE;
1691                         break;
1692                 case 'i':
1693                         if (!strcmp(ap->name, "index"))
1694                                 parm = G_PART_PARM_INDEX;
1695                         break;
1696                 case 'l':
1697                         if (!strcmp(ap->name, "label"))
1698                                 parm = G_PART_PARM_LABEL;
1699                         break;
1700                 case 'o':
1701                         if (!strcmp(ap->name, "output"))
1702                                 parm = G_PART_PARM_OUTPUT;
1703                         break;
1704                 case 's':
1705                         if (!strcmp(ap->name, "scheme"))
1706                                 parm = G_PART_PARM_SCHEME;
1707                         else if (!strcmp(ap->name, "size"))
1708                                 parm = G_PART_PARM_SIZE;
1709                         else if (!strcmp(ap->name, "start"))
1710                                 parm = G_PART_PARM_START;
1711                         break;
1712                 case 't':
1713                         if (!strcmp(ap->name, "type"))
1714                                 parm = G_PART_PARM_TYPE;
1715                         break;
1716                 case 'v':
1717                         if (!strcmp(ap->name, "verb"))
1718                                 continue;
1719                         else if (!strcmp(ap->name, "version"))
1720                                 parm = G_PART_PARM_VERSION;
1721                         break;
1722                 }
1723                 if ((parm & (mparms | oparms)) == 0) {
1724                         gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1725                         return;
1726                 }
1727                 switch (parm) {
1728                 case G_PART_PARM_ATTRIB:
1729                         error = g_part_parm_str(req, ap->name,
1730                             &gpp.gpp_attrib);
1731                         break;
1732                 case G_PART_PARM_BOOTCODE:
1733                         error = g_part_parm_bootcode(req, ap->name,
1734                             &gpp.gpp_codeptr, &gpp.gpp_codesize);
1735                         break;
1736                 case G_PART_PARM_ENTRIES:
1737                         error = g_part_parm_intmax(req, ap->name,
1738                             &gpp.gpp_entries);
1739                         break;
1740                 case G_PART_PARM_FLAGS:
1741                         error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
1742                         break;
1743                 case G_PART_PARM_FORCE:
1744                         error = g_part_parm_uint32(req, ap->name,
1745                             &gpp.gpp_force);
1746                         break;
1747                 case G_PART_PARM_GEOM:
1748                         error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
1749                         break;
1750                 case G_PART_PARM_INDEX:
1751                         error = g_part_parm_intmax(req, ap->name,
1752                             &gpp.gpp_index);
1753                         break;
1754                 case G_PART_PARM_LABEL:
1755                         error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
1756                         break;
1757                 case G_PART_PARM_OUTPUT:
1758                         error = 0;      /* Write-only parameter */
1759                         break;
1760                 case G_PART_PARM_PROVIDER:
1761                         error = g_part_parm_provider(req, ap->name,
1762                             &gpp.gpp_provider);
1763                         break;
1764                 case G_PART_PARM_SCHEME:
1765                         error = g_part_parm_scheme(req, ap->name,
1766                             &gpp.gpp_scheme);
1767                         break;
1768                 case G_PART_PARM_SIZE:
1769                         error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
1770                         break;
1771                 case G_PART_PARM_START:
1772                         error = g_part_parm_quad(req, ap->name,
1773                             &gpp.gpp_start);
1774                         break;
1775                 case G_PART_PARM_TYPE:
1776                         error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
1777                         break;
1778                 case G_PART_PARM_VERSION:
1779                         error = g_part_parm_uint32(req, ap->name,
1780                             &gpp.gpp_version);
1781                         break;
1782                 default:
1783                         error = EDOOFUS;
1784                         gctl_error(req, "%d %s", error, ap->name);
1785                         break;
1786                 }
1787                 if (error != 0) {
1788                         if (error == ENOATTR) {
1789                                 gctl_error(req, "%d param '%s'", error,
1790                                     ap->name);
1791                         }
1792                         return;
1793                 }
1794                 gpp.gpp_parms |= parm;
1795         }
1796         if ((gpp.gpp_parms & mparms) != mparms) {
1797                 parm = mparms - (gpp.gpp_parms & mparms);
1798                 gctl_error(req, "%d param '%x'", ENOATTR, parm);
1799                 return;
1800         }
1801
1802         /* Obtain permissions if possible/necessary. */
1803         close_on_error = 0;
1804         table = NULL;
1805         if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1806                 table = gpp.gpp_geom->softc;
1807                 if (table != NULL && table->gpt_corrupt &&
1808                     ctlreq != G_PART_CTL_DESTROY &&
1809                     ctlreq != G_PART_CTL_RECOVER) {
1810                         gctl_error(req, "%d table '%s' is corrupt",
1811                             EPERM, gpp.gpp_geom->name);
1812                         return;
1813                 }
1814                 if (table != NULL && !table->gpt_opened) {
1815                         error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1816                             1, 1, 1);
1817                         if (error) {
1818                                 gctl_error(req, "%d geom '%s'", error,
1819                                     gpp.gpp_geom->name);
1820                                 return;
1821                         }
1822                         table->gpt_opened = 1;
1823                         close_on_error = 1;
1824                 }
1825         }
1826
1827         /* Allow the scheme to check or modify the parameters. */
1828         if (table != NULL) {
1829                 error = G_PART_PRECHECK(table, ctlreq, &gpp);
1830                 if (error) {
1831                         gctl_error(req, "%d pre-check failed", error);
1832                         goto out;
1833                 }
1834         } else
1835                 error = EDOOFUS;        /* Prevent bogus uninit. warning. */
1836
1837         switch (ctlreq) {
1838         case G_PART_CTL_NONE:
1839                 panic("%s", __func__);
1840         case G_PART_CTL_ADD:
1841                 error = g_part_ctl_add(req, &gpp);
1842                 break;
1843         case G_PART_CTL_BOOTCODE:
1844                 error = g_part_ctl_bootcode(req, &gpp);
1845                 break;
1846         case G_PART_CTL_COMMIT:
1847                 error = g_part_ctl_commit(req, &gpp);
1848                 break;
1849         case G_PART_CTL_CREATE:
1850                 error = g_part_ctl_create(req, &gpp);
1851                 break;
1852         case G_PART_CTL_DELETE:
1853                 error = g_part_ctl_delete(req, &gpp);
1854                 break;
1855         case G_PART_CTL_DESTROY:
1856                 error = g_part_ctl_destroy(req, &gpp);
1857                 break;
1858         case G_PART_CTL_MODIFY:
1859                 error = g_part_ctl_modify(req, &gpp);
1860                 break;
1861         case G_PART_CTL_MOVE:
1862                 error = g_part_ctl_move(req, &gpp);
1863                 break;
1864         case G_PART_CTL_RECOVER:
1865                 error = g_part_ctl_recover(req, &gpp);
1866                 break;
1867         case G_PART_CTL_RESIZE:
1868                 error = g_part_ctl_resize(req, &gpp);
1869                 break;
1870         case G_PART_CTL_SET:
1871                 error = g_part_ctl_setunset(req, &gpp, 1);
1872                 break;
1873         case G_PART_CTL_UNDO:
1874                 error = g_part_ctl_undo(req, &gpp);
1875                 break;
1876         case G_PART_CTL_UNSET:
1877                 error = g_part_ctl_setunset(req, &gpp, 0);
1878                 break;
1879         }
1880
1881         /* Implement automatic commit. */
1882         if (!error) {
1883                 auto_commit = (modifies &&
1884                     (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1885                     strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1886                 if (auto_commit) {
1887                         KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
1888                             __func__));
1889                         error = g_part_ctl_commit(req, &gpp);
1890                 }
1891         }
1892
1893  out:
1894         if (error && close_on_error) {
1895                 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1896                 table->gpt_opened = 0;
1897         }
1898 }
1899
1900 static int
1901 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1902     struct g_geom *gp)
1903 {
1904
1905         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1906         g_topology_assert();
1907
1908         g_part_wither(gp, EINVAL);
1909         return (0);
1910 }
1911
/*
 * GEOM taste method: probe a provider for a supported partitioning
 * scheme and, on success, instantiate a partitioning geom with one
 * provider per non-internal table entry. Returns the new geom, or
 * NULL when the provider is unsuitable or probing fails.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	struct g_geom_alias *gap;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	/* Propagate any aliases of the underlying geom to the new one. */
	LIST_FOREACH(gap, &pp->geom->aliases, ga_next)
		g_geom_add_alias(gp, gap->ga_alias);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		/* Roll back: detach only if the attach itself succeeded. */
		if (cp->provider)
			g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	/*
	 * Hold root mount while probing; released again on both the
	 * success and failure paths below. Probing happens without the
	 * topology lock held.
	 */
	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	/* No PART::depth attribute means we are the outermost scheme. */
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	/* g_part_probe() attached a table as the geom's softc. */
	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	/* Re-take the topology lock to publish the new providers. */
	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	/* Drop the probe-time read access; normal access comes later. */
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	/* Full teardown: drop access, detach and destroy everything. */
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}
2013
2014 /*
2015  * Geom methods.
2016  */
2017
2018 static int
2019 g_part_access(struct g_provider *pp, int dr, int dw, int de)
2020 {
2021         struct g_consumer *cp;
2022
2023         G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
2024             dw, de));
2025
2026         cp = LIST_FIRST(&pp->geom->consumer);
2027
2028         /* We always gain write-exclusive access. */
2029         return (g_access(cp, dr, dw, dw + de));
2030 }
2031
/*
 * GEOM dumpconf method: emit configuration for sysctl/XML consumers.
 * Three modes, selected by the arguments:
 *   indent == NULL          -> one-line provider summary (pp required);
 *   cp != NULL              -> consumer configuration (nothing to add);
 *   pp != NULL              -> per-partition XML provider section;
 *   otherwise               -> geom-wide (table-level) XML section.
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		/* Withering providers have no entry attached; skip them. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		/* Let the scheme append its own per-entry elements. */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		/* Scheme-specific table-level elements. */
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
2098
2099 /*-
2100  * This start routine is only called for non-trivial requests, all the
2101  * trivial ones are handled autonomously by the slice code.
2102  * For requests we handle here, we must call the g_io_deliver() on the
2103  * bio, and return non-zero to indicate to the slice code that we did so.
2104  * This code executes in the "DOWN" I/O path, this means:
2105  *    * No sleeping.
2106  *    * Don't grab the topology lock.
2107  *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
2108  */
2109 static int
2110 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
2111 {
2112         struct g_part_table *table;
2113
2114         table = pp->geom->softc;
2115         return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
2116 }
2117
/*
 * GEOM resize method: called when the underlying provider changes
 * size. If automatic resizing is enabled, ask the scheme to adapt
 * the table and tell the operator how to commit or undo the change.
 * The geom is withered if the resized table fails integrity checks.
 */
static void
g_part_resize(struct g_consumer *cp)
{
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	/* Automatic resizing disabled by the auto_resize knob. */
	if (auto_resize == 0)
		return;

	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		/*
		 * Obtain modify permissions; mark the table opened so
		 * a later commit/undo knows to release them.
		 */
		if (g_access(cp, 1, 1, 1) != 0)
			return;
		table->gpt_opened = 1;
	}
	/* NULL entry/gpp asks the scheme to resize the table itself. */
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    "  Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	/* A table that no longer passes integrity checks is torn down. */
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);
	}
}
2146
2147 static void
2148 g_part_orphan(struct g_consumer *cp)
2149 {
2150         struct g_provider *pp;
2151         struct g_part_table *table;
2152
2153         pp = cp->provider;
2154         KASSERT(pp != NULL, ("%s", __func__));
2155         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
2156         g_topology_assert();
2157
2158         KASSERT(pp->error != 0, ("%s", __func__));
2159         table = cp->geom->softc;
2160         if (table != NULL && table->gpt_opened)
2161                 g_access(cp, -1, -1, -1);
2162         g_part_wither(cp->geom, pp->error);
2163 }
2164
2165 static void
2166 g_part_spoiled(struct g_consumer *cp)
2167 {
2168
2169         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2170         g_topology_assert();
2171
2172         cp->flags |= G_CF_ORPHAN;
2173         g_part_wither(cp->geom, ENXIO);
2174 }
2175
/*
 * GEOM start method: handle I/O requests aimed at one of our
 * partition providers. Read/write/delete requests are clipped to
 * the partition and translated by the entry's offset; attributes
 * are answered locally where possible; everything else is cloned
 * and forwarded to the underlying provider.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	char buf[64];

	biotrack(bp, __func__);

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* A provider without an entry attached is going away. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		/* Requests starting beyond the partition fail outright. */
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip the length to the end of the partition. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Translate partition-relative offset to media offset. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Forwarded unchanged to the underlying provider below. */
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used. If the request comes from the nested scheme
			 * we allow dumping there as well.
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			/* Clip and translate the dump extent like I/O. */
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		/* Unhandled attributes fall through to the provider. */
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* Forward the (possibly adjusted) request downward. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
2279
/*
 * GEOM class init method: register the built-in "null" scheme at
 * the head of the scheme list.
 */
static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
2286
/*
 * GEOM class fini method: deregister the built-in "null" scheme
 * added by g_part_init().
 */
static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
2293
/*
 * Event-thread handler for scheme module unload. Withers every geom
 * using the departing scheme, provided none of them are in use, and
 * removes the scheme from the registry. The scheme pointer comes in
 * through *arg (a uintptr_t exchange cell) and the result (0 or
 * EBUSY) is written back through the same cell for g_part_modevent().
 */
static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	/* Decode the scheme pointer smuggled in through the arg cell. */
	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		/* Sum all access counts; non-zero means geom is busy. */
		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	/* Only deregister the scheme when no geom refused to go. */
	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	/* Hand the result back to g_part_modevent(). */
	*xchg = error;
}
2336
/*
 * Module event hook shared by all partitioning scheme modules.
 * MOD_LOAD registers the scheme (rejecting duplicates) and retastes
 * all providers; MOD_UNLOAD deregisters it via an event-thread
 * handler so the topology is quiescent. Returns 0 or an errno.
 */
int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		/* Refuse to register the same scheme twice. */
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				break;
			}
		}
		/* iter is NULL only when the loop found no duplicate. */
		if (iter == NULL) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		/*
		 * Pass the scheme pointer through the arg cell; the
		 * handler writes its result (0/EBUSY) back into it.
		 */
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		if (error == 0)
			error = arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}