[FreeBSD/FreeBSD.git] sys/geom/part/g_part.c
1 /*-
2  * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/bio.h>
32 #include <sys/endian.h>
33 #include <sys/kernel.h>
34 #include <sys/kobj.h>
35 #include <sys/limits.h>
36 #include <sys/lock.h>
37 #include <sys/malloc.h>
38 #include <sys/mutex.h>
39 #include <sys/queue.h>
40 #include <sys/sbuf.h>
41 #include <sys/sysctl.h>
42 #include <sys/systm.h>
43 #include <sys/uuid.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/geom_int.h>
47 #include <geom/part/g_part.h>
48
49 #include "g_part_if.h"
50
51 #ifndef _PATH_DEV
52 #define _PATH_DEV "/dev/"
53 #endif
54
55 static kobj_method_t g_part_null_methods[] = {
56         { 0, 0 }
57 };
58
59 static struct g_part_scheme g_part_null_scheme = {
60         "(none)",
61         g_part_null_methods,
62         sizeof(struct g_part_table),
63 };
64
65 TAILQ_HEAD(, g_part_scheme) g_part_schemes =
66     TAILQ_HEAD_INITIALIZER(g_part_schemes);
67
68 struct g_part_alias_list {
69         const char *lexeme;
70         enum g_part_alias alias;
71 } g_part_alias_list[G_PART_ALIAS_COUNT] = {
72         { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
73         { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
74         { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
75         { "apple-label", G_PART_ALIAS_APPLE_LABEL },
76         { "apple-raid", G_PART_ALIAS_APPLE_RAID },
77         { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
78         { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
79         { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
80         { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
81         { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
82         { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
83         { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
84         { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
85         { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
86         { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
87         { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
88         { "dragonfly-label32", G_PART_ALIAS_DFBSD },
89         { "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
90         { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
91         { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
92         { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
93         { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
94         { "ebr", G_PART_ALIAS_EBR },
95         { "efi", G_PART_ALIAS_EFI },
96         { "fat16", G_PART_ALIAS_MS_FAT16 },
97         { "fat32", G_PART_ALIAS_MS_FAT32 },
98         { "freebsd", G_PART_ALIAS_FREEBSD },
99         { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
100         { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
101         { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
102         { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
103         { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
104         { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
105         { "linux-data", G_PART_ALIAS_LINUX_DATA },
106         { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
107         { "linux-raid", G_PART_ALIAS_LINUX_RAID },
108         { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
109         { "mbr", G_PART_ALIAS_MBR },
110         { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
111         { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
112         { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
113         { "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
114         { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
115         { "ms-spaces", G_PART_ALIAS_MS_SPACES },
116         { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
117         { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
118         { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
119         { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
120         { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
121         { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
122         { "ntfs", G_PART_ALIAS_MS_NTFS },
123         { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
124         { "prep-boot", G_PART_ALIAS_PREP_BOOT },
125         { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
126         { "vmware-vmfs", G_PART_ALIAS_VMFS },
127         { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
128         { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
129 };
130
131 SYSCTL_DECL(_kern_geom);
132 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
133     "GEOM_PART stuff");
134 static u_int check_integrity = 1;
135 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
136     CTLFLAG_RWTUN, &check_integrity, 1,
137     "Enable integrity checking");
138 static u_int auto_resize = 1;
139 SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
140     CTLFLAG_RWTUN, &auto_resize, 1,
141     "Enable auto resize");
142
143 /*
144  * The GEOM partitioning class.
145  */
146 static g_ctl_req_t g_part_ctlreq;
147 static g_ctl_destroy_geom_t g_part_destroy_geom;
148 static g_fini_t g_part_fini;
149 static g_init_t g_part_init;
150 static g_taste_t g_part_taste;
151
152 static g_access_t g_part_access;
153 static g_dumpconf_t g_part_dumpconf;
154 static g_orphan_t g_part_orphan;
155 static g_spoiled_t g_part_spoiled;
156 static g_start_t g_part_start;
157 static g_resize_t g_part_resize;
158 static g_ioctl_t g_part_ioctl;
159
160 static struct g_class g_part_class = {
161         .name = "PART",
162         .version = G_VERSION,
163         /* Class methods. */
164         .ctlreq = g_part_ctlreq,
165         .destroy_geom = g_part_destroy_geom,
166         .fini = g_part_fini,
167         .init = g_part_init,
168         .taste = g_part_taste,
169         /* Geom methods. */
170         .access = g_part_access,
171         .dumpconf = g_part_dumpconf,
172         .orphan = g_part_orphan,
173         .spoiled = g_part_spoiled,
174         .start = g_part_start,
175         .resize = g_part_resize,
176         .ioctl = g_part_ioctl,
177 };
178
179 DECLARE_GEOM_CLASS(g_part_class, g_part);
180 MODULE_VERSION(g_part, 0);
181
182 /*
183  * Support functions.
184  */
185
186 static void g_part_wither(struct g_geom *, int);
187
188 const char *
189 g_part_alias_name(enum g_part_alias alias)
190 {
191         int i;
192
193         for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
194                 if (g_part_alias_list[i].alias != alias)
195                         continue;
196                 return (g_part_alias_list[i].lexeme);
197         }
198
199         return (NULL);
200 }
201
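/*
 * For a given sectors-per-track value, try the fixed list of candidate
 * head counts and report (via *bestchs and *bestheads) the CHS geometry
 * with at most 1023 cylinders that covers the largest part of the media.
 * Both outputs remain 0 when no candidate fits.
 */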
202 void
203 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
204     u_int *bestheads)
205 {
206         static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
207         off_t chs, cylinders;
208         u_int heads;
209         int idx;
210
211         *bestchs = 0;
212         *bestheads = 0;
213         for (idx = 0; candidate_heads[idx] != 0; idx++) {
214                 heads = candidate_heads[idx];
215                 cylinders = blocks / heads / sectors;
216                 if (cylinders < heads || cylinders < sectors)
217                         break;
218                 if (cylinders > 1023)
219                         continue;
220                 chs = cylinders * heads * sectors;
221                 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
222                         *bestchs = chs;
223                         *bestheads = heads;
224                 }
225         }
226 }
227
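/*
 * Synthesize a CHS geometry for the given number of blocks.  Firmware
 * supplied values (GEOM::fwheads/GEOM::fwsectors) are used verbatim when
 * available; otherwise candidate sector counts are tried and the geometry
 * covering the most blocks wins, falling back to 255 heads and 63 sectors
 * for disks too large to be described by CHS.
 */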
228 static void
229 g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
230     off_t blocks)
231 {
232         static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
233         off_t chs, bestchs;
234         u_int heads, sectors;
235         int idx;
236
237         if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
238             g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
239                 table->gpt_fixgeom = 0;
240                 table->gpt_heads = 0;
241                 table->gpt_sectors = 0;
242                 bestchs = 0;
243                 for (idx = 0; candidate_sectors[idx] != 0; idx++) {
244                         sectors = candidate_sectors[idx];
245                         g_part_geometry_heads(blocks, sectors, &chs, &heads);
246                         if (chs == 0)
247                                 continue;
248                         /*
249                          * Prefer a geometry with sectors > 1, but only if
250                          * it doesn't bump down the number of heads to 1.
251                          */
252                         if (chs > bestchs || (chs == bestchs && heads > 1 &&
253                             table->gpt_sectors == 1)) {
254                                 bestchs = chs;
255                                 table->gpt_heads = heads;
256                                 table->gpt_sectors = sectors;
257                         }
258                 }
259                 /*
260                  * If we didn't find a geometry at all, then the disk is
261                  * too big. This means we can use the maximum number of
262                  * heads and sectors.
263                  */
264                 if (bestchs == 0) {
265                         table->gpt_heads = 255;
266                         table->gpt_sectors = 63;
267                 }
268         } else {
269                 table->gpt_fixgeom = 1;
270                 table->gpt_heads = heads;
271                 table->gpt_sectors = sectors;
272         }
273 }
274
275 #define DPRINTF(...)    if (bootverbose) {      \
276         printf("GEOM_PART: " __VA_ARGS__);      \
277 }
278
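/*
 * Sanity check a parsed partition table: the first/last LBAs must lie
 * within the media, every entry must fall within that range with
 * start <= end, and entries may not overlap.  Stripe misalignment is only
 * reported.  On failure, EINVAL is returned when the
 * kern.geom.part.check_integrity sysctl is set; otherwise the table is
 * merely marked corrupt.
 */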
279 static int
280 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
281 {
282         struct g_part_entry *e1, *e2;
283         struct g_provider *pp;
284         off_t offset;
285         int failed;
286
287         failed = 0;
288         pp = cp->provider;
289         if (table->gpt_last < table->gpt_first) {
290                 DPRINTF("last LBA is below first LBA: %jd < %jd\n",
291                     (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
292                 failed++;
293         }
294         if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
295                 DPRINTF("last LBA extends beyond mediasize: "
296                     "%jd > %jd\n", (intmax_t)table->gpt_last,
297                     (intmax_t)pp->mediasize / pp->sectorsize - 1);
298                 failed++;
299         }
300         LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
301                 if (e1->gpe_deleted || e1->gpe_internal)
302                         continue;
303                 if (e1->gpe_start < table->gpt_first) {
304                         DPRINTF("partition %d has start offset below first "
305                             "LBA: %jd < %jd\n", e1->gpe_index,
306                             (intmax_t)e1->gpe_start,
307                             (intmax_t)table->gpt_first);
308                         failed++;
309                 }
310                 if (e1->gpe_start > table->gpt_last) {
311                         DPRINTF("partition %d has start offset beyond last "
312                             "LBA: %jd > %jd\n", e1->gpe_index,
313                             (intmax_t)e1->gpe_start,
314                             (intmax_t)table->gpt_last);
315                         failed++;
316                 }
317                 if (e1->gpe_end < e1->gpe_start) {
318                         DPRINTF("partition %d has end offset below start "
319                             "offset: %jd < %jd\n", e1->gpe_index,
320                             (intmax_t)e1->gpe_end,
321                             (intmax_t)e1->gpe_start);
322                         failed++;
323                 }
324                 if (e1->gpe_end > table->gpt_last) {
325                         DPRINTF("partition %d has end offset beyond last "
326                             "LBA: %jd > %jd\n", e1->gpe_index,
327                             (intmax_t)e1->gpe_end,
328                             (intmax_t)table->gpt_last);
329                         failed++;
330                 }
331                 if (pp->stripesize > 0) {
332                         offset = e1->gpe_start * pp->sectorsize;
333                         if (e1->gpe_offset > offset)
334                                 offset = e1->gpe_offset;
335                         if ((offset + pp->stripeoffset) % pp->stripesize) {
336                                 DPRINTF("partition %d on (%s, %s) is not "
337                                     "aligned on %u bytes\n", e1->gpe_index,
338                                     pp->name, table->gpt_scheme->name,
339                                     pp->stripesize);
340                                 /* Don't treat this as a critical failure */
341                         }
342                 }
343                 e2 = e1;
344                 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
345                         if (e2->gpe_deleted || e2->gpe_internal)
346                                 continue;
347                         if (e1->gpe_start >= e2->gpe_start &&
348                             e1->gpe_start <= e2->gpe_end) {
349                                 DPRINTF("partition %d has start offset inside "
350                                     "partition %d: start[%d] %jd >= start[%d] "
351                                     "%jd <= end[%d] %jd\n",
352                                     e1->gpe_index, e2->gpe_index,
353                                     e2->gpe_index, (intmax_t)e2->gpe_start,
354                                     e1->gpe_index, (intmax_t)e1->gpe_start,
355                                     e2->gpe_index, (intmax_t)e2->gpe_end);
356                                 failed++;
357                         }
358                         if (e1->gpe_end >= e2->gpe_start &&
359                             e1->gpe_end <= e2->gpe_end) {
360                                 DPRINTF("partition %d has end offset inside "
361                                     "partition %d: start[%d] %jd >= end[%d] "
362                                     "%jd <= end[%d] %jd\n",
363                                     e1->gpe_index, e2->gpe_index,
364                                     e2->gpe_index, (intmax_t)e2->gpe_start,
365                                     e1->gpe_index, (intmax_t)e1->gpe_end,
366                                     e2->gpe_index, (intmax_t)e2->gpe_end);
367                                 failed++;
368                         }
369                         if (e1->gpe_start < e2->gpe_start &&
370                             e1->gpe_end > e2->gpe_end) {
371                                 DPRINTF("partition %d contains partition %d: "
372                                     "start[%d] %jd > start[%d] %jd, end[%d] "
373                                     "%jd < end[%d] %jd\n",
374                                     e1->gpe_index, e2->gpe_index,
375                                     e1->gpe_index, (intmax_t)e1->gpe_start,
376                                     e2->gpe_index, (intmax_t)e2->gpe_start,
377                                     e2->gpe_index, (intmax_t)e2->gpe_end,
378                                     e1->gpe_index, (intmax_t)e1->gpe_end);
379                                 failed++;
380                         }
381                 }
382         }
383         if (failed != 0) {
384                 printf("GEOM_PART: integrity check failed (%s, %s)\n",
385                     pp->name, table->gpt_scheme->name);
386                 if (check_integrity != 0)
387                         return (EINVAL);
388                 table->gpt_corrupt = 1;
389         }
390         return (0);
391 }
392 #undef  DPRINTF
393
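/*
 * Return the table entry with the given index, allocating a new one and
 * inserting it in index order if it does not exist yet.  The start and
 * end LBAs are (re)initialized in either case.
 */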
394 struct g_part_entry *
395 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
396     quad_t end)
397 {
398         struct g_part_entry *entry, *last;
399
400         last = NULL;
401         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
402                 if (entry->gpe_index == index)
403                         break;
404                 if (entry->gpe_index > index) {
405                         entry = NULL;
406                         break;
407                 }
408                 last = entry;
409         }
410         if (entry == NULL) {
411                 entry = g_malloc(table->gpt_scheme->gps_entrysz,
412                     M_WAITOK | M_ZERO);
413                 entry->gpe_index = index;
414                 if (last == NULL)
415                         LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
416                 else
417                         LIST_INSERT_AFTER(last, entry, gpe_entry);
418         } else
419                 entry->gpe_offset = 0;
420         entry->gpe_start = start;
421         entry->gpe_end = end;
422         return (entry);
423 }
424
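/*
 * Create the GEOM provider for a table entry (if it does not exist yet)
 * and derive its media size, sector size and stripe parameters from the
 * underlying provider, taking the entry's byte offset into account.
 */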
425 static void
426 g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
427     struct g_part_entry *entry)
428 {
429         struct g_consumer *cp;
430         struct g_provider *pp;
431         struct sbuf *sb;
432         off_t offset;
433
434         cp = LIST_FIRST(&gp->consumer);
435         pp = cp->provider;
436
437         offset = entry->gpe_start * pp->sectorsize;
438         if (entry->gpe_offset < offset)
439                 entry->gpe_offset = offset;
440
441         if (entry->gpe_pp == NULL) {
442                 sb = sbuf_new_auto();
443                 G_PART_FULLNAME(table, entry, sb, gp->name);
444                 sbuf_finish(sb);
445                 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
446                 sbuf_delete(sb);
447                 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
448                 entry->gpe_pp->private = entry;         /* Close the circle. */
449         }
450         entry->gpe_pp->index = entry->gpe_index - 1;    /* index is 1-based. */
451         entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
452             pp->sectorsize;
453         entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
454         entry->gpe_pp->sectorsize = pp->sectorsize;
455         entry->gpe_pp->stripesize = pp->stripesize;
456         entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
457         if (pp->stripesize > 0)
458                 entry->gpe_pp->stripeoffset %= pp->stripesize;
459         entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
460         g_error_provider(entry->gpe_pp, 0);
461 }
462
463 static struct g_geom*
464 g_part_find_geom(const char *name)
465 {
466         struct g_geom *gp;
467         LIST_FOREACH(gp, &g_part_class.geom, geom) {
468                 if ((gp->flags & G_GEOM_WITHER) == 0 &&
469                     strcmp(name, gp->name) == 0)
470                         break;
471         }
472         return (gp);
473 }
474
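/*
 * The g_part_parm_* helpers below fetch a named argument from a gctl
 * request and validate or convert it, returning ENOATTR when the argument
 * is absent and EINVAL when it is malformed.
 */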
475 static int
476 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
477 {
478         struct g_geom *gp;
479         const char *gname;
480
481         gname = gctl_get_asciiparam(req, name);
482         if (gname == NULL)
483                 return (ENOATTR);
484         if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
485                 gname += sizeof(_PATH_DEV) - 1;
486         gp = g_part_find_geom(gname);
487         if (gp == NULL) {
488                 gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
489                 return (EINVAL);
490         }
491         *v = gp;
492         return (0);
493 }
494
495 static int
496 g_part_parm_provider(struct gctl_req *req, const char *name,
497     struct g_provider **v)
498 {
499         struct g_provider *pp;
500         const char *pname;
501
502         pname = gctl_get_asciiparam(req, name);
503         if (pname == NULL)
504                 return (ENOATTR);
505         if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
506                 pname += sizeof(_PATH_DEV) - 1;
507         pp = g_provider_by_name(pname);
508         if (pp == NULL) {
509                 gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
510                 return (EINVAL);
511         }
512         *v = pp;
513         return (0);
514 }
515
516 static int
517 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
518 {
519         const char *p;
520         char *x;
521         quad_t q;
522
523         p = gctl_get_asciiparam(req, name);
524         if (p == NULL)
525                 return (ENOATTR);
526         q = strtoq(p, &x, 0);
527         if (*x != '\0' || q < 0) {
528                 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
529                 return (EINVAL);
530         }
531         *v = q;
532         return (0);
533 }
534
535 static int
536 g_part_parm_scheme(struct gctl_req *req, const char *name,
537     struct g_part_scheme **v)
538 {
539         struct g_part_scheme *s;
540         const char *p;
541
542         p = gctl_get_asciiparam(req, name);
543         if (p == NULL)
544                 return (ENOATTR);
545         TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
546                 if (s == &g_part_null_scheme)
547                         continue;
548                 if (!strcasecmp(s->name, p))
549                         break;
550         }
551         if (s == NULL) {
552                 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
553                 return (EINVAL);
554         }
555         *v = s;
556         return (0);
557 }
558
559 static int
560 g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
561 {
562         const char *p;
563
564         p = gctl_get_asciiparam(req, name);
565         if (p == NULL)
566                 return (ENOATTR);
567         /* An empty label is always valid. */
568         if (strcmp(name, "label") != 0 && p[0] == '\0') {
569                 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
570                 return (EINVAL);
571         }
572         *v = p;
573         return (0);
574 }
575
576 static int
577 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
578 {
579         const intmax_t *p;
580         int size;
581
582         p = gctl_get_param(req, name, &size);
583         if (p == NULL)
584                 return (ENOATTR);
585         if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
586                 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
587                 return (EINVAL);
588         }
589         *v = (u_int)*p;
590         return (0);
591 }
592
593 static int
594 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
595 {
596         const uint32_t *p;
597         int size;
598
599         p = gctl_get_param(req, name, &size);
600         if (p == NULL)
601                 return (ENOATTR);
602         if (size != sizeof(*p) || *p > INT_MAX) {
603                 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
604                 return (EINVAL);
605         }
606         *v = (u_int)*p;
607         return (0);
608 }
609
610 static int
611 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
612     unsigned int *s)
613 {
614         const void *p;
615         int size;
616
617         p = gctl_get_param(req, name, &size);
618         if (p == NULL)
619                 return (ENOATTR);
620         *v = p;
621         *s = size;
622         return (0);
623 }
624
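/*
 * Probe all registered partitioning schemes against the consumer and keep
 * the scheme that reports the best (closest to zero) non-positive
 * priority; a priority of 0 is an exact match and ends the search.
 * Returns ENXIO when no scheme recognizes the media.
 */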
625 static int
626 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
627 {
628         struct g_part_scheme *iter, *scheme;
629         struct g_part_table *table;
630         int pri, probe;
631
632         table = gp->softc;
633         scheme = (table != NULL) ? table->gpt_scheme : NULL;
634         pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
635         if (pri == 0)
636                 goto done;
637         if (pri > 0) {  /* error */
638                 scheme = NULL;
639                 pri = INT_MIN;
640         }
641
642         TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
643                 if (iter == &g_part_null_scheme)
644                         continue;
645                 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
646                     M_WAITOK);
647                 table->gpt_gp = gp;
648                 table->gpt_scheme = iter;
649                 table->gpt_depth = depth;
650                 probe = G_PART_PROBE(table, cp);
651                 if (probe <= 0 && probe > pri) {
652                         pri = probe;
653                         scheme = iter;
654                         if (gp->softc != NULL)
655                                 kobj_delete((kobj_t)gp->softc, M_GEOM);
656                         gp->softc = table;
657                         if (pri == 0)
658                                 goto done;
659                 } else
660                         kobj_delete((kobj_t)table, M_GEOM);
661         }
662
663 done:
664         return ((scheme == NULL) ? ENXIO : 0);
665 }
666
667 /*
668  * Control request functions.
669  */
670
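/*
 * Handle the "add" verb: validate the requested start, size and index
 * against the table, reuse a previously deleted entry with the same index
 * when possible, and create the new provider.  Typically reached through
 * gpart(8), e.g. "gpart add -t freebsd-ufs -s 1g da0".
 */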
671 static int
672 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
673 {
674         struct g_geom *gp;
675         struct g_provider *pp;
676         struct g_part_entry *delent, *last, *entry;
677         struct g_part_table *table;
678         struct sbuf *sb;
679         quad_t end;
680         unsigned int index;
681         int error;
682
683         gp = gpp->gpp_geom;
684         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
685         g_topology_assert();
686
687         pp = LIST_FIRST(&gp->consumer)->provider;
688         table = gp->softc;
689         end = gpp->gpp_start + gpp->gpp_size - 1;
690
691         if (gpp->gpp_start < table->gpt_first ||
692             gpp->gpp_start > table->gpt_last) {
693                 gctl_error(req, "%d start '%jd'", EINVAL,
694                     (intmax_t)gpp->gpp_start);
695                 return (EINVAL);
696         }
697         if (end < gpp->gpp_start || end > table->gpt_last) {
698                 gctl_error(req, "%d size '%jd'", EINVAL,
699                     (intmax_t)gpp->gpp_size);
700                 return (EINVAL);
701         }
702         if (gpp->gpp_index > table->gpt_entries) {
703                 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
704                 return (EINVAL);
705         }
706
707         delent = last = NULL;
708         index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
709         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
710                 if (entry->gpe_deleted) {
711                         if (entry->gpe_index == index)
712                                 delent = entry;
713                         continue;
714                 }
715                 if (entry->gpe_index == index)
716                         index = entry->gpe_index + 1;
717                 if (entry->gpe_index < index)
718                         last = entry;
719                 if (entry->gpe_internal)
720                         continue;
721                 if (gpp->gpp_start >= entry->gpe_start &&
722                     gpp->gpp_start <= entry->gpe_end) {
723                         gctl_error(req, "%d start '%jd'", ENOSPC,
724                             (intmax_t)gpp->gpp_start);
725                         return (ENOSPC);
726                 }
727                 if (end >= entry->gpe_start && end <= entry->gpe_end) {
728                         gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
729                         return (ENOSPC);
730                 }
731                 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
732                         gctl_error(req, "%d size '%jd'", ENOSPC,
733                             (intmax_t)gpp->gpp_size);
734                         return (ENOSPC);
735                 }
736         }
737         if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
738                 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
739                 return (EEXIST);
740         }
741         if (index > table->gpt_entries) {
742                 gctl_error(req, "%d index '%d'", ENOSPC, index);
743                 return (ENOSPC);
744         }
745
746         entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
747             M_WAITOK | M_ZERO) : delent;
748         entry->gpe_index = index;
749         entry->gpe_start = gpp->gpp_start;
750         entry->gpe_end = end;
751         error = G_PART_ADD(table, entry, gpp);
752         if (error) {
753                 gctl_error(req, "%d", error);
754                 if (delent == NULL)
755                         g_free(entry);
756                 return (error);
757         }
758         if (delent == NULL) {
759                 if (last == NULL)
760                         LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
761                 else
762                         LIST_INSERT_AFTER(last, entry, gpe_entry);
763                 entry->gpe_created = 1;
764         } else {
765                 entry->gpe_deleted = 0;
766                 entry->gpe_modified = 1;
767         }
768         g_part_new_provider(gp, table, entry);
769
770         /* Provide feedback if so requested. */
771         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
772                 sb = sbuf_new_auto();
773                 G_PART_FULLNAME(table, entry, sb, gp->name);
774                 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
775                         sbuf_printf(sb, " added, but partition is not "
776                             "aligned on %u bytes\n", pp->stripesize);
777                 else
778                         sbuf_cat(sb, " added\n");
779                 sbuf_finish(sb);
780                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
781                 sbuf_delete(sb);
782         }
783         return (0);
784 }
785
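/*
 * Handle the "bootcode" verb: reject schemes without bootcode support or
 * images larger than the scheme's bootcode area, then let the scheme
 * install the image (e.g. "gpart bootcode -b /boot/pmbr da0").
 */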
786 static int
787 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
788 {
789         struct g_geom *gp;
790         struct g_part_table *table;
791         struct sbuf *sb;
792         int error, sz;
793
794         gp = gpp->gpp_geom;
795         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
796         g_topology_assert();
797
798         table = gp->softc;
799         sz = table->gpt_scheme->gps_bootcodesz;
800         if (sz == 0) {
801                 error = ENODEV;
802                 goto fail;
803         }
804         if (gpp->gpp_codesize > sz) {
805                 error = EFBIG;
806                 goto fail;
807         }
808
809         error = G_PART_BOOTCODE(table, gpp);
810         if (error)
811                 goto fail;
812
813         /* Provide feedback if so requested. */
814         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
815                 sb = sbuf_new_auto();
816                 sbuf_printf(sb, "bootcode written to %s\n", gp->name);
817                 sbuf_finish(sb);
818                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
819                 sbuf_delete(sb);
820         }
821         return (0);
822
823  fail:
824         gctl_error(req, "%d", error);
825         return (error);
826 }
827
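/*
 * Handle the "commit" verb: zero any metadata sectors scheduled for
 * erasure at the head and tail of the media, write the table to disk and
 * drop the access obtained when the table was opened.  Deleted entries
 * are freed and created/modified flags are cleared.  A committed
 * "destroy" (null scheme) withers the geom instead.
 */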
828 static int
829 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
830 {
831         struct g_consumer *cp;
832         struct g_geom *gp;
833         struct g_provider *pp;
834         struct g_part_entry *entry, *tmp;
835         struct g_part_table *table;
836         char *buf;
837         int error, i;
838
839         gp = gpp->gpp_geom;
840         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
841         g_topology_assert();
842
843         table = gp->softc;
844         if (!table->gpt_opened) {
845                 gctl_error(req, "%d", EPERM);
846                 return (EPERM);
847         }
848
849         g_topology_unlock();
850
851         cp = LIST_FIRST(&gp->consumer);
852         if ((table->gpt_smhead | table->gpt_smtail) != 0) {
853                 pp = cp->provider;
854                 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
855                 while (table->gpt_smhead != 0) {
856                         i = ffs(table->gpt_smhead) - 1;
857                         error = g_write_data(cp, i * pp->sectorsize, buf,
858                             pp->sectorsize);
859                         if (error) {
860                                 g_free(buf);
861                                 goto fail;
862                         }
863                         table->gpt_smhead &= ~(1 << i);
864                 }
865                 while (table->gpt_smtail != 0) {
866                         i = ffs(table->gpt_smtail) - 1;
867                         error = g_write_data(cp, pp->mediasize - (i + 1) *
868                             pp->sectorsize, buf, pp->sectorsize);
869                         if (error) {
870                                 g_free(buf);
871                                 goto fail;
872                         }
873                         table->gpt_smtail &= ~(1 << i);
874                 }
875                 g_free(buf);
876         }
877
878         if (table->gpt_scheme == &g_part_null_scheme) {
879                 g_topology_lock();
880                 g_access(cp, -1, -1, -1);
881                 g_part_wither(gp, ENXIO);
882                 return (0);
883         }
884
885         error = G_PART_WRITE(table, cp);
886         if (error)
887                 goto fail;
888
889         LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
890                 if (!entry->gpe_deleted) {
891                         /* Notify consumers that the provider may have changed. */
892                         if (entry->gpe_modified && (
893                             entry->gpe_pp->acw + entry->gpe_pp->ace) == 0)
894                                 g_media_changed(entry->gpe_pp, M_NOWAIT);
895                         entry->gpe_created = 0;
896                         entry->gpe_modified = 0;
897                         continue;
898                 }
899                 LIST_REMOVE(entry, gpe_entry);
900                 g_free(entry);
901         }
902         table->gpt_created = 0;
903         table->gpt_opened = 0;
904
905         g_topology_lock();
906         g_access(cp, -1, -1, -1);
907         return (0);
908
909 fail:
910         g_topology_lock();
911         gctl_error(req, "%d", error);
912         return (error);
913 }
914
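/*
 * Handle the "create" verb: instantiate a table of the requested scheme
 * on a provider (e.g. "gpart create -s gpt da0"), replacing the
 * placeholder null-scheme table if one is left over from a prior
 * "destroy".
 */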
915 static int
916 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
917 {
918         struct g_consumer *cp;
919         struct g_geom *gp;
920         struct g_provider *pp;
921         struct g_part_scheme *scheme;
922         struct g_part_table *null, *table;
923         struct sbuf *sb;
924         int attr, error;
925
926         pp = gpp->gpp_provider;
927         scheme = gpp->gpp_scheme;
928         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
929         g_topology_assert();
930
931         /* Check that there isn't already a g_part geom on the provider. */
932         gp = g_part_find_geom(pp->name);
933         if (gp != NULL) {
934                 null = gp->softc;
935                 if (null->gpt_scheme != &g_part_null_scheme) {
936                         gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
937                         return (EEXIST);
938                 }
939         } else
940                 null = NULL;
941
942         if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
943             (gpp->gpp_entries < scheme->gps_minent ||
944              gpp->gpp_entries > scheme->gps_maxent)) {
945                 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
946                 return (EINVAL);
947         }
948
949         if (null == NULL)
950                 gp = g_new_geomf(&g_part_class, "%s", pp->name);
951         gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
952             M_WAITOK);
953         table = gp->softc;
954         table->gpt_gp = gp;
955         table->gpt_scheme = gpp->gpp_scheme;
956         table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
957             gpp->gpp_entries : scheme->gps_minent;
958         LIST_INIT(&table->gpt_entry);
959         if (null == NULL) {
960                 cp = g_new_consumer(gp);
961                 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
962                 error = g_attach(cp, pp);
963                 if (error == 0)
964                         error = g_access(cp, 1, 1, 1);
965                 if (error != 0) {
966                         g_part_wither(gp, error);
967                         gctl_error(req, "%d geom '%s'", error, pp->name);
968                         return (error);
969                 }
970                 table->gpt_opened = 1;
971         } else {
972                 cp = LIST_FIRST(&gp->consumer);
973                 table->gpt_opened = null->gpt_opened;
974                 table->gpt_smhead = null->gpt_smhead;
975                 table->gpt_smtail = null->gpt_smtail;
976         }
977
978         g_topology_unlock();
979
980         /* Make sure the provider has media. */
981         if (pp->mediasize == 0 || pp->sectorsize == 0) {
982                 error = ENODEV;
983                 goto fail;
984         }
985
986         /* Make sure we can nest and if so, determine our depth. */
987         error = g_getattr("PART::isleaf", cp, &attr);
988         if (!error && attr) {
989                 error = ENODEV;
990                 goto fail;
991         }
992         error = g_getattr("PART::depth", cp, &attr);
993         table->gpt_depth = (!error) ? attr + 1 : 0;
994
995         /*
996          * Synthesize a disk geometry. Some partitioning schemes
997          * depend on it and since some file systems need it even
998          * when the partition scheme doesn't, we do it here in
999          * scheme-independent code.
1000          */
1001         g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1002
1003         error = G_PART_CREATE(table, gpp);
1004         if (error)
1005                 goto fail;
1006
1007         g_topology_lock();
1008
1009         table->gpt_created = 1;
1010         if (null != NULL)
1011                 kobj_delete((kobj_t)null, M_GEOM);
1012
1013         /*
1014          * Support automatic commit by filling in the gpp_geom
1015          * parameter.
1016          */
1017         gpp->gpp_parms |= G_PART_PARM_GEOM;
1018         gpp->gpp_geom = gp;
1019
1020         /* Provide feedback if so requested. */
1021         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1022                 sb = sbuf_new_auto();
1023                 sbuf_printf(sb, "%s created\n", gp->name);
1024                 sbuf_finish(sb);
1025                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1026                 sbuf_delete(sb);
1027         }
1028         return (0);
1029
1030 fail:
1031         g_topology_lock();
1032         if (null == NULL) {
1033                 g_access(cp, -1, -1, -1);
1034                 g_part_wither(gp, error);
1035         } else {
1036                 kobj_delete((kobj_t)gp->softc, M_GEOM);
1037                 gp->softc = null;
1038         }
1039         gctl_error(req, "%d provider", error);
1040         return (error);
1041 }
1042
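/*
 * Handle the "delete" verb: refuse to delete an entry whose provider is
 * open, otherwise wither the provider and either free the entry (if it
 * was never committed) or mark it deleted for the next commit.
 */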
1043 static int
1044 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
1045 {
1046         struct g_geom *gp;
1047         struct g_provider *pp;
1048         struct g_part_entry *entry;
1049         struct g_part_table *table;
1050         struct sbuf *sb;
1051
1052         gp = gpp->gpp_geom;
1053         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1054         g_topology_assert();
1055
1056         table = gp->softc;
1057
1058         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1059                 if (entry->gpe_deleted || entry->gpe_internal)
1060                         continue;
1061                 if (entry->gpe_index == gpp->gpp_index)
1062                         break;
1063         }
1064         if (entry == NULL) {
1065                 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1066                 return (ENOENT);
1067         }
1068
1069         pp = entry->gpe_pp;
1070         if (pp != NULL) {
1071                 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
1072                         gctl_error(req, "%d", EBUSY);
1073                         return (EBUSY);
1074                 }
1075
1076                 pp->private = NULL;
1077                 entry->gpe_pp = NULL;
1078         }
1079
1080         if (pp != NULL)
1081                 g_wither_provider(pp, ENXIO);
1082
1083         /* Provide feedback if so requested. */
1084         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1085                 sb = sbuf_new_auto();
1086                 G_PART_FULLNAME(table, entry, sb, gp->name);
1087                 sbuf_cat(sb, " deleted\n");
1088                 sbuf_finish(sb);
1089                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1090                 sbuf_delete(sb);
1091         }
1092
1093         if (entry->gpe_created) {
1094                 LIST_REMOVE(entry, gpe_entry);
1095                 g_free(entry);
1096         } else {
1097                 entry->gpe_modified = 0;
1098                 entry->gpe_deleted = 1;
1099         }
1100         return (0);
1101 }
1102
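/*
 * Handle the "destroy" verb: refuse when any partition is open (unless
 * forced), let the scheme destroy its metadata and replace the table with
 * the null scheme so a subsequent "create" or "undo" can run.
 */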
1103 static int
1104 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
1105 {
1106         struct g_consumer *cp;
1107         struct g_geom *gp;
1108         struct g_provider *pp;
1109         struct g_part_entry *entry, *tmp;
1110         struct g_part_table *null, *table;
1111         struct sbuf *sb;
1112         int error;
1113
1114         gp = gpp->gpp_geom;
1115         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1116         g_topology_assert();
1117
1118         table = gp->softc;
1119         /* Check for busy providers. */
1120         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1121                 if (entry->gpe_deleted || entry->gpe_internal)
1122                         continue;
1123                 if (gpp->gpp_force) {
1124                         pp = entry->gpe_pp;
1125                         if (pp == NULL)
1126                                 continue;
1127                         if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
1128                                 continue;
1129                 }
1130                 gctl_error(req, "%d", EBUSY);
1131                 return (EBUSY);
1132         }
1133
1134         if (gpp->gpp_force) {
1135                 /* Destroy all providers. */
1136                 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1137                         pp = entry->gpe_pp;
1138                         if (pp != NULL) {
1139                                 pp->private = NULL;
1140                                 g_wither_provider(pp, ENXIO);
1141                         }
1142                         LIST_REMOVE(entry, gpe_entry);
1143                         g_free(entry);
1144                 }
1145         }
1146
1147         error = G_PART_DESTROY(table, gpp);
1148         if (error) {
1149                 gctl_error(req, "%d", error);
1150                 return (error);
1151         }
1152
1153         gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
1154             M_WAITOK);
1155         null = gp->softc;
1156         null->gpt_gp = gp;
1157         null->gpt_scheme = &g_part_null_scheme;
1158         LIST_INIT(&null->gpt_entry);
1159
1160         cp = LIST_FIRST(&gp->consumer);
1161         pp = cp->provider;
1162         null->gpt_last = pp->mediasize / pp->sectorsize - 1;
1163
1164         null->gpt_depth = table->gpt_depth;
1165         null->gpt_opened = table->gpt_opened;
1166         null->gpt_smhead = table->gpt_smhead;
1167         null->gpt_smtail = table->gpt_smtail;
1168
1169         while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1170                 LIST_REMOVE(entry, gpe_entry);
1171                 g_free(entry);
1172         }
1173         kobj_delete((kobj_t)table, M_GEOM);
1174
1175         /* Provide feedback if so requested. */
1176         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1177                 sb = sbuf_new_auto();
1178                 sbuf_printf(sb, "%s destroyed\n", gp->name);
1179                 sbuf_finish(sb);
1180                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1181                 sbuf_delete(sb);
1182         }
1183         return (0);
1184 }
1185
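/*
 * Handle the "modify" verb: let the scheme change attributes such as the
 * type or label of an existing entry (e.g.
 * "gpart modify -i 1 -t freebsd-swap da0").
 */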
1186 static int
1187 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
1188 {
1189         struct g_geom *gp;
1190         struct g_part_entry *entry;
1191         struct g_part_table *table;
1192         struct sbuf *sb;
1193         int error;
1194
1195         gp = gpp->gpp_geom;
1196         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1197         g_topology_assert();
1198
1199         table = gp->softc;
1200
1201         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1202                 if (entry->gpe_deleted || entry->gpe_internal)
1203                         continue;
1204                 if (entry->gpe_index == gpp->gpp_index)
1205                         break;
1206         }
1207         if (entry == NULL) {
1208                 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1209                 return (ENOENT);
1210         }
1211
1212         error = G_PART_MODIFY(table, entry, gpp);
1213         if (error) {
1214                 gctl_error(req, "%d", error);
1215                 return (error);
1216         }
1217
1218         if (!entry->gpe_created)
1219                 entry->gpe_modified = 1;
1220
1221         /* Provide feedback if so requested. */
1222         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1223                 sb = sbuf_new_auto();
1224                 G_PART_FULLNAME(table, entry, sb, gp->name);
1225                 sbuf_cat(sb, " modified\n");
1226                 sbuf_finish(sb);
1227                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1228                 sbuf_delete(sb);
1229         }
1230         return (0);
1231 }
1232
1233 static int
1234 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
1235 {
1236         gctl_error(req, "%d verb 'move'", ENOSYS);
1237         return (ENOSYS);
1238 }
1239
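/*
 * Handle the "recover" verb: if the table was marked corrupt, ask the
 * scheme to repair it and re-run the integrity check.
 */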
1240 static int
1241 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
1242 {
1243         struct g_part_table *table;
1244         struct g_geom *gp;
1245         struct sbuf *sb;
1246         int error, recovered;
1247
1248         gp = gpp->gpp_geom;
1249         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1250         g_topology_assert();
1251         table = gp->softc;
1252         error = recovered = 0;
1253
1254         if (table->gpt_corrupt) {
1255                 error = G_PART_RECOVER(table);
1256                 if (error == 0)
1257                         error = g_part_check_integrity(table,
1258                             LIST_FIRST(&gp->consumer));
1259                 if (error) {
1260                         gctl_error(req, "%d recovering '%s' failed",
1261                             error, gp->name);
1262                         return (error);
1263                 }
1264                 recovered = 1;
1265         }
1266         /* Provide feedback if so requested. */
1267         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1268                 sb = sbuf_new_auto();
1269                 if (recovered)
1270                         sbuf_printf(sb, "%s recovered\n", gp->name);
1271                 else
1272                         sbuf_printf(sb, "%s recovering is not needed\n",
1273                             gp->name);
1274                 sbuf_finish(sb);
1275                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1276                 sbuf_delete(sb);
1277         }
1278         return (0);
1279 }
1280
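/*
 * Handle the "resize" verb: check the new size against the table limits
 * and neighbouring entries, refuse to shrink an open partition (unless
 * overridden by a debug flag), and propagate the new media size to the
 * entry's provider.
 */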
1281 static int
1282 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
1283 {
1284         struct g_geom *gp;
1285         struct g_provider *pp;
1286         struct g_part_entry *pe, *entry;
1287         struct g_part_table *table;
1288         struct sbuf *sb;
1289         quad_t end;
1290         int error;
1291         off_t mediasize;
1292
1293         gp = gpp->gpp_geom;
1294         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1295         g_topology_assert();
1296         table = gp->softc;
1297
1298         /* check gpp_index */
1299         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1300                 if (entry->gpe_deleted || entry->gpe_internal)
1301                         continue;
1302                 if (entry->gpe_index == gpp->gpp_index)
1303                         break;
1304         }
1305         if (entry == NULL) {
1306                 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1307                 return (ENOENT);
1308         }
1309
1310         /* check gpp_size */
1311         end = entry->gpe_start + gpp->gpp_size - 1;
1312         if (gpp->gpp_size < 1 || end > table->gpt_last) {
1313                 gctl_error(req, "%d size '%jd'", EINVAL,
1314                     (intmax_t)gpp->gpp_size);
1315                 return (EINVAL);
1316         }
1317
1318         LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
1319                 if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
1320                         continue;
1321                 if (end >= pe->gpe_start && end <= pe->gpe_end) {
1322                         gctl_error(req, "%d end '%jd'", ENOSPC,
1323                             (intmax_t)end);
1324                         return (ENOSPC);
1325                 }
1326                 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
1327                         gctl_error(req, "%d size '%jd'", ENOSPC,
1328                             (intmax_t)gpp->gpp_size);
1329                         return (ENOSPC);
1330                 }
1331         }
1332
1333         pp = entry->gpe_pp;
1334         if ((g_debugflags & 16) == 0 &&
1335             (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
1336                 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
1337                         /* Deny shrinking of an opened partition. */
1338                         gctl_error(req, "%d", EBUSY);
1339                         return (EBUSY);
1340                 }
1341         }
1342
1343         error = G_PART_RESIZE(table, entry, gpp);
1344         if (error) {
1345                 gctl_error(req, "%d%s", error, error != EBUSY ? "":
1346                     " resizing will lead to unexpected shrinking"
1347                     " due to alignment");
1348                 return (error);
1349         }
1350
1351         if (!entry->gpe_created)
1352                 entry->gpe_modified = 1;
1353
1354         /* update mediasize of changed provider */
1355         mediasize = (entry->gpe_end - entry->gpe_start + 1) *
1356                 pp->sectorsize;
1357         g_resize_provider(pp, mediasize);
1358
1359         /* Provide feedback if so requested. */
1360         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1361                 sb = sbuf_new_auto();
1362                 G_PART_FULLNAME(table, entry, sb, gp->name);
1363                 sbuf_cat(sb, " resized\n");
1364                 sbuf_finish(sb);
1365                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1366                 sbuf_delete(sb);
1367         }
1368         return (0);
1369 }
1370
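/*
 * Handle the "set" and "unset" verbs: toggle a scheme-specific attribute
 * on a single entry or, when no index is given, on the table itself
 * (e.g. "gpart set -a active -i 1 da0").
 */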
1371 static int
1372 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1373     unsigned int set)
1374 {
1375         struct g_geom *gp;
1376         struct g_part_entry *entry;
1377         struct g_part_table *table;
1378         struct sbuf *sb;
1379         int error;
1380
1381         gp = gpp->gpp_geom;
1382         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1383         g_topology_assert();
1384
1385         table = gp->softc;
1386
1387         if (gpp->gpp_parms & G_PART_PARM_INDEX) {
1388                 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1389                         if (entry->gpe_deleted || entry->gpe_internal)
1390                                 continue;
1391                         if (entry->gpe_index == gpp->gpp_index)
1392                                 break;
1393                 }
1394                 if (entry == NULL) {
1395                         gctl_error(req, "%d index '%d'", ENOENT,
1396                             gpp->gpp_index);
1397                         return (ENOENT);
1398                 }
1399         } else
1400                 entry = NULL;
1401
1402         error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1403         if (error) {
1404                 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1405                 return (error);
1406         }
1407
1408         /* Provide feedback if so requested. */
1409         if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1410                 sb = sbuf_new_auto();
1411                 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
1412                     (set) ? "" : "un");
1413                 if (entry)
1414                         G_PART_FULLNAME(table, entry, sb, gp->name);
1415                 else
1416                         sbuf_cat(sb, gp->name);
1417                 sbuf_cat(sb, "\n");
1418                 sbuf_finish(sb);
1419                 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1420                 sbuf_delete(sb);
1421         }
1422         return (0);
1423 }
1424
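/*
 * Handle the "undo" verb: discard all uncommitted changes.  Providers of
 * entries created since the table was opened are withered, the metadata
 * is re-probed when the table itself was freshly created, and the on-disk
 * table is re-read to restore the previous state.
 */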
1425 static int
1426 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
1427 {
1428         struct g_consumer *cp;
1429         struct g_provider *pp;
1430         struct g_geom *gp;
1431         struct g_part_entry *entry, *tmp;
1432         struct g_part_table *table;
1433         int error, reprobe;
1434
1435         gp = gpp->gpp_geom;
1436         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1437         g_topology_assert();
1438
1439         table = gp->softc;
1440         if (!table->gpt_opened) {
1441                 gctl_error(req, "%d", EPERM);
1442                 return (EPERM);
1443         }
1444
1445         cp = LIST_FIRST(&gp->consumer);
1446         LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1447                 entry->gpe_modified = 0;
1448                 if (entry->gpe_created) {
1449                         pp = entry->gpe_pp;
1450                         if (pp != NULL) {
1451                                 pp->private = NULL;
1452                                 entry->gpe_pp = NULL;
1453                                 g_wither_provider(pp, ENXIO);
1454                         }
1455                         entry->gpe_deleted = 1;
1456                 }
1457                 if (entry->gpe_deleted) {
1458                         LIST_REMOVE(entry, gpe_entry);
1459                         g_free(entry);
1460                 }
1461         }
1462
1463         g_topology_unlock();
1464
1465         reprobe = (table->gpt_scheme == &g_part_null_scheme ||
1466             table->gpt_created) ? 1 : 0;
1467
1468         if (reprobe) {
1469                 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1470                         if (entry->gpe_internal)
1471                                 continue;
1472                         error = EBUSY;
1473                         goto fail;
1474                 }
1475                 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1476                         LIST_REMOVE(entry, gpe_entry);
1477                         g_free(entry);
1478                 }
1479                 error = g_part_probe(gp, cp, table->gpt_depth);
1480                 if (error) {
1481                         g_topology_lock();
1482                         g_access(cp, -1, -1, -1);
1483                         g_part_wither(gp, error);
1484                         return (0);
1485                 }
1486                 table = gp->softc;
1487
1488                 /*
1489                  * Synthesize a disk geometry. Some partitioning schemes
1490                  * depend on it and since some file systems need it even
1491                  * when the partition scheme doesn't, we do it here in
1492                  * scheme-independent code.
1493                  */
1494                 pp = cp->provider;
1495                 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1496         }
1497
1498         error = G_PART_READ(table, cp);
1499         if (error)
1500                 goto fail;
1501         error = g_part_check_integrity(table, cp);
1502         if (error)
1503                 goto fail;
1504
1505         g_topology_lock();
1506         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1507                 if (!entry->gpe_internal)
1508                         g_part_new_provider(gp, table, entry);
1509         }
1510
1511         table->gpt_opened = 0;
1512         g_access(cp, -1, -1, -1);
1513         return (0);
1514
1515 fail:
1516         g_topology_lock();
1517         gctl_error(req, "%d", error);
1518         return (error);
1519 }
1520
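     /*
      * Tear down the in-memory partition table attached to the geom and
      * let the geom wither, propagating the given error to its providers.
      */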
1521 static void
1522 g_part_wither(struct g_geom *gp, int error)
1523 {
1524         struct g_part_entry *entry;
1525         struct g_part_table *table;
1526
1527         table = gp->softc;
1528         if (table != NULL) {
1529                 G_PART_DESTROY(table, NULL);
1530                 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1531                         LIST_REMOVE(entry, gpe_entry);
1532                         g_free(entry);
1533                 }
1534                 if (gp->softc != NULL) {
1535                         kobj_delete((kobj_t)gp->softc, M_GEOM);
1536                         gp->softc = NULL;
1537                 }
1538         }
1539         g_wither_geom(gp, error);
1540 }
1541
1542 /*
1543  * Class methods.
1544  */
1545
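     /*
      * Control request dispatcher.  The verb is mapped to a ctl request
      * with a set of mandatory and optional parameters, the request
      * arguments are parsed into a g_part_parms structure, write access
      * is obtained for requests that modify the table, the scheme is
      * given a chance to pre-check the parameters, and finally the
      * per-verb handler is invoked.
      */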
1546 static void
1547 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
1548 {
1549         struct g_part_parms gpp;
1550         struct g_part_table *table;
1551         struct gctl_req_arg *ap;
1552         enum g_part_ctl ctlreq;
1553         unsigned int i, mparms, oparms, parm;
1554         int auto_commit, close_on_error;
1555         int error, modifies;
1556
1557         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1558         g_topology_assert();
1559
1560         ctlreq = G_PART_CTL_NONE;
1561         modifies = 1;
1562         mparms = 0;
1563         oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1564         switch (*verb) {
1565         case 'a':
1566                 if (!strcmp(verb, "add")) {
1567                         ctlreq = G_PART_CTL_ADD;
1568                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1569                             G_PART_PARM_START | G_PART_PARM_TYPE;
1570                         oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1571                 }
1572                 break;
1573         case 'b':
1574                 if (!strcmp(verb, "bootcode")) {
1575                         ctlreq = G_PART_CTL_BOOTCODE;
1576                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
1577                 }
1578                 break;
1579         case 'c':
1580                 if (!strcmp(verb, "commit")) {
1581                         ctlreq = G_PART_CTL_COMMIT;
1582                         mparms |= G_PART_PARM_GEOM;
1583                         modifies = 0;
1584                 } else if (!strcmp(verb, "create")) {
1585                         ctlreq = G_PART_CTL_CREATE;
1586                         mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1587                         oparms |= G_PART_PARM_ENTRIES;
1588                 }
1589                 break;
1590         case 'd':
1591                 if (!strcmp(verb, "delete")) {
1592                         ctlreq = G_PART_CTL_DELETE;
1593                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1594                 } else if (!strcmp(verb, "destroy")) {
1595                         ctlreq = G_PART_CTL_DESTROY;
1596                         mparms |= G_PART_PARM_GEOM;
1597                         oparms |= G_PART_PARM_FORCE;
1598                 }
1599                 break;
1600         case 'm':
1601                 if (!strcmp(verb, "modify")) {
1602                         ctlreq = G_PART_CTL_MODIFY;
1603                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1604                         oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1605                 } else if (!strcmp(verb, "move")) {
1606                         ctlreq = G_PART_CTL_MOVE;
1607                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1608                 }
1609                 break;
1610         case 'r':
1611                 if (!strcmp(verb, "recover")) {
1612                         ctlreq = G_PART_CTL_RECOVER;
1613                         mparms |= G_PART_PARM_GEOM;
1614                 } else if (!strcmp(verb, "resize")) {
1615                         ctlreq = G_PART_CTL_RESIZE;
1616                         mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
1617                             G_PART_PARM_SIZE;
1618                 }
1619                 break;
1620         case 's':
1621                 if (!strcmp(verb, "set")) {
1622                         ctlreq = G_PART_CTL_SET;
1623                         mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1624                         oparms |= G_PART_PARM_INDEX;
1625                 }
1626                 break;
1627         case 'u':
1628                 if (!strcmp(verb, "undo")) {
1629                         ctlreq = G_PART_CTL_UNDO;
1630                         mparms |= G_PART_PARM_GEOM;
1631                         modifies = 0;
1632                 } else if (!strcmp(verb, "unset")) {
1633                         ctlreq = G_PART_CTL_UNSET;
1634                         mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1635                         oparms |= G_PART_PARM_INDEX;
1636                 }
1637                 break;
1638         }
1639         if (ctlreq == G_PART_CTL_NONE) {
1640                 gctl_error(req, "%d verb '%s'", EINVAL, verb);
1641                 return;
1642         }
1643
1644         bzero(&gpp, sizeof(gpp));
1645         for (i = 0; i < req->narg; i++) {
1646                 ap = &req->arg[i];
1647                 parm = 0;
1648                 switch (ap->name[0]) {
1649                 case 'a':
1650                         if (!strcmp(ap->name, "arg0")) {
1651                                 parm = mparms &
1652                                     (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
1653                         }
1654                         if (!strcmp(ap->name, "attrib"))
1655                                 parm = G_PART_PARM_ATTRIB;
1656                         break;
1657                 case 'b':
1658                         if (!strcmp(ap->name, "bootcode"))
1659                                 parm = G_PART_PARM_BOOTCODE;
1660                         break;
1661                 case 'c':
1662                         if (!strcmp(ap->name, "class"))
1663                                 continue;
1664                         break;
1665                 case 'e':
1666                         if (!strcmp(ap->name, "entries"))
1667                                 parm = G_PART_PARM_ENTRIES;
1668                         break;
1669                 case 'f':
1670                         if (!strcmp(ap->name, "flags"))
1671                                 parm = G_PART_PARM_FLAGS;
1672                         else if (!strcmp(ap->name, "force"))
1673                                 parm = G_PART_PARM_FORCE;
1674                         break;
1675                 case 'i':
1676                         if (!strcmp(ap->name, "index"))
1677                                 parm = G_PART_PARM_INDEX;
1678                         break;
1679                 case 'l':
1680                         if (!strcmp(ap->name, "label"))
1681                                 parm = G_PART_PARM_LABEL;
1682                         break;
1683                 case 'o':
1684                         if (!strcmp(ap->name, "output"))
1685                                 parm = G_PART_PARM_OUTPUT;
1686                         break;
1687                 case 's':
1688                         if (!strcmp(ap->name, "scheme"))
1689                                 parm = G_PART_PARM_SCHEME;
1690                         else if (!strcmp(ap->name, "size"))
1691                                 parm = G_PART_PARM_SIZE;
1692                         else if (!strcmp(ap->name, "start"))
1693                                 parm = G_PART_PARM_START;
1694                         break;
1695                 case 't':
1696                         if (!strcmp(ap->name, "type"))
1697                                 parm = G_PART_PARM_TYPE;
1698                         break;
1699                 case 'v':
1700                         if (!strcmp(ap->name, "verb"))
1701                                 continue;
1702                         else if (!strcmp(ap->name, "version"))
1703                                 parm = G_PART_PARM_VERSION;
1704                         break;
1705                 }
1706                 if ((parm & (mparms | oparms)) == 0) {
1707                         gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1708                         return;
1709                 }
1710                 switch (parm) {
1711                 case G_PART_PARM_ATTRIB:
1712                         error = g_part_parm_str(req, ap->name,
1713                             &gpp.gpp_attrib);
1714                         break;
1715                 case G_PART_PARM_BOOTCODE:
1716                         error = g_part_parm_bootcode(req, ap->name,
1717                             &gpp.gpp_codeptr, &gpp.gpp_codesize);
1718                         break;
1719                 case G_PART_PARM_ENTRIES:
1720                         error = g_part_parm_intmax(req, ap->name,
1721                             &gpp.gpp_entries);
1722                         break;
1723                 case G_PART_PARM_FLAGS:
1724                         error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
1725                         break;
1726                 case G_PART_PARM_FORCE:
1727                         error = g_part_parm_uint32(req, ap->name,
1728                             &gpp.gpp_force);
1729                         break;
1730                 case G_PART_PARM_GEOM:
1731                         error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
1732                         break;
1733                 case G_PART_PARM_INDEX:
1734                         error = g_part_parm_intmax(req, ap->name,
1735                             &gpp.gpp_index);
1736                         break;
1737                 case G_PART_PARM_LABEL:
1738                         error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
1739                         break;
1740                 case G_PART_PARM_OUTPUT:
1741                         error = 0;      /* Write-only parameter */
1742                         break;
1743                 case G_PART_PARM_PROVIDER:
1744                         error = g_part_parm_provider(req, ap->name,
1745                             &gpp.gpp_provider);
1746                         break;
1747                 case G_PART_PARM_SCHEME:
1748                         error = g_part_parm_scheme(req, ap->name,
1749                             &gpp.gpp_scheme);
1750                         break;
1751                 case G_PART_PARM_SIZE:
1752                         error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
1753                         break;
1754                 case G_PART_PARM_START:
1755                         error = g_part_parm_quad(req, ap->name,
1756                             &gpp.gpp_start);
1757                         break;
1758                 case G_PART_PARM_TYPE:
1759                         error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
1760                         break;
1761                 case G_PART_PARM_VERSION:
1762                         error = g_part_parm_uint32(req, ap->name,
1763                             &gpp.gpp_version);
1764                         break;
1765                 default:
1766                         error = EDOOFUS;
1767                         gctl_error(req, "%d %s", error, ap->name);
1768                         break;
1769                 }
1770                 if (error != 0) {
1771                         if (error == ENOATTR) {
1772                                 gctl_error(req, "%d param '%s'", error,
1773                                     ap->name);
1774                         }
1775                         return;
1776                 }
1777                 gpp.gpp_parms |= parm;
1778         }
1779         if ((gpp.gpp_parms & mparms) != mparms) {
1780                 parm = mparms - (gpp.gpp_parms & mparms);
1781                 gctl_error(req, "%d param '%x'", ENOATTR, parm);
1782                 return;
1783         }
1784
1785         /* Obtain permissions if possible/necessary. */
1786         close_on_error = 0;
1787         table = NULL;
1788         if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1789                 table = gpp.gpp_geom->softc;
1790                 if (table != NULL && table->gpt_corrupt &&
1791                     ctlreq != G_PART_CTL_DESTROY &&
1792                     ctlreq != G_PART_CTL_RECOVER) {
1793                         gctl_error(req, "%d table '%s' is corrupt",
1794                             EPERM, gpp.gpp_geom->name);
1795                         return;
1796                 }
1797                 if (table != NULL && !table->gpt_opened) {
1798                         error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1799                             1, 1, 1);
1800                         if (error) {
1801                                 gctl_error(req, "%d geom '%s'", error,
1802                                     gpp.gpp_geom->name);
1803                                 return;
1804                         }
1805                         table->gpt_opened = 1;
1806                         close_on_error = 1;
1807                 }
1808         }
1809
1810         /* Allow the scheme to check or modify the parameters. */
1811         if (table != NULL) {
1812                 error = G_PART_PRECHECK(table, ctlreq, &gpp);
1813                 if (error) {
1814                         gctl_error(req, "%d pre-check failed", error);
1815                         goto out;
1816                 }
1817         } else
1818                 error = EDOOFUS;        /* Prevent bogus uninit. warning. */
1819
1820         switch (ctlreq) {
1821         case G_PART_CTL_NONE:
1822                 panic("%s", __func__);
1823         case G_PART_CTL_ADD:
1824                 error = g_part_ctl_add(req, &gpp);
1825                 break;
1826         case G_PART_CTL_BOOTCODE:
1827                 error = g_part_ctl_bootcode(req, &gpp);
1828                 break;
1829         case G_PART_CTL_COMMIT:
1830                 error = g_part_ctl_commit(req, &gpp);
1831                 break;
1832         case G_PART_CTL_CREATE:
1833                 error = g_part_ctl_create(req, &gpp);
1834                 break;
1835         case G_PART_CTL_DELETE:
1836                 error = g_part_ctl_delete(req, &gpp);
1837                 break;
1838         case G_PART_CTL_DESTROY:
1839                 error = g_part_ctl_destroy(req, &gpp);
1840                 break;
1841         case G_PART_CTL_MODIFY:
1842                 error = g_part_ctl_modify(req, &gpp);
1843                 break;
1844         case G_PART_CTL_MOVE:
1845                 error = g_part_ctl_move(req, &gpp);
1846                 break;
1847         case G_PART_CTL_RECOVER:
1848                 error = g_part_ctl_recover(req, &gpp);
1849                 break;
1850         case G_PART_CTL_RESIZE:
1851                 error = g_part_ctl_resize(req, &gpp);
1852                 break;
1853         case G_PART_CTL_SET:
1854                 error = g_part_ctl_setunset(req, &gpp, 1);
1855                 break;
1856         case G_PART_CTL_UNDO:
1857                 error = g_part_ctl_undo(req, &gpp);
1858                 break;
1859         case G_PART_CTL_UNSET:
1860                 error = g_part_ctl_setunset(req, &gpp, 0);
1861                 break;
1862         }
1863
1864         /* Implement automatic commit. */
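             /*
              * A 'C' character in the flags parameter requests an
              * immediate commit; the gpart(8) utility typically includes
              * it by default unless the commit is deferred with its -f
              * option.
              */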
1865         if (!error) {
1866                 auto_commit = (modifies &&
1867                     (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1868                     strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1869                 if (auto_commit) {
1870                         KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
1871                             __func__));
1872                         error = g_part_ctl_commit(req, &gpp);
1873                 }
1874         }
1875
1876  out:
1877         if (error && close_on_error) {
1878                 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1879                 table->gpt_opened = 0;
1880         }
1881 }
1882
1883 static int
1884 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1885     struct g_geom *gp)
1886 {
1887
1888         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1889         g_topology_assert();
1890
1891         g_part_wither(gp, EINVAL);
1892         return (0);
1893 }
1894
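     /*
      * Taste method: examine a provider for a recognizable partition
      * table.  A geom and consumer are created, the registered schemes
      * are probed, the table is read and checked, and a provider is
      * created for every regular (non-internal) entry.  A root mount
      * hold is kept while probing.  NULL is returned if the provider is
      * unsuitable (open for writing, no media, a leaf of a nested
      * scheme) or no table is found.
      */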
1895 static struct g_geom *
1896 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1897 {
1898         struct g_consumer *cp;
1899         struct g_geom *gp;
1900         struct g_part_entry *entry;
1901         struct g_part_table *table;
1902         struct root_hold_token *rht;
1903         int attr, depth;
1904         int error;
1905
1906         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
1907         g_topology_assert();
1908
1909         /* Skip providers that are already open for writing. */
1910         if (pp->acw > 0)
1911                 return (NULL);
1912
1913         /*
1914          * Create a GEOM with consumer and hook it up to the provider.
1915          * With that we become part of the topology. Obtain read access
1916          * to the provider.
1917          */
1918         gp = g_new_geomf(mp, "%s", pp->name);
1919         cp = g_new_consumer(gp);
1920         cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
1921         error = g_attach(cp, pp);
1922         if (error == 0)
1923                 error = g_access(cp, 1, 0, 0);
1924         if (error != 0) {
1925                 if (cp->provider)
1926                         g_detach(cp);
1927                 g_destroy_consumer(cp);
1928                 g_destroy_geom(gp);
1929                 return (NULL);
1930         }
1931
1932         rht = root_mount_hold(mp->name);
1933         g_topology_unlock();
1934
1935         /*
1936          * Short-circuit the whole probing galore when there's no
1937          * media present.
1938          */
1939         if (pp->mediasize == 0 || pp->sectorsize == 0) {
1940                 error = ENODEV;
1941                 goto fail;
1942         }
1943
1944         /* Make sure we can nest and if so, determine our depth. */
1945         error = g_getattr("PART::isleaf", cp, &attr);
1946         if (!error && attr) {
1947                 error = ENODEV;
1948                 goto fail;
1949         }
1950         error = g_getattr("PART::depth", cp, &attr);
1951         depth = (!error) ? attr + 1 : 0;
1952
1953         error = g_part_probe(gp, cp, depth);
1954         if (error)
1955                 goto fail;
1956
1957         table = gp->softc;
1958
1959         /*
1960          * Synthesize a disk geometry. Some partitioning schemes
1961          * depend on it and since some file systems need it even
1962          * when the partition scheme doesn't, we do it here in
1963          * scheme-independent code.
1964          */
1965         g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1966
1967         error = G_PART_READ(table, cp);
1968         if (error)
1969                 goto fail;
1970         error = g_part_check_integrity(table, cp);
1971         if (error)
1972                 goto fail;
1973
1974         g_topology_lock();
1975         LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1976                 if (!entry->gpe_internal)
1977                         g_part_new_provider(gp, table, entry);
1978         }
1979
1980         root_mount_rel(rht);
1981         g_access(cp, -1, 0, 0);
1982         return (gp);
1983
1984  fail:
1985         g_topology_lock();
1986         root_mount_rel(rht);
1987         g_access(cp, -1, 0, 0);
1988         g_detach(cp);
1989         g_destroy_consumer(cp);
1990         g_destroy_geom(gp);
1991         return (NULL);
1992 }
1993
1994 /*
1995  * Geom methods.
1996  */
1997
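     /*
      * Access method: pass the access request for a partition provider
      * down to the consumer of the underlying provider.  Write access
      * additionally counts as exclusive access, so the underlying
      * provider cannot be opened for writing elsewhere while any
      * partition is open for writing.
      */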
1998 static int
1999 g_part_access(struct g_provider *pp, int dr, int dw, int de)
2000 {
2001         struct g_consumer *cp;
2002
2003         G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
2004             dw, de));
2005
2006         cp = LIST_FIRST(&pp->geom->consumer);
2007
2008         /* We always gain write-exclusive access. */
2009         return (g_access(cp, dr, dw, dw + de));
2010 }
2011
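     /*
      * Dumpconf method: emit configuration for the GEOM config sysctls.
      * With a NULL indent a single libdisk-compatible line is produced
      * for the provider; otherwise XML sections are written for the
      * provider (start, end, type, offset, ...) or the geom (scheme,
      * entry count, first/last sectors, geometry, state), and the scheme
      * may append its own elements.
      */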
2012 static void
2013 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2014     struct g_consumer *cp, struct g_provider *pp)
2015 {
2016         char buf[64];
2017         struct g_part_entry *entry;
2018         struct g_part_table *table;
2019
2020         KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
2021         table = gp->softc;
2022
2023         if (indent == NULL) {
2024                 KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
2025                 entry = pp->private;
2026                 if (entry == NULL)
2027                         return;
2028                 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
2029                     (uintmax_t)entry->gpe_offset,
2030                     G_PART_TYPE(table, entry, buf, sizeof(buf)));
2031                 /*
2032                  * libdisk compatibility quirk - the scheme dumps the
2033                  * slicer name and partition type in a way that is
2034                  * compatible with libdisk. When libdisk is not used
2035                  * anymore, this should go away.
2036                  */
2037                 G_PART_DUMPCONF(table, entry, sb, indent);
2038         } else if (cp != NULL) {        /* Consumer configuration. */
2039                 KASSERT(pp == NULL, ("%s", __func__));
2040                 /* none */
2041         } else if (pp != NULL) {        /* Provider configuration. */
2042                 entry = pp->private;
2043                 if (entry == NULL)
2044                         return;
2045                 sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
2046                     (uintmax_t)entry->gpe_start);
2047                 sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
2048                     (uintmax_t)entry->gpe_end);
2049                 sbuf_printf(sb, "%s<index>%u</index>\n", indent,
2050                     entry->gpe_index);
2051                 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
2052                     G_PART_TYPE(table, entry, buf, sizeof(buf)));
2053                 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
2054                     (uintmax_t)entry->gpe_offset);
2055                 sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
2056                     (uintmax_t)pp->mediasize);
2057                 G_PART_DUMPCONF(table, entry, sb, indent);
2058         } else {                        /* Geom configuration. */
2059                 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
2060                     table->gpt_scheme->name);
2061                 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
2062                     table->gpt_entries);
2063                 sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
2064                     (uintmax_t)table->gpt_first);
2065                 sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
2066                     (uintmax_t)table->gpt_last);
2067                 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
2068                     table->gpt_sectors);
2069                 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
2070                     table->gpt_heads);
2071                 sbuf_printf(sb, "%s<state>%s</state>\n", indent,
2072                     table->gpt_corrupt ? "CORRUPT": "OK");
2073                 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
2074                     table->gpt_opened ? "true": "false");
2075                 G_PART_DUMPCONF(table, NULL, sb, indent);
2076         }
2077 }
2078
2079 /*-
2080  * This start routine is only called for non-trivial requests; all the
2081  * trivial ones are handled autonomously by the slice code.
2082  * For requests we handle here, we must call the g_io_deliver() on the
2083  * bio, and return non-zero to indicate to the slice code that we did so.
2084  * This code executes in the "DOWN" I/O path, this means:
2085  *    * No sleeping.
2086  *    * Don't grab the topology lock.
2087  *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
2088  */
2089 static int
2090 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
2091 {
2092         struct g_part_table *table;
2093
2094         table = pp->geom->softc;
2095         return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
2096 }
2097
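     /*
      * Resize method: called when the underlying provider changes size.
      * If auto_resize is enabled, the scheme is asked to adjust the table
      * to the new media size and the table is left open so the result can
      * be saved with "gpart commit" or reverted with "gpart undo".  A
      * table that no longer passes the integrity check is withered.
      */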
2098 static void
2099 g_part_resize(struct g_consumer *cp)
2100 {
2101         struct g_part_table *table;
2102
2103         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2104         g_topology_assert();
2105
2106         if (auto_resize == 0)
2107                 return;
2108
2109         table = cp->geom->softc;
2110         if (table->gpt_opened == 0) {
2111                 if (g_access(cp, 1, 1, 1) != 0)
2112                         return;
2113                 table->gpt_opened = 1;
2114         }
2115         if (G_PART_RESIZE(table, NULL, NULL) == 0)
2116                 printf("GEOM_PART: %s was automatically resized.\n"
2117                     "  Use `gpart commit %s` to save changes or "
2118                     "`gpart undo %s` to revert them.\n", cp->geom->name,
2119                     cp->geom->name, cp->geom->name);
2120         if (g_part_check_integrity(table, cp) != 0) {
2121                 g_access(cp, -1, -1, -1);
2122                 table->gpt_opened = 0;
2123                 g_part_wither(table->gpt_gp, ENXIO);
2124         }
2125 }
2126
2127 static void
2128 g_part_orphan(struct g_consumer *cp)
2129 {
2130         struct g_provider *pp;
2131         struct g_part_table *table;
2132
2133         pp = cp->provider;
2134         KASSERT(pp != NULL, ("%s", __func__));
2135         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
2136         g_topology_assert();
2137
2138         KASSERT(pp->error != 0, ("%s", __func__));
2139         table = cp->geom->softc;
2140         if (table != NULL && table->gpt_opened)
2141                 g_access(cp, -1, -1, -1);
2142         g_part_wither(cp->geom, pp->error);
2143 }
2144
2145 static void
2146 g_part_spoiled(struct g_consumer *cp)
2147 {
2148
2149         G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2150         g_topology_assert();
2151
2152         cp->flags |= G_CF_ORPHAN;
2153         g_part_wither(cp->geom, ENXIO);
2154 }
2155
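     /*
      * Start method: handles I/O requests arriving on a partition
      * provider.  Reads, writes and deletes are clipped to the partition
      * and forwarded to the underlying provider with the partition offset
      * applied.  BIO_GETATTR answers the firmware geometry, nesting and
      * scheme/type attributes and translates kernel dump requests so a
      * dump lands inside the partition.  BIO_FLUSH and unhandled
      * attributes are cloned and passed down; anything else is rejected
      * with EOPNOTSUPP.
      */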
2156 static void
2157 g_part_start(struct bio *bp)
2158 {
2159         struct bio *bp2;
2160         struct g_consumer *cp;
2161         struct g_geom *gp;
2162         struct g_part_entry *entry;
2163         struct g_part_table *table;
2164         struct g_kerneldump *gkd;
2165         struct g_provider *pp;
2166         char buf[64];
2167
2168         biotrack(bp, __func__);
2169
2170         pp = bp->bio_to;
2171         gp = pp->geom;
2172         table = gp->softc;
2173         cp = LIST_FIRST(&gp->consumer);
2174
2175         G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
2176             pp->name));
2177
2178         entry = pp->private;
2179         if (entry == NULL) {
2180                 g_io_deliver(bp, ENXIO);
2181                 return;
2182         }
2183
2184         switch(bp->bio_cmd) {
2185         case BIO_DELETE:
2186         case BIO_READ:
2187         case BIO_WRITE:
2188                 if (bp->bio_offset >= pp->mediasize) {
2189                         g_io_deliver(bp, EIO);
2190                         return;
2191                 }
2192                 bp2 = g_clone_bio(bp);
2193                 if (bp2 == NULL) {
2194                         g_io_deliver(bp, ENOMEM);
2195                         return;
2196                 }
2197                 if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
2198                         bp2->bio_length = pp->mediasize - bp2->bio_offset;
2199                 bp2->bio_done = g_std_done;
2200                 bp2->bio_offset += entry->gpe_offset;
2201                 g_io_request(bp2, cp);
2202                 return;
2203         case BIO_FLUSH:
2204                 break;
2205         case BIO_GETATTR:
2206                 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
2207                         return;
2208                 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
2209                         return;
2210                 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
2211                         return;
2212                 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
2213                         return;
2214                 if (g_handleattr_str(bp, "PART::scheme",
2215                     table->gpt_scheme->name))
2216                         return;
2217                 if (g_handleattr_str(bp, "PART::type",
2218                     G_PART_TYPE(table, entry, buf, sizeof(buf))))
2219                         return;
2220                 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
2221                         /*
2222                          * Check that the partition is suitable for kernel
2223                          * dumps. Typically only swap partitions should be
2224                          * used. If the request comes from the nested scheme
2225                          * we allow dumping there as well.
2226                          */
2227                         if ((bp->bio_from == NULL ||
2228                             bp->bio_from->geom->class != &g_part_class) &&
2229                             G_PART_DUMPTO(table, entry) == 0) {
2230                                 g_io_deliver(bp, ENODEV);
2231                                 printf("GEOM_PART: Partition '%s' not suitable"
2232                                     " for kernel dumps (wrong type?)\n",
2233                                     pp->name);
2234                                 return;
2235                         }
2236                         gkd = (struct g_kerneldump *)bp->bio_data;
2237                         if (gkd->offset >= pp->mediasize) {
2238                                 g_io_deliver(bp, EIO);
2239                                 return;
2240                         }
2241                         if (gkd->offset + gkd->length > pp->mediasize)
2242                                 gkd->length = pp->mediasize - gkd->offset;
2243                         gkd->offset += entry->gpe_offset;
2244                 }
2245                 break;
2246         default:
2247                 g_io_deliver(bp, EOPNOTSUPP);
2248                 return;
2249         }
2250
2251         bp2 = g_clone_bio(bp);
2252         if (bp2 == NULL) {
2253                 g_io_deliver(bp, ENOMEM);
2254                 return;
2255         }
2256         bp2->bio_done = g_std_done;
2257         g_io_request(bp2, cp);
2258 }
2259
2260 static void
2261 g_part_init(struct g_class *mp)
2262 {
2263
2264         TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
2265 }
2266
2267 static void
2268 g_part_fini(struct g_class *mp)
2269 {
2270
2271         TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
2272 }
2273
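     /*
      * Event handler run from the GEOM event queue when a scheme module
      * is unloaded.  Geoms using the scheme that have no open providers
      * or consumers are withered; if any geom is still in use the unload
      * fails with EBUSY and the scheme stays registered.
      */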
2274 static void
2275 g_part_unload_event(void *arg, int flag)
2276 {
2277         struct g_consumer *cp;
2278         struct g_geom *gp;
2279         struct g_provider *pp;
2280         struct g_part_scheme *scheme;
2281         struct g_part_table *table;
2282         uintptr_t *xchg;
2283         int acc, error;
2284
2285         if (flag == EV_CANCEL)
2286                 return;
2287
2288         xchg = arg;
2289         error = 0;
2290         scheme = (void *)(*xchg);
2291
2292         g_topology_assert();
2293
2294         LIST_FOREACH(gp, &g_part_class.geom, geom) {
2295                 table = gp->softc;
2296                 if (table->gpt_scheme != scheme)
2297                         continue;
2298
2299                 acc = 0;
2300                 LIST_FOREACH(pp, &gp->provider, provider)
2301                         acc += pp->acr + pp->acw + pp->ace;
2302                 LIST_FOREACH(cp, &gp->consumer, consumer)
2303                         acc += cp->acr + cp->acw + cp->ace;
2304
2305                 if (!acc)
2306                         g_part_wither(gp, ENOSYS);
2307                 else
2308                         error = EBUSY;
2309         }
2310
2311         if (!error)
2312                 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
2313
2314         *xchg = error;
2315 }
2316
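     /*
      * Module event handler called by the individual scheme modules.  On
      * MOD_LOAD the scheme is registered and all providers are retasted
      * (a duplicate registration only triggers a console warning); on
      * MOD_UNLOAD the work is handed to g_part_unload_event() through the
      * event queue and its result is returned.
      */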
2317 int
2318 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
2319 {
2320         struct g_part_scheme *iter;
2321         uintptr_t arg;
2322         int error;
2323
2324         error = 0;
2325         switch (type) {
2326         case MOD_LOAD:
2327                 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
2328                         if (scheme == iter) {
2329                                 printf("GEOM_PART: scheme %s is already "
2330                                     "registered!\n", scheme->name);
2331                                 break;
2332                         }
2333                 }
2334                 if (iter == NULL) {
2335                         TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
2336                             scheme_list);
2337                         g_retaste(&g_part_class);
2338                 }
2339                 break;
2340         case MOD_UNLOAD:
2341                 arg = (uintptr_t)scheme;
2342                 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
2343                     NULL);
2344                 if (error == 0)
2345                         error = arg;
2346                 break;
2347         default:
2348                 error = EOPNOTSUPP;
2349                 break;
2350         }
2351
2352         return (error);
2353 }