2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
36 #include <sys/limits.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
42 #include <sys/systm.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/geom_int.h>
47 #include <geom/part/g_part.h>
49 #include "g_part_if.h"
52 #define _PATH_DEV "/dev/"
55 static kobj_method_t g_part_null_methods[] = {
59 static struct g_part_scheme g_part_null_scheme = {
62 sizeof(struct g_part_table),
65 TAILQ_HEAD(, g_part_scheme) g_part_schemes =
66 TAILQ_HEAD_INITIALIZER(g_part_schemes);
68 struct g_part_alias_list {
70 enum g_part_alias alias;
71 } g_part_alias_list[G_PART_ALIAS_COUNT] = {
72 { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
73 { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
74 { "apple-label", G_PART_ALIAS_APPLE_LABEL },
75 { "apple-raid", G_PART_ALIAS_APPLE_RAID },
76 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
77 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
78 { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
79 { "efi", G_PART_ALIAS_EFI },
80 { "freebsd", G_PART_ALIAS_FREEBSD },
81 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
82 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
83 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
84 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
85 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
86 { "linux-data", G_PART_ALIAS_LINUX_DATA },
87 { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
88 { "linux-raid", G_PART_ALIAS_LINUX_RAID },
89 { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
90 { "mbr", G_PART_ALIAS_MBR }
94 * The GEOM partitioning class.
96 static g_ctl_req_t g_part_ctlreq;
97 static g_ctl_destroy_geom_t g_part_destroy_geom;
98 static g_fini_t g_part_fini;
99 static g_init_t g_part_init;
100 static g_taste_t g_part_taste;
102 static g_access_t g_part_access;
103 static g_dumpconf_t g_part_dumpconf;
104 static g_orphan_t g_part_orphan;
105 static g_spoiled_t g_part_spoiled;
106 static g_start_t g_part_start;
108 static struct g_class g_part_class = {
110 .version = G_VERSION,
112 .ctlreq = g_part_ctlreq,
113 .destroy_geom = g_part_destroy_geom,
116 .taste = g_part_taste,
118 .access = g_part_access,
119 .dumpconf = g_part_dumpconf,
120 .orphan = g_part_orphan,
121 .spoiled = g_part_spoiled,
122 .start = g_part_start,
125 DECLARE_GEOM_CLASS(g_part_class, g_part);
131 static void g_part_wither(struct g_geom *, int);
134 g_part_alias_name(enum g_part_alias alias)
138 for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
139 if (g_part_alias_list[i].alias != alias)
141 return (g_part_alias_list[i].lexeme);
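/*
 * For a fixed sectors-per-track value, try each candidate head count and
 * remember the geometry whose cylinders*heads*sectors product addresses
 * the most blocks without exceeding 1023 cylinders.  The winner is
 * returned through the bestchs/bestheads output pointers.
 */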
148 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
151 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
152 off_t chs, cylinders;
158 for (idx = 0; candidate_heads[idx] != 0; idx++) {
159 heads = candidate_heads[idx];
160 cylinders = blocks / heads / sectors;
161 if (cylinders < heads || cylinders < sectors)
163 if (cylinders > 1023)
165 chs = cylinders * heads * sectors;
166 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
174 g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
177 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
179 u_int heads, sectors;
182 if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
183 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
184 table->gpt_fixgeom = 0;
185 table->gpt_heads = 0;
186 table->gpt_sectors = 0;
188 for (idx = 0; candidate_sectors[idx] != 0; idx++) {
189 sectors = candidate_sectors[idx];
190 g_part_geometry_heads(blocks, sectors, &chs, &heads);
194 * Prefer a geometry with sectors > 1, but only if
195 * it doesn't bump down the number of heads to 1.
197 if (chs > bestchs || (chs == bestchs && heads > 1 &&
198 table->gpt_sectors == 1)) {
200 table->gpt_heads = heads;
201 table->gpt_sectors = sectors;
205 * If we didn't find a geometry at all, then the disk is
206 * too big. This means we can use the maximum number of
210 table->gpt_heads = 255;
211 table->gpt_sectors = 63;
214 table->gpt_fixgeom = 1;
215 table->gpt_heads = heads;
216 table->gpt_sectors = sectors;
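/*
 * Find or create the partition entry with the given index.  An existing
 * entry with that index is reused; otherwise a scheme-sized entry is
 * allocated and linked into the index-sorted entry list.
 */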
220 struct g_part_entry *
221 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
224 struct g_part_entry *entry, *last;
227 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
228 if (entry->gpe_index == index)
230 if (entry->gpe_index > index) {
237 entry = g_malloc(table->gpt_scheme->gps_entrysz,
239 entry->gpe_index = index;
241 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
243 LIST_INSERT_AFTER(last, entry, gpe_entry);
245 entry->gpe_offset = 0;
246 entry->gpe_start = start;
247 entry->gpe_end = end;
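/*
 * Create the GEOM provider for a partition entry.  The provider name is
 * built by the scheme's fullname method, the media size is derived from
 * the entry's start/end range (minus any alignment offset), and the
 * sector size, stripe geometry and CANDELETE flag are inherited from the
 * parent provider.
 */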
252 g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
253 struct g_part_entry *entry)
255 struct g_consumer *cp;
256 struct g_provider *pp;
260 cp = LIST_FIRST(&gp->consumer);
263 offset = entry->gpe_start * pp->sectorsize;
264 if (entry->gpe_offset < offset)
265 entry->gpe_offset = offset;
267 if (entry->gpe_pp == NULL) {
268 sb = sbuf_new_auto();
269 G_PART_FULLNAME(table, entry, sb, gp->name);
271 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
273 entry->gpe_pp->private = entry; /* Close the circle. */
275 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */
276 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
278 entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
279 entry->gpe_pp->sectorsize = pp->sectorsize;
280 entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
281 entry->gpe_pp->stripesize = pp->stripesize;
282 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
283 if (pp->stripesize > 0)
284 entry->gpe_pp->stripeoffset %= pp->stripesize;
285 g_error_provider(entry->gpe_pp, 0);
289 g_part_parm_geom(const char *rawname, struct g_geom **v)
294 if (strncmp(rawname, _PATH_DEV, strlen(_PATH_DEV)) == 0)
295 pname = rawname + strlen(_PATH_DEV);
298 LIST_FOREACH(gp, &g_part_class.geom, geom) {
299 if (!strcmp(pname, gp->name))
309 g_part_parm_provider(const char *pname, struct g_provider **v)
311 struct g_provider *pp;
313 if (strncmp(pname, _PATH_DEV, strlen(_PATH_DEV)) == 0)
314 pp = g_provider_by_name(pname + strlen(_PATH_DEV));
316 pp = g_provider_by_name(pname);
324 g_part_parm_quad(const char *p, quad_t *v)
329 q = strtoq(p, &x, 0);
330 if (*x != '\0' || q < 0)
337 g_part_parm_scheme(const char *p, struct g_part_scheme **v)
339 struct g_part_scheme *s;
341 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
342 if (s == &g_part_null_scheme)
344 if (!strcasecmp(s->name, p))
354 g_part_parm_str(const char *p, const char **v)
364 g_part_parm_uint(const char *p, u_int *v)
369 l = strtol(p, &x, 0);
370 if (*x != '\0' || l < 0 || l > INT_MAX)
372 *v = (unsigned int)l;
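/*
 * Probe the media with every registered scheme.  Each scheme gets a
 * freshly created table object; its probe method returns a positive
 * value on error or a priority of zero or less on success, and the
 * candidate closest to zero wins.  An already attached table is probed
 * first; ENXIO is returned when no scheme recognizes the media.
 */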
377 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
379 struct g_part_scheme *iter, *scheme;
380 struct g_part_table *table;
384 scheme = (table != NULL) ? table->gpt_scheme : NULL;
385 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
388 if (pri > 0) { /* error */
393 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
394 if (iter == &g_part_null_scheme)
396 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
399 table->gpt_scheme = iter;
400 table->gpt_depth = depth;
401 probe = G_PART_PROBE(table, cp);
402 if (probe <= 0 && probe > pri) {
405 if (gp->softc != NULL)
406 kobj_delete((kobj_t)gp->softc, M_GEOM);
411 kobj_delete((kobj_t)table, M_GEOM);
415 return ((scheme == NULL) ? ENXIO : 0);
419 * Control request functions.
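/*
 * Handle the "add" verb: validate the requested start, size and index
 * against the table bounds, pick the first free index when none was
 * given, reject ranges that overlap existing entries and finally let the
 * scheme's add method fill in the entry and create its provider.
 */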
423 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
426 struct g_provider *pp;
427 struct g_part_entry *delent, *last, *entry;
428 struct g_part_table *table;
435 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
438 pp = LIST_FIRST(&gp->consumer)->provider;
440 end = gpp->gpp_start + gpp->gpp_size - 1;
442 if (gpp->gpp_start < table->gpt_first ||
443 gpp->gpp_start > table->gpt_last) {
444 gctl_error(req, "%d start '%jd'", EINVAL,
445 (intmax_t)gpp->gpp_start);
448 if (end < gpp->gpp_start || end > table->gpt_last) {
449 gctl_error(req, "%d size '%jd'", EINVAL,
450 (intmax_t)gpp->gpp_size);
453 if (gpp->gpp_index > table->gpt_entries) {
454 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
458 delent = last = NULL;
459 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
460 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
461 if (entry->gpe_deleted) {
462 if (entry->gpe_index == index)
466 if (entry->gpe_index == index)
467 index = entry->gpe_index + 1;
468 if (entry->gpe_index < index)
470 if (entry->gpe_internal)
472 if (gpp->gpp_start >= entry->gpe_start &&
473 gpp->gpp_start <= entry->gpe_end) {
474 gctl_error(req, "%d start '%jd'", ENOSPC,
475 (intmax_t)gpp->gpp_start);
478 if (end >= entry->gpe_start && end <= entry->gpe_end) {
479 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
482 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
483 gctl_error(req, "%d size '%jd'", ENOSPC,
484 (intmax_t)gpp->gpp_size);
488 if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
489 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
492 if (index > table->gpt_entries) {
493 gctl_error(req, "%d index '%d'", ENOSPC, index);
497 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
498 M_WAITOK | M_ZERO) : delent;
499 entry->gpe_index = index;
500 entry->gpe_start = gpp->gpp_start;
501 entry->gpe_end = end;
502 error = G_PART_ADD(table, entry, gpp);
504 gctl_error(req, "%d", error);
509 if (delent == NULL) {
511 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
513 LIST_INSERT_AFTER(last, entry, gpe_entry);
514 entry->gpe_created = 1;
516 entry->gpe_deleted = 0;
517 entry->gpe_modified = 1;
519 g_part_new_provider(gp, table, entry);
521 /* Provide feedback if so requested. */
522 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
523 sb = sbuf_new_auto();
524 G_PART_FULLNAME(table, entry, sb, gp->name);
525 sbuf_cat(sb, " added\n");
527 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
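/*
 * Handle the "bootcode" verb: check the supplied boot code against the
 * scheme's bootcode size limit and hand it to the scheme's bootcode
 * method.
 */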
534 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
537 struct g_part_table *table;
542 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
546 sz = table->gpt_scheme->gps_bootcodesz;
551 if (gpp->gpp_codesize > sz) {
556 error = G_PART_BOOTCODE(table, gpp);
560 /* Provide feedback if so requested. */
561 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
562 sb = sbuf_new_auto();
563 sbuf_printf(sb, "%s has bootcode\n", gp->name);
565 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
571 gctl_error(req, "%d", error);
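/*
 * Handle the "commit" verb on an opened table: zero the sectors recorded
 * in the gpt_smhead/gpt_smtail bitmaps at the head and tail of the media,
 * then either wither the geom (when the null scheme is active after a
 * destroy) or call the scheme's write method, and finally clear the
 * created/modified markers, unlink deleted entries and drop the exclusive
 * access taken when the table was opened.
 */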
576 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
578 struct g_consumer *cp;
580 struct g_provider *pp;
581 struct g_part_entry *entry, *tmp;
582 struct g_part_table *table;
587 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
591 if (!table->gpt_opened) {
592 gctl_error(req, "%d", EPERM);
598 cp = LIST_FIRST(&gp->consumer);
599 if ((table->gpt_smhead | table->gpt_smtail) != 0) {
601 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
602 while (table->gpt_smhead != 0) {
603 i = ffs(table->gpt_smhead) - 1;
604 error = g_write_data(cp, i * pp->sectorsize, buf,
610 table->gpt_smhead &= ~(1 << i);
612 while (table->gpt_smtail != 0) {
613 i = ffs(table->gpt_smtail) - 1;
614 error = g_write_data(cp, pp->mediasize - (i + 1) *
615 pp->sectorsize, buf, pp->sectorsize);
620 table->gpt_smtail &= ~(1 << i);
625 if (table->gpt_scheme == &g_part_null_scheme) {
627 g_access(cp, -1, -1, -1);
628 g_part_wither(gp, ENXIO);
632 error = G_PART_WRITE(table, cp);
636 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
637 if (!entry->gpe_deleted) {
638 entry->gpe_created = 0;
639 entry->gpe_modified = 0;
642 LIST_REMOVE(entry, gpe_entry);
645 table->gpt_created = 0;
646 table->gpt_opened = 0;
649 g_access(cp, -1, -1, -1);
654 gctl_error(req, "%d", error);
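/*
 * Handle the "create" verb: refuse if a real (non-null) table already
 * exists on the provider, validate the entry count against the scheme's
 * limits, attach a geom and consumer (or reuse the existing null-scheme
 * geom), synthesize a disk geometry and call the scheme's create method.
 */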
659 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
661 struct g_consumer *cp;
663 struct g_provider *pp;
664 struct g_part_scheme *scheme;
665 struct g_part_table *null, *table;
669 pp = gpp->gpp_provider;
670 scheme = gpp->gpp_scheme;
671 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
674 /* Check that there isn't already a g_part geom on the provider. */
675 error = g_part_parm_geom(pp->name, &gp);
678 if (null->gpt_scheme != &g_part_null_scheme) {
679 gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
685 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
686 (gpp->gpp_entries < scheme->gps_minent ||
687 gpp->gpp_entries > scheme->gps_maxent)) {
688 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
693 gp = g_new_geomf(&g_part_class, "%s", pp->name);
694 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
698 table->gpt_scheme = gpp->gpp_scheme;
699 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
700 gpp->gpp_entries : scheme->gps_minent;
701 LIST_INIT(&table->gpt_entry);
703 cp = g_new_consumer(gp);
704 error = g_attach(cp, pp);
706 error = g_access(cp, 1, 1, 1);
708 g_part_wither(gp, error);
709 gctl_error(req, "%d geom '%s'", error, pp->name);
712 table->gpt_opened = 1;
714 cp = LIST_FIRST(&gp->consumer);
715 table->gpt_opened = null->gpt_opened;
716 table->gpt_smhead = null->gpt_smhead;
717 table->gpt_smtail = null->gpt_smtail;
722 /* Make sure the provider has media. */
723 if (pp->mediasize == 0 || pp->sectorsize == 0) {
728 /* Make sure we can nest and if so, determine our depth. */
729 error = g_getattr("PART::isleaf", cp, &attr);
730 if (!error && attr) {
734 error = g_getattr("PART::depth", cp, &attr);
735 table->gpt_depth = (!error) ? attr + 1 : 0;
738 * Synthesize a disk geometry. Some partitioning schemes
739 * depend on it and since some file systems need it even
740 * when the partition scheme doesn't, we do it here in
741 * scheme-independent code.
743 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
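/*
 * Illustrative example of the synthesized geometry: a 4 GB disk with
 * 512-byte sectors has 8388608 blocks.  With heads=255 and sectors=63
 * that yields 8388608 / (255 * 63) = 522 cylinders, i.e. a CHS capacity
 * of 522 * 255 * 63 = 8385930 blocks, which is the best fit among the
 * candidate geometries tried above.
 */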
745 error = G_PART_CREATE(table, gpp);
751 table->gpt_created = 1;
753 kobj_delete((kobj_t)null, M_GEOM);
756 * Support automatic commit by filling in the gpp_geom
759 gpp->gpp_parms |= G_PART_PARM_GEOM;
762 /* Provide feedback if so requested. */
763 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
764 sb = sbuf_new_auto();
765 sbuf_printf(sb, "%s created\n", gp->name);
767 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
775 g_access(cp, -1, -1, -1);
776 g_part_wither(gp, error);
778 kobj_delete((kobj_t)gp->softc, M_GEOM);
781 gctl_error(req, "%d provider", error);
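/*
 * Handle the "delete" verb: look up the entry by index, refuse while its
 * provider is still open, destroy the provider and either unlink an entry
 * that was never committed or mark it deleted until the next commit.
 */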
786 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
789 struct g_provider *pp;
790 struct g_part_entry *entry;
791 struct g_part_table *table;
795 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
800 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
801 if (entry->gpe_deleted || entry->gpe_internal)
803 if (entry->gpe_index == gpp->gpp_index)
807 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
813 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
814 gctl_error(req, "%d", EBUSY);
819 entry->gpe_pp = NULL;
822 if (entry->gpe_created) {
823 LIST_REMOVE(entry, gpe_entry);
826 entry->gpe_modified = 0;
827 entry->gpe_deleted = 1;
831 g_wither_provider(pp, ENXIO);
833 /* Provide feedback if so requested. */
834 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
835 sb = sbuf_new_auto();
836 G_PART_FULLNAME(table, entry, sb, gp->name);
837 sbuf_cat(sb, " deleted\n");
839 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
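/*
 * Handle the "destroy" verb: fails with EBUSY while live entries remain.
 * Once the scheme's destroy method succeeds, the table is replaced by a
 * null-scheme table that inherits the depth, opened state and scrub
 * bitmaps, so that a subsequent commit can wipe the on-disk metadata.
 */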
846 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
849 struct g_part_entry *entry;
850 struct g_part_table *null, *table;
855 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
859 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
860 if (entry->gpe_deleted || entry->gpe_internal)
862 gctl_error(req, "%d", EBUSY);
866 error = G_PART_DESTROY(table, gpp);
868 gctl_error(req, "%d", error);
872 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
876 null->gpt_scheme = &g_part_null_scheme;
877 LIST_INIT(&null->gpt_entry);
878 null->gpt_depth = table->gpt_depth;
879 null->gpt_opened = table->gpt_opened;
880 null->gpt_smhead = table->gpt_smhead;
881 null->gpt_smtail = table->gpt_smtail;
883 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
884 LIST_REMOVE(entry, gpe_entry);
887 kobj_delete((kobj_t)table, M_GEOM);
889 /* Provide feedback if so requested. */
890 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
891 sb = sbuf_new_auto();
892 sbuf_printf(sb, "%s destroyed\n", gp->name);
894 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
901 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
904 struct g_part_entry *entry;
905 struct g_part_table *table;
910 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
915 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
916 if (entry->gpe_deleted || entry->gpe_internal)
918 if (entry->gpe_index == gpp->gpp_index)
922 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
926 error = G_PART_MODIFY(table, entry, gpp);
928 gctl_error(req, "%d", error);
932 if (!entry->gpe_created)
933 entry->gpe_modified = 1;
935 /* Provide feedback if so requested. */
936 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
937 sb = sbuf_new_auto();
938 G_PART_FULLNAME(table, entry, sb, gp->name);
939 sbuf_cat(sb, " modified\n");
941 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
948 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
950 gctl_error(req, "%d verb 'move'", ENOSYS);
955 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
957 gctl_error(req, "%d verb 'recover'", ENOSYS);
962 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
964 gctl_error(req, "%d verb 'resize'", ENOSYS);
969 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
973 struct g_part_entry *entry;
974 struct g_part_table *table;
979 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
984 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
985 if (entry->gpe_deleted || entry->gpe_internal)
987 if (entry->gpe_index == gpp->gpp_index)
991 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
995 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
997 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1001 /* Provide feedback if so requested. */
1002 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1003 sb = sbuf_new_auto();
1004 G_PART_FULLNAME(table, entry, sb, gp->name);
1005 sbuf_printf(sb, " has %s %sset\n", gpp->gpp_attrib,
1008 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
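/*
 * Handle the "undo" verb on an opened table: wither providers created
 * since the table was opened, unlink deleted entries and, where needed,
 * re-probe and re-read the on-disk table so the in-core state matches the
 * media again; the exclusive access is dropped at the end.
 */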
1015 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
1017 struct g_consumer *cp;
1018 struct g_provider *pp;
1020 struct g_part_entry *entry, *tmp;
1021 struct g_part_table *table;
1025 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1026 g_topology_assert();
1029 if (!table->gpt_opened) {
1030 gctl_error(req, "%d", EPERM);
1034 cp = LIST_FIRST(&gp->consumer);
1035 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1036 entry->gpe_modified = 0;
1037 if (entry->gpe_created) {
1041 entry->gpe_pp = NULL;
1042 g_wither_provider(pp, ENXIO);
1044 entry->gpe_deleted = 1;
1046 if (entry->gpe_deleted) {
1047 LIST_REMOVE(entry, gpe_entry);
1052 g_topology_unlock();
1054 reprobe = (table->gpt_scheme == &g_part_null_scheme ||
1055 table->gpt_created) ? 1 : 0;
1058 if (!LIST_EMPTY(&table->gpt_entry)) {
1062 error = g_part_probe(gp, cp, table->gpt_depth);
1065 g_access(cp, -1, -1, -1);
1066 g_part_wither(gp, error);
1072 * Synthesize a disk geometry. Some partitioning schemes
1073 * depend on it and since some file systems need it even
1074 * when the partition scheme doesn't, we do it here in
1075 * scheme-independent code.
1078 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1081 error = G_PART_READ(table, cp);
1087 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1088 if (!entry->gpe_internal)
1089 g_part_new_provider(gp, table, entry);
1092 table->gpt_opened = 0;
1093 g_access(cp, -1, -1, -1);
1098 gctl_error(req, "%d", error);
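/*
 * Tear down a g_part geom: free any remaining entries and the table
 * object before handing the geom to g_wither_geom().
 */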
1103 g_part_wither(struct g_geom *gp, int error)
1105 struct g_part_entry *entry;
1106 struct g_part_table *table;
1109 if (table != NULL) {
1110 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1111 LIST_REMOVE(entry, gpe_entry);
1114 if (gp->softc != NULL) {
1115 kobj_delete((kobj_t)gp->softc, M_GEOM);
1119 g_wither_geom(gp, error);
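/*
 * The class ctlreq method: map the verb onto a G_PART_CTL_* request and
 * onto the sets of mandatory and optional parameters, parse each request
 * argument into struct g_part_parms, obtain exclusive access on the
 * target geom for modifying verbs, give the scheme a chance to pre-check
 * the parameters, dispatch to the per-verb handler and, when the 'C'
 * flag was supplied, commit automatically.
 */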
1127 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
1129 struct g_part_parms gpp;
1130 struct g_part_table *table;
1131 struct gctl_req_arg *ap;
1133 enum g_part_ctl ctlreq;
1134 unsigned int i, mparms, oparms, parm;
1135 int auto_commit, close_on_error;
1136 int error, len, modifies;
1138 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1139 g_topology_assert();
1141 ctlreq = G_PART_CTL_NONE;
1144 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1147 if (!strcmp(verb, "add")) {
1148 ctlreq = G_PART_CTL_ADD;
1149 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1150 G_PART_PARM_START | G_PART_PARM_TYPE;
1151 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1155 if (!strcmp(verb, "bootcode")) {
1156 ctlreq = G_PART_CTL_BOOTCODE;
1157 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
1161 if (!strcmp(verb, "commit")) {
1162 ctlreq = G_PART_CTL_COMMIT;
1163 mparms |= G_PART_PARM_GEOM;
1165 } else if (!strcmp(verb, "create")) {
1166 ctlreq = G_PART_CTL_CREATE;
1167 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1168 oparms |= G_PART_PARM_ENTRIES;
1172 if (!strcmp(verb, "delete")) {
1173 ctlreq = G_PART_CTL_DELETE;
1174 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1175 } else if (!strcmp(verb, "destroy")) {
1176 ctlreq = G_PART_CTL_DESTROY;
1177 mparms |= G_PART_PARM_GEOM;
1181 if (!strcmp(verb, "modify")) {
1182 ctlreq = G_PART_CTL_MODIFY;
1183 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1184 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1185 } else if (!strcmp(verb, "move")) {
1186 ctlreq = G_PART_CTL_MOVE;
1187 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1191 if (!strcmp(verb, "recover")) {
1192 ctlreq = G_PART_CTL_RECOVER;
1193 mparms |= G_PART_PARM_GEOM;
1194 } else if (!strcmp(verb, "resize")) {
1195 ctlreq = G_PART_CTL_RESIZE;
1196 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1200 if (!strcmp(verb, "set")) {
1201 ctlreq = G_PART_CTL_SET;
1202 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
1207 if (!strcmp(verb, "undo")) {
1208 ctlreq = G_PART_CTL_UNDO;
1209 mparms |= G_PART_PARM_GEOM;
1211 } else if (!strcmp(verb, "unset")) {
1212 ctlreq = G_PART_CTL_UNSET;
1213 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
1218 if (ctlreq == G_PART_CTL_NONE) {
1219 gctl_error(req, "%d verb '%s'", EINVAL, verb);
1223 bzero(&gpp, sizeof(gpp));
1224 for (i = 0; i < req->narg; i++) {
1227 switch (ap->name[0]) {
1229 if (!strcmp(ap->name, "attrib"))
1230 parm = G_PART_PARM_ATTRIB;
1233 if (!strcmp(ap->name, "bootcode"))
1234 parm = G_PART_PARM_BOOTCODE;
1237 if (!strcmp(ap->name, "class"))
1241 if (!strcmp(ap->name, "entries"))
1242 parm = G_PART_PARM_ENTRIES;
1245 if (!strcmp(ap->name, "flags"))
1246 parm = G_PART_PARM_FLAGS;
1249 if (!strcmp(ap->name, "geom"))
1250 parm = G_PART_PARM_GEOM;
1253 if (!strcmp(ap->name, "index"))
1254 parm = G_PART_PARM_INDEX;
1257 if (!strcmp(ap->name, "label"))
1258 parm = G_PART_PARM_LABEL;
1261 if (!strcmp(ap->name, "output"))
1262 parm = G_PART_PARM_OUTPUT;
1265 if (!strcmp(ap->name, "provider"))
1266 parm = G_PART_PARM_PROVIDER;
1269 if (!strcmp(ap->name, "scheme"))
1270 parm = G_PART_PARM_SCHEME;
1271 else if (!strcmp(ap->name, "size"))
1272 parm = G_PART_PARM_SIZE;
1273 else if (!strcmp(ap->name, "start"))
1274 parm = G_PART_PARM_START;
1277 if (!strcmp(ap->name, "type"))
1278 parm = G_PART_PARM_TYPE;
1281 if (!strcmp(ap->name, "verb"))
1283 else if (!strcmp(ap->name, "version"))
1284 parm = G_PART_PARM_VERSION;
1287 if ((parm & (mparms | oparms)) == 0) {
1288 gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1291 if (parm == G_PART_PARM_BOOTCODE)
1292 p = gctl_get_param(req, ap->name, &len);
1294 p = gctl_get_asciiparam(req, ap->name);
1296 gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
1300 case G_PART_PARM_ATTRIB:
1301 error = g_part_parm_str(p, &gpp.gpp_attrib);
1303 case G_PART_PARM_BOOTCODE:
1304 gpp.gpp_codeptr = p;
1305 gpp.gpp_codesize = len;
1308 case G_PART_PARM_ENTRIES:
1309 error = g_part_parm_uint(p, &gpp.gpp_entries);
1311 case G_PART_PARM_FLAGS:
1314 error = g_part_parm_str(p, &gpp.gpp_flags);
1316 case G_PART_PARM_GEOM:
1317 error = g_part_parm_geom(p, &gpp.gpp_geom);
1319 case G_PART_PARM_INDEX:
1320 error = g_part_parm_uint(p, &gpp.gpp_index);
1322 case G_PART_PARM_LABEL:
1323 /* An empty label is always valid. */
1327 case G_PART_PARM_OUTPUT:
1328 error = 0; /* Write-only parameter */
1330 case G_PART_PARM_PROVIDER:
1331 error = g_part_parm_provider(p, &gpp.gpp_provider);
1333 case G_PART_PARM_SCHEME:
1334 error = g_part_parm_scheme(p, &gpp.gpp_scheme);
1336 case G_PART_PARM_SIZE:
1337 error = g_part_parm_quad(p, &gpp.gpp_size);
1339 case G_PART_PARM_START:
1340 error = g_part_parm_quad(p, &gpp.gpp_start);
1342 case G_PART_PARM_TYPE:
1343 error = g_part_parm_str(p, &gpp.gpp_type);
1345 case G_PART_PARM_VERSION:
1346 error = g_part_parm_uint(p, &gpp.gpp_version);
1353 gctl_error(req, "%d %s '%s'", error, ap->name, p);
1356 gpp.gpp_parms |= parm;
1358 if ((gpp.gpp_parms & mparms) != mparms) {
1359 parm = mparms - (gpp.gpp_parms & mparms);
1360 gctl_error(req, "%d param '%x'", ENOATTR, parm);
1364 /* Obtain permissions if possible/necessary. */
1367 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1368 table = gpp.gpp_geom->softc;
1369 if (table != NULL && !table->gpt_opened) {
1370 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1373 gctl_error(req, "%d geom '%s'", error,
1374 gpp.gpp_geom->name);
1377 table->gpt_opened = 1;
1382 /* Allow the scheme to check or modify the parameters. */
1383 if (table != NULL) {
1384 error = G_PART_PRECHECK(table, ctlreq, &gpp);
1386 gctl_error(req, "%d pre-check failed", error);
1390 error = EDOOFUS; /* Prevent bogus uninit. warning. */
1393 case G_PART_CTL_NONE:
1394 panic("%s", __func__);
1395 case G_PART_CTL_ADD:
1396 error = g_part_ctl_add(req, &gpp);
1398 case G_PART_CTL_BOOTCODE:
1399 error = g_part_ctl_bootcode(req, &gpp);
1401 case G_PART_CTL_COMMIT:
1402 error = g_part_ctl_commit(req, &gpp);
1404 case G_PART_CTL_CREATE:
1405 error = g_part_ctl_create(req, &gpp);
1407 case G_PART_CTL_DELETE:
1408 error = g_part_ctl_delete(req, &gpp);
1410 case G_PART_CTL_DESTROY:
1411 error = g_part_ctl_destroy(req, &gpp);
1413 case G_PART_CTL_MODIFY:
1414 error = g_part_ctl_modify(req, &gpp);
1416 case G_PART_CTL_MOVE:
1417 error = g_part_ctl_move(req, &gpp);
1419 case G_PART_CTL_RECOVER:
1420 error = g_part_ctl_recover(req, &gpp);
1422 case G_PART_CTL_RESIZE:
1423 error = g_part_ctl_resize(req, &gpp);
1425 case G_PART_CTL_SET:
1426 error = g_part_ctl_setunset(req, &gpp, 1);
1428 case G_PART_CTL_UNDO:
1429 error = g_part_ctl_undo(req, &gpp);
1431 case G_PART_CTL_UNSET:
1432 error = g_part_ctl_setunset(req, &gpp, 0);
1436 /* Implement automatic commit. */
1438 auto_commit = (modifies &&
1439 (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1440 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1442 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
1443 error = g_part_ctl_commit(req, &gpp);
1448 if (error && close_on_error) {
1449 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1450 table->gpt_opened = 0;
1455 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1459 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1460 g_topology_assert();
1462 g_part_wither(gp, EINVAL);
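/*
 * Taste method: attach a consumer with read access to the provider,
 * probe for a recognizable partitioning scheme, read the table and
 * create a provider for every non-internal entry.  A root mount hold is
 * taken so that booting waits for the partitions to appear; the geom is
 * withered again when probing or reading fails.
 */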
1466 static struct g_geom *
1467 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1469 struct g_consumer *cp;
1471 struct g_part_entry *entry;
1472 struct g_part_table *table;
1473 struct root_hold_token *rht;
1477 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
1478 g_topology_assert();
1480 /* Skip providers that are already open for writing. */
1485 * Create a GEOM with consumer and hook it up to the provider.
1486 * With that we become part of the topology. Obtain read access
1489 gp = g_new_geomf(mp, "%s", pp->name);
1490 cp = g_new_consumer(gp);
1491 error = g_attach(cp, pp);
1493 error = g_access(cp, 1, 0, 0);
1495 g_part_wither(gp, error);
1499 rht = root_mount_hold(mp->name);
1500 g_topology_unlock();
1503 * Short-circuit the whole probing galore when there's no
1506 if (pp->mediasize == 0 || pp->sectorsize == 0) {
1511 /* Make sure we can nest and if so, determine our depth. */
1512 error = g_getattr("PART::isleaf", cp, &attr);
1513 if (!error && attr) {
1517 error = g_getattr("PART::depth", cp, &attr);
1518 depth = (!error) ? attr + 1 : 0;
1520 error = g_part_probe(gp, cp, depth);
1527 * Synthesize a disk geometry. Some partitioning schemes
1528 * depend on it and since some file systems need it even
1529 * when the partition scheme doesn't, we do it here in
1530 * scheme-independent code.
1532 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1534 error = G_PART_READ(table, cp);
1539 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1540 if (!entry->gpe_internal)
1541 g_part_new_provider(gp, table, entry);
1544 root_mount_rel(rht);
1545 g_access(cp, -1, 0, 0);
1550 root_mount_rel(rht);
1551 g_access(cp, -1, 0, 0);
1552 g_part_wither(gp, error);
1561 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1563 struct g_consumer *cp;
1565 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1568 cp = LIST_FIRST(&pp->geom->consumer);
1570 /* We always gain write-exclusive access. */
1571 return (g_access(cp, dr, dw, dw + de));
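/*
 * Dumpconf method: with a NULL indent a single-line provider summary is
 * emitted (including a libdisk-compatible type); for a provider the
 * per-partition XML (<start>, <end>, <index>, <type>, <offset>,
 * <length>) is written; otherwise the geom-level XML (<scheme>,
 * <entries>, <first>, <last>, <fwsectors>, <fwheads>).  The scheme's
 * dumpconf method is called for scheme-specific additions.
 */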
1575 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1576 struct g_consumer *cp, struct g_provider *pp)
1579 struct g_part_entry *entry;
1580 struct g_part_table *table;
1582 KASSERT(sb != NULL && gp != NULL, (__func__));
1585 if (indent == NULL) {
1586 KASSERT(cp == NULL && pp != NULL, (__func__));
1587 entry = pp->private;
1590 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
1591 (uintmax_t)entry->gpe_offset,
1592 G_PART_TYPE(table, entry, buf, sizeof(buf)));
1594 * libdisk compatibility quirk - the scheme dumps the
1595 * slicer name and partition type in a way that is
1596 * compatible with libdisk. When libdisk is not used
1597 * anymore, this should go away.
1599 G_PART_DUMPCONF(table, entry, sb, indent);
1600 } else if (cp != NULL) { /* Consumer configuration. */
1601 KASSERT(pp == NULL, (__func__));
1603 } else if (pp != NULL) { /* Provider configuration. */
1604 entry = pp->private;
1607 sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
1608 (uintmax_t)entry->gpe_start);
1609 sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
1610 (uintmax_t)entry->gpe_end);
1611 sbuf_printf(sb, "%s<index>%u</index>\n", indent,
1613 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
1614 G_PART_TYPE(table, entry, buf, sizeof(buf)));
1615 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
1616 (uintmax_t)entry->gpe_offset);
1617 sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
1618 (uintmax_t)pp->mediasize);
1619 G_PART_DUMPCONF(table, entry, sb, indent);
1620 } else { /* Geom configuration. */
1621 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
1622 table->gpt_scheme->name);
1623 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
1624 table->gpt_entries);
1625 sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
1626 (uintmax_t)table->gpt_first);
1627 sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
1628 (uintmax_t)table->gpt_last);
1629 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
1630 table->gpt_sectors);
1631 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
1633 G_PART_DUMPCONF(table, NULL, sb, indent);
1638 g_part_orphan(struct g_consumer *cp)
1640 struct g_provider *pp;
1643 KASSERT(pp != NULL, (__func__));
1644 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
1645 g_topology_assert();
1647 KASSERT(pp->error != 0, (__func__));
1648 g_part_wither(cp->geom, pp->error);
1652 g_part_spoiled(struct g_consumer *cp)
1655 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
1656 g_topology_assert();
1658 g_part_wither(cp->geom, ENXIO);
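/*
 * Start method: reads and writes are cloned, clamped to the partition's
 * media size and shifted by the entry offset before being handed to the
 * parent consumer.  Getattr requests answer the firmware geometry and
 * PART::* attributes and translate GEOM::kerneldump offsets; other
 * supported requests are cloned and passed straight down, while the rest
 * are rejected with EOPNOTSUPP.
 */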
1662 g_part_start(struct bio *bp)
1665 struct g_consumer *cp;
1667 struct g_part_entry *entry;
1668 struct g_part_table *table;
1669 struct g_kerneldump *gkd;
1670 struct g_provider *pp;
1675 cp = LIST_FIRST(&gp->consumer);
1677 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
1680 entry = pp->private;
1681 if (entry == NULL) {
1682 g_io_deliver(bp, ENXIO);
1686 switch(bp->bio_cmd) {
1690 if (bp->bio_offset >= pp->mediasize) {
1691 g_io_deliver(bp, EIO);
1694 bp2 = g_clone_bio(bp);
1696 g_io_deliver(bp, ENOMEM);
1699 if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
1700 bp2->bio_length = pp->mediasize - bp2->bio_offset;
1701 bp2->bio_done = g_std_done;
1702 bp2->bio_offset += entry->gpe_offset;
1703 g_io_request(bp2, cp);
1708 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
1710 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
1712 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
1714 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
1716 if (g_handleattr_str(bp, "PART::scheme",
1717 table->gpt_scheme->name))
1719 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
1721 * Check that the partition is suitable for kernel
1722 * dumps. Typically only swap partitions should be
1725 if (!G_PART_DUMPTO(table, entry)) {
1726 g_io_deliver(bp, ENODEV);
1727 printf("GEOM_PART: Partition '%s' not suitable"
1728 " for kernel dumps (wrong type?)\n",
1732 gkd = (struct g_kerneldump *)bp->bio_data;
1733 if (gkd->offset >= pp->mediasize) {
1734 g_io_deliver(bp, EIO);
1737 if (gkd->offset + gkd->length > pp->mediasize)
1738 gkd->length = pp->mediasize - gkd->offset;
1739 gkd->offset += entry->gpe_offset;
1743 g_io_deliver(bp, EOPNOTSUPP);
1747 bp2 = g_clone_bio(bp);
1749 g_io_deliver(bp, ENOMEM);
1752 bp2->bio_done = g_std_done;
1753 g_io_request(bp2, cp);
1757 g_part_init(struct g_class *mp)
1760 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
1764 g_part_fini(struct g_class *mp)
1767 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
1771 g_part_unload_event(void *arg, int flag)
1773 struct g_consumer *cp;
1775 struct g_provider *pp;
1776 struct g_part_scheme *scheme;
1777 struct g_part_table *table;
1781 if (flag == EV_CANCEL)
1786 scheme = (void *)(*xchg);
1788 g_topology_assert();
1790 LIST_FOREACH(gp, &g_part_class.geom, geom) {
1792 if (table->gpt_scheme != scheme)
1796 LIST_FOREACH(pp, &gp->provider, provider)
1797 acc += pp->acr + pp->acw + pp->ace;
1798 LIST_FOREACH(cp, &gp->consumer, consumer)
1799 acc += cp->acr + cp->acw + cp->ace;
1802 g_part_wither(gp, ENOSYS);
1808 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
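/*
 * Module event handler for scheme modules: on load the scheme is
 * appended to the scheme list and a re-taste of the class is triggered;
 * on unload g_part_unload_event is run via g_waitfor_event to verify
 * that no geom still uses the scheme before it is removed from the list.
 */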
1814 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
1821 TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list);
1823 error = g_retaste(&g_part_class);
1825 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
1828 arg = (uintptr_t)scheme;
1829 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
1832 error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg;