2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
32 #include <sys/diskmbr.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
36 #include <sys/limits.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/queue.h>
42 #include <sys/sysctl.h>
43 #include <sys/systm.h>
45 #include <geom/geom.h>
46 #include <geom/geom_ctl.h>
47 #include <geom/geom_int.h>
48 #include <geom/part/g_part.h>
50 #include "g_part_if.h"
53 #define _PATH_DEV "/dev/"
56 static kobj_method_t g_part_null_methods[] = {
60 static struct g_part_scheme g_part_null_scheme = {
63 sizeof(struct g_part_table),
66 TAILQ_HEAD(, g_part_scheme) g_part_schemes =
67 TAILQ_HEAD_INITIALIZER(g_part_schemes);
69 struct g_part_alias_list {
71 enum g_part_alias alias;
72 } g_part_alias_list[G_PART_ALIAS_COUNT] = {
73 { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
74 { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
75 { "apple-label", G_PART_ALIAS_APPLE_LABEL },
76 { "apple-raid", G_PART_ALIAS_APPLE_RAID },
77 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
78 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
79 { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
80 { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
81 { "ebr", G_PART_ALIAS_EBR },
82 { "efi", G_PART_ALIAS_EFI },
83 { "fat16", G_PART_ALIAS_MS_FAT16 },
84 { "fat32", G_PART_ALIAS_MS_FAT32 },
85 { "freebsd", G_PART_ALIAS_FREEBSD },
86 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
87 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
88 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
89 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
90 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
91 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
92 { "linux-data", G_PART_ALIAS_LINUX_DATA },
93 { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
94 { "linux-raid", G_PART_ALIAS_LINUX_RAID },
95 { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
96 { "mbr", G_PART_ALIAS_MBR },
97 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
98 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
99 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
100 { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
101 { "ntfs", G_PART_ALIAS_MS_NTFS },
102 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
103 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
104 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
105 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
106 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
107 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
108 { "vmware-vmfs", G_PART_ALIAS_VMFS },
109 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
110 { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
113 SYSCTL_DECL(_kern_geom);
114 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
116 static u_int check_integrity = 1;
117 TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity);
118 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
119 CTLFLAG_RW | CTLFLAG_TUN, &check_integrity, 1,
120 "Enable integrity checking");
123 * The GEOM partitioning class.
125 static g_ctl_req_t g_part_ctlreq;
126 static g_ctl_destroy_geom_t g_part_destroy_geom;
127 static g_fini_t g_part_fini;
128 static g_init_t g_part_init;
129 static g_taste_t g_part_taste;
131 static g_access_t g_part_access;
132 static g_dumpconf_t g_part_dumpconf;
133 static g_orphan_t g_part_orphan;
134 static g_spoiled_t g_part_spoiled;
135 static g_start_t g_part_start;
137 static struct g_class g_part_class = {
139 .version = G_VERSION,
141 .ctlreq = g_part_ctlreq,
142 .destroy_geom = g_part_destroy_geom,
145 .taste = g_part_taste,
147 .access = g_part_access,
148 .dumpconf = g_part_dumpconf,
149 .orphan = g_part_orphan,
150 .spoiled = g_part_spoiled,
151 .start = g_part_start,
154 DECLARE_GEOM_CLASS(g_part_class, g_part);
155 MODULE_VERSION(g_part, 0);
161 static void g_part_wither(struct g_geom *, int);
164 g_part_alias_name(enum g_part_alias alias)
168 for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
169 if (g_part_alias_list[i].alias != alias)
171 return (g_part_alias_list[i].lexeme);
178 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
181 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
182 off_t chs, cylinders;
188 for (idx = 0; candidate_heads[idx] != 0; idx++) {
189 heads = candidate_heads[idx];
190 cylinders = blocks / heads / sectors;
191 if (cylinders < heads || cylinders < sectors)
193 if (cylinders > 1023)
195 chs = cylinders * heads * sectors;
196 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
204 g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
207 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
209 u_int heads, sectors;
212 if (g_getattr("GEOM::fwsectors", cp, §ors) != 0 || sectors == 0 ||
213 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
214 table->gpt_fixgeom = 0;
215 table->gpt_heads = 0;
216 table->gpt_sectors = 0;
218 for (idx = 0; candidate_sectors[idx] != 0; idx++) {
219 sectors = candidate_sectors[idx];
220 g_part_geometry_heads(blocks, sectors, &chs, &heads);
224 * Prefer a geometry with sectors > 1, but only if
225 * it doesn't bump down the number of heads to 1.
227 if (chs > bestchs || (chs == bestchs && heads > 1 &&
228 table->gpt_sectors == 1)) {
230 table->gpt_heads = heads;
231 table->gpt_sectors = sectors;
235 * If we didn't find a geometry at all, then the disk is
236 * too big. This means we can use the maximum number of
240 table->gpt_heads = 255;
241 table->gpt_sectors = 63;
244 table->gpt_fixgeom = 1;
245 table->gpt_heads = heads;
246 table->gpt_sectors = sectors;
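/*
 * Worked example (illustrative figures): for a disk of 2097152 blocks
 * (1 GiB at 512 bytes/sector) the search above typically settles on
 * 63 sectors and 64 heads, i.e. 520 cylinders and a CHS capacity of
 * 520 * 64 * 63 = 2096640 blocks, the closest fit below the real size.
 * Disks too large for any sub-1024-cylinder fit fall back to the
 * 255/63 maximum above.
 */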
250 #define DPRINTF(...) if (bootverbose) { \
251 printf("GEOM_PART: " __VA_ARGS__); \
255 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
257 struct g_part_entry *e1, *e2;
258 struct g_provider *pp;
264 if (table->gpt_last < table->gpt_first) {
265 DPRINTF("last LBA is below first LBA: %jd < %jd\n",
266 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
269 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
270 DPRINTF("last LBA extends beyond mediasize: "
271 "%jd > %jd\n", (intmax_t)table->gpt_last,
272 (intmax_t)pp->mediasize / pp->sectorsize - 1);
275 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
276 if (e1->gpe_deleted || e1->gpe_internal)
278 if (e1->gpe_start < table->gpt_first) {
279 DPRINTF("partition %d has start offset below first "
280 "LBA: %jd < %jd\n", e1->gpe_index,
281 (intmax_t)e1->gpe_start,
282 (intmax_t)table->gpt_first);
285 if (e1->gpe_start > table->gpt_last) {
286 DPRINTF("partition %d has start offset beyond last "
287 "LBA: %jd > %jd\n", e1->gpe_index,
288 (intmax_t)e1->gpe_start,
289 (intmax_t)table->gpt_last);
292 if (e1->gpe_end < e1->gpe_start) {
293 DPRINTF("partition %d has end offset below start "
294 "offset: %jd < %jd\n", e1->gpe_index,
295 (intmax_t)e1->gpe_end,
296 (intmax_t)e1->gpe_start);
299 if (e1->gpe_end > table->gpt_last) {
300 DPRINTF("partition %d has end offset beyond last "
301 "LBA: %jd > %jd\n", e1->gpe_index,
302 (intmax_t)e1->gpe_end,
303 (intmax_t)table->gpt_last);
306 if (pp->stripesize > 0) {
307 offset = e1->gpe_start * pp->sectorsize;
308 if (e1->gpe_offset > offset)
309 offset = e1->gpe_offset;
310 if ((offset + pp->stripeoffset) % pp->stripesize) {
311 DPRINTF("partition %d is not aligned on %u "
312 "bytes\n", e1->gpe_index, pp->stripesize);
313 /* Don't treat this as a critical failure */
317 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
318 if (e2->gpe_deleted || e2->gpe_internal)
320 if (e1->gpe_start >= e2->gpe_start &&
321 e1->gpe_start <= e2->gpe_end) {
322 DPRINTF("partition %d has start offset inside "
323 "partition %d: start[%d] %jd >= start[%d] "
324 "%jd <= end[%d] %jd\n",
325 e1->gpe_index, e2->gpe_index,
326 e2->gpe_index, (intmax_t)e2->gpe_start,
327 e1->gpe_index, (intmax_t)e1->gpe_start,
328 e2->gpe_index, (intmax_t)e2->gpe_end);
331 if (e1->gpe_end >= e2->gpe_start &&
332 e1->gpe_end <= e2->gpe_end) {
333 DPRINTF("partition %d has end offset inside "
334 "partition %d: start[%d] %jd >= end[%d] "
335 "%jd <= end[%d] %jd\n",
336 e1->gpe_index, e2->gpe_index,
337 e2->gpe_index, (intmax_t)e2->gpe_start,
338 e1->gpe_index, (intmax_t)e1->gpe_end,
339 e2->gpe_index, (intmax_t)e2->gpe_end);
342 if (e1->gpe_start < e2->gpe_start &&
343 e1->gpe_end > e2->gpe_end) {
344 DPRINTF("partition %d contains partition %d: "
345 "start[%d] %jd > start[%d] %jd, end[%d] "
346 "%jd < end[%d] %jd\n",
347 e1->gpe_index, e2->gpe_index,
348 e1->gpe_index, (intmax_t)e1->gpe_start,
349 e2->gpe_index, (intmax_t)e2->gpe_start,
350 e2->gpe_index, (intmax_t)e2->gpe_end,
351 e1->gpe_index, (intmax_t)e1->gpe_end);
357 printf("GEOM_PART: integrity check failed (%s, %s)\n",
358 pp->name, table->gpt_scheme->name);
359 if (check_integrity != 0)
361 table->gpt_corrupt = 1;
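/*
 * Behaviour sketch: with check_integrity disabled the table is only
 * flagged gpt_corrupt here; most modifying verbs then refuse to run
 * until the user repairs it, e.g. with "gpart recover ada0", which
 * ends up in g_part_ctl_recover() below.
 */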
367 struct g_part_entry *
368 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
371 struct g_part_entry *entry, *last;
374 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
375 if (entry->gpe_index == index)
377 if (entry->gpe_index > index) {
384 entry = g_malloc(table->gpt_scheme->gps_entrysz,
386 entry->gpe_index = index;
388 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
390 LIST_INSERT_AFTER(last, entry, gpe_entry);
392 entry->gpe_offset = 0;
393 entry->gpe_start = start;
394 entry->gpe_end = end;
399 g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
400 struct g_part_entry *entry)
402 struct g_consumer *cp;
403 struct g_provider *pp;
407 cp = LIST_FIRST(&gp->consumer);
410 offset = entry->gpe_start * pp->sectorsize;
411 if (entry->gpe_offset < offset)
412 entry->gpe_offset = offset;
414 if (entry->gpe_pp == NULL) {
415 sb = sbuf_new_auto();
416 G_PART_FULLNAME(table, entry, sb, gp->name);
418 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
420 entry->gpe_pp->private = entry; /* Close the circle. */
422 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */
423 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
425 entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
426 entry->gpe_pp->sectorsize = pp->sectorsize;
427 entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
428 entry->gpe_pp->stripesize = pp->stripesize;
429 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
430 if (pp->stripesize > 0)
431 entry->gpe_pp->stripeoffset %= pp->stripesize;
432 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
433 g_error_provider(entry->gpe_pp, 0);
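/*
 * Alignment example (illustrative numbers): on a parent provider with
 * stripesize 4096 and stripeoffset 0, a partition starting at LBA 63 of
 * a 512-byte-sector disk has gpe_offset 63 * 512 = 32256, so the new
 * provider's stripeoffset becomes 32256 % 4096 = 3584, i.e. the
 * partition is misaligned by 3584 bytes with respect to the stripe.
 */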
436 static struct g_geom*
437 g_part_find_geom(const char *name)
440 LIST_FOREACH(gp, &g_part_class.geom, geom) {
441 if (!strcmp(name, gp->name))
448 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
453 gname = gctl_get_asciiparam(req, name);
456 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
457 gname += sizeof(_PATH_DEV) - 1;
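/*
 * E.g. "ada0" and "/dev/ada0" name the same geom after this stripping;
 * g_part_parm_provider() below does the same for provider arguments.
 */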
458 gp = g_part_find_geom(gname);
460 gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
463 if ((gp->flags & G_GEOM_WITHER) != 0) {
464 gctl_error(req, "%d %s", ENXIO, gname);
472 g_part_parm_provider(struct gctl_req *req, const char *name,
473 struct g_provider **v)
475 struct g_provider *pp;
478 pname = gctl_get_asciiparam(req, name);
481 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
482 pname += sizeof(_PATH_DEV) - 1;
483 pp = g_provider_by_name(pname);
485 gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
493 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
499 p = gctl_get_asciiparam(req, name);
502 q = strtoq(p, &x, 0);
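/*
 * strtoq() is called with base 0, so callers may pass the value in
 * decimal, octal or hex; e.g. "2097152" and "0x200000" parse to the
 * same quad, while trailing garbage or negative values are rejected
 * below.
 */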
503 if (*x != '\0' || q < 0) {
504 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
512 g_part_parm_scheme(struct gctl_req *req, const char *name,
513 struct g_part_scheme **v)
515 struct g_part_scheme *s;
518 p = gctl_get_asciiparam(req, name);
521 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
522 if (s == &g_part_null_scheme)
524 if (!strcasecmp(s->name, p))
528 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
536 g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
540 p = gctl_get_asciiparam(req, name);
543 /* An empty label is always valid. */
544 if (strcmp(name, "label") != 0 && p[0] == '\0') {
545 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
553 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
558 p = gctl_get_param(req, name, &size);
561 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
562 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
570 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
575 p = gctl_get_param(req, name, &size);
578 if (size != sizeof(*p) || *p > INT_MAX) {
579 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
587 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
593 p = gctl_get_param(req, name, &size);
602 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
604 struct g_part_scheme *iter, *scheme;
605 struct g_part_table *table;
609 scheme = (table != NULL) ? table->gpt_scheme : NULL;
610 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
613 if (pri > 0) { /* error */
618 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
619 if (iter == &g_part_null_scheme)
621 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
624 table->gpt_scheme = iter;
625 table->gpt_depth = depth;
626 probe = G_PART_PROBE(table, cp);
627 if (probe <= 0 && probe > pri) {
630 if (gp->softc != NULL)
631 kobj_delete((kobj_t)gp->softc, M_GEOM);
636 kobj_delete((kobj_t)table, M_GEOM);
640 return ((scheme == NULL) ? ENXIO : 0);
644 * Control request functions.
648 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
651 struct g_provider *pp;
652 struct g_part_entry *delent, *last, *entry;
653 struct g_part_table *table;
660 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
663 pp = LIST_FIRST(&gp->consumer)->provider;
665 end = gpp->gpp_start + gpp->gpp_size - 1;
667 if (gpp->gpp_start < table->gpt_first ||
668 gpp->gpp_start > table->gpt_last) {
669 gctl_error(req, "%d start '%jd'", EINVAL,
670 (intmax_t)gpp->gpp_start);
673 if (end < gpp->gpp_start || end > table->gpt_last) {
674 gctl_error(req, "%d size '%jd'", EINVAL,
675 (intmax_t)gpp->gpp_size);
678 if (gpp->gpp_index > table->gpt_entries) {
679 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
683 delent = last = NULL;
684 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
685 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
686 if (entry->gpe_deleted) {
687 if (entry->gpe_index == index)
691 if (entry->gpe_index == index)
692 index = entry->gpe_index + 1;
693 if (entry->gpe_index < index)
695 if (entry->gpe_internal)
697 if (gpp->gpp_start >= entry->gpe_start &&
698 gpp->gpp_start <= entry->gpe_end) {
699 gctl_error(req, "%d start '%jd'", ENOSPC,
700 (intmax_t)gpp->gpp_start);
703 if (end >= entry->gpe_start && end <= entry->gpe_end) {
704 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
707 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
708 gctl_error(req, "%d size '%jd'", ENOSPC,
709 (intmax_t)gpp->gpp_size);
713 if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
714 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
717 if (index > table->gpt_entries) {
718 gctl_error(req, "%d index '%d'", ENOSPC, index);
722 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
723 M_WAITOK | M_ZERO) : delent;
724 entry->gpe_index = index;
725 entry->gpe_start = gpp->gpp_start;
726 entry->gpe_end = end;
727 error = G_PART_ADD(table, entry, gpp);
729 gctl_error(req, "%d", error);
734 if (delent == NULL) {
736 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
738 LIST_INSERT_AFTER(last, entry, gpe_entry);
739 entry->gpe_created = 1;
741 entry->gpe_deleted = 0;
742 entry->gpe_modified = 1;
744 g_part_new_provider(gp, table, entry);
746 /* Provide feedback if so requested. */
747 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
748 sb = sbuf_new_auto();
749 G_PART_FULLNAME(table, entry, sb, gp->name);
750 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
751 sbuf_printf(sb, " added, but partition is not "
752 "aligned on %u bytes\n", pp->stripesize);
754 sbuf_cat(sb, " added\n");
756 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
763 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
766 struct g_part_table *table;
771 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
775 sz = table->gpt_scheme->gps_bootcodesz;
780 if (gpp->gpp_codesize > sz) {
785 error = G_PART_BOOTCODE(table, gpp);
789 /* Provide feedback if so requested. */
790 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
791 sb = sbuf_new_auto();
792 sbuf_printf(sb, "bootcode written to %s\n", gp->name);
794 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
800 gctl_error(req, "%d", error);
805 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
807 struct g_consumer *cp;
809 struct g_provider *pp;
810 struct g_part_entry *entry, *tmp;
811 struct g_part_table *table;
816 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
820 if (!table->gpt_opened) {
821 gctl_error(req, "%d", EPERM);
827 cp = LIST_FIRST(&gp->consumer);
828 if ((table->gpt_smhead | table->gpt_smtail) != 0) {
830 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
831 while (table->gpt_smhead != 0) {
832 i = ffs(table->gpt_smhead) - 1;
833 error = g_write_data(cp, i * pp->sectorsize, buf,
839 table->gpt_smhead &= ~(1 << i);
841 while (table->gpt_smtail != 0) {
842 i = ffs(table->gpt_smtail) - 1;
843 error = g_write_data(cp, pp->mediasize - (i + 1) *
844 pp->sectorsize, buf, pp->sectorsize);
849 table->gpt_smtail &= ~(1 << i);
854 if (table->gpt_scheme == &g_part_null_scheme) {
856 g_access(cp, -1, -1, -1);
857 g_part_wither(gp, ENXIO);
861 error = G_PART_WRITE(table, cp);
865 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
866 if (!entry->gpe_deleted) {
867 entry->gpe_created = 0;
868 entry->gpe_modified = 0;
871 LIST_REMOVE(entry, gpe_entry);
874 table->gpt_created = 0;
875 table->gpt_opened = 0;
878 g_access(cp, -1, -1, -1);
883 gctl_error(req, "%d", error);
888 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
890 struct g_consumer *cp;
892 struct g_provider *pp;
893 struct g_part_scheme *scheme;
894 struct g_part_table *null, *table;
898 pp = gpp->gpp_provider;
899 scheme = gpp->gpp_scheme;
900 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
903 /* Check that there isn't already a g_part geom on the provider. */
904 gp = g_part_find_geom(pp->name);
907 if (null->gpt_scheme != &g_part_null_scheme) {
908 gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
914 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
915 (gpp->gpp_entries < scheme->gps_minent ||
916 gpp->gpp_entries > scheme->gps_maxent)) {
917 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
922 gp = g_new_geomf(&g_part_class, "%s", pp->name);
923 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
927 table->gpt_scheme = gpp->gpp_scheme;
928 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
929 gpp->gpp_entries : scheme->gps_minent;
930 LIST_INIT(&table->gpt_entry);
932 cp = g_new_consumer(gp);
933 error = g_attach(cp, pp);
935 error = g_access(cp, 1, 1, 1);
937 g_part_wither(gp, error);
938 gctl_error(req, "%d geom '%s'", error, pp->name);
941 table->gpt_opened = 1;
943 cp = LIST_FIRST(&gp->consumer);
944 table->gpt_opened = null->gpt_opened;
945 table->gpt_smhead = null->gpt_smhead;
946 table->gpt_smtail = null->gpt_smtail;
951 /* Make sure the provider has media. */
952 if (pp->mediasize == 0 || pp->sectorsize == 0) {
957 /* Make sure we can nest and if so, determine our depth. */
958 error = g_getattr("PART::isleaf", cp, &attr);
959 if (!error && attr) {
963 error = g_getattr("PART::depth", cp, &attr);
964 table->gpt_depth = (!error) ? attr + 1 : 0;
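/*
 * Nesting example (illustrative): tasting a BSD label inside an MBR
 * slice gives da0 -> da0s1 -> da0s1a; the slice geom reports
 * PART::depth 0, so the label geom computes gpt_depth 1 here. A
 * provider whose table answers PART::isleaf cannot be partitioned
 * further (the error path above).
 */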
967 * Synthesize a disk geometry. Some partitioning schemes
968 * depend on it and since some file systems need it even
969 * when the partition scheme doesn't, we do it here in
970 * scheme-independent code.
972 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
974 error = G_PART_CREATE(table, gpp);
980 table->gpt_created = 1;
982 kobj_delete((kobj_t)null, M_GEOM);
985 * Support automatic commit by filling in the gpp_geom
988 gpp->gpp_parms |= G_PART_PARM_GEOM;
991 /* Provide feedback if so requested. */
992 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
993 sb = sbuf_new_auto();
994 sbuf_printf(sb, "%s created\n", gp->name);
996 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1004 g_access(cp, -1, -1, -1);
1005 g_part_wither(gp, error);
1007 kobj_delete((kobj_t)gp->softc, M_GEOM);
1010 gctl_error(req, "%d provider", error);
1015 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
1018 struct g_provider *pp;
1019 struct g_part_entry *entry;
1020 struct g_part_table *table;
1024 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1025 g_topology_assert();
1029 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1030 if (entry->gpe_deleted || entry->gpe_internal)
1032 if (entry->gpe_index == gpp->gpp_index)
1035 if (entry == NULL) {
1036 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1042 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
1043 gctl_error(req, "%d", EBUSY);
1048 entry->gpe_pp = NULL;
1052 g_wither_provider(pp, ENXIO);
1054 /* Provide feedback if so requested. */
1055 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1056 sb = sbuf_new_auto();
1057 G_PART_FULLNAME(table, entry, sb, gp->name);
1058 sbuf_cat(sb, " deleted\n");
1060 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1064 if (entry->gpe_created) {
1065 LIST_REMOVE(entry, gpe_entry);
1068 entry->gpe_modified = 0;
1069 entry->gpe_deleted = 1;
1075 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
1077 struct g_consumer *cp;
1079 struct g_provider *pp;
1080 struct g_part_entry *entry, *tmp;
1081 struct g_part_table *null, *table;
1086 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1087 g_topology_assert();
1090 /* Check for busy providers. */
1091 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1092 if (entry->gpe_deleted || entry->gpe_internal)
1094 if (gpp->gpp_force) {
1098 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
1101 gctl_error(req, "%d", EBUSY);
1105 if (gpp->gpp_force) {
1106 /* Destroy all providers. */
1107 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1111 g_wither_provider(pp, ENXIO);
1113 LIST_REMOVE(entry, gpe_entry);
1118 error = G_PART_DESTROY(table, gpp);
1120 gctl_error(req, "%d", error);
1124 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
1128 null->gpt_scheme = &g_part_null_scheme;
1129 LIST_INIT(&null->gpt_entry);
1131 cp = LIST_FIRST(&gp->consumer);
1133 null->gpt_last = pp->mediasize / pp->sectorsize - 1;
1135 null->gpt_depth = table->gpt_depth;
1136 null->gpt_opened = table->gpt_opened;
1137 null->gpt_smhead = table->gpt_smhead;
1138 null->gpt_smtail = table->gpt_smtail;
1140 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1141 LIST_REMOVE(entry, gpe_entry);
1144 kobj_delete((kobj_t)table, M_GEOM);
1146 /* Provide feedback if so requested. */
1147 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1148 sb = sbuf_new_auto();
1149 sbuf_printf(sb, "%s destroyed\n", gp->name);
1151 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1158 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
1161 struct g_part_entry *entry;
1162 struct g_part_table *table;
1167 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1168 g_topology_assert();
1172 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1173 if (entry->gpe_deleted || entry->gpe_internal)
1175 if (entry->gpe_index == gpp->gpp_index)
1178 if (entry == NULL) {
1179 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1183 error = G_PART_MODIFY(table, entry, gpp);
1185 gctl_error(req, "%d", error);
1189 if (!entry->gpe_created)
1190 entry->gpe_modified = 1;
1192 /* Provide feedback if so requested. */
1193 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1194 sb = sbuf_new_auto();
1195 G_PART_FULLNAME(table, entry, sb, gp->name);
1196 sbuf_cat(sb, " modified\n");
1198 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1205 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
1207 gctl_error(req, "%d verb 'move'", ENOSYS);
1212 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
1214 struct g_part_table *table;
1217 int error, recovered;
1220 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1221 g_topology_assert();
1223 error = recovered = 0;
1225 if (table->gpt_corrupt) {
1226 error = G_PART_RECOVER(table);
1228 error = g_part_check_integrity(table,
1229 LIST_FIRST(&gp->consumer));
1231 gctl_error(req, "%d recovering '%s' failed",
1237 /* Provide feedback if so requested. */
1238 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1239 sb = sbuf_new_auto();
1241 sbuf_printf(sb, "%s recovered\n", gp->name);
1243 sbuf_printf(sb, "%s recovering is not needed\n",
1246 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1253 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
1256 struct g_provider *pp;
1257 struct g_part_entry *pe, *entry;
1258 struct g_part_table *table;
1264 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1265 g_topology_assert();
1268 /* check gpp_index */
1269 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1270 if (entry->gpe_deleted || entry->gpe_internal)
1272 if (entry->gpe_index == gpp->gpp_index)
1275 if (entry == NULL) {
1276 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1280 /* check gpp_size */
1281 end = entry->gpe_start + gpp->gpp_size - 1;
1282 if (gpp->gpp_size < 1 || end > table->gpt_last) {
1283 gctl_error(req, "%d size '%jd'", EINVAL,
1284 (intmax_t)gpp->gpp_size);
1288 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
1289 if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
1291 if (end >= pe->gpe_start && end <= pe->gpe_end) {
1292 gctl_error(req, "%d end '%jd'", ENOSPC,
1296 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
1297 gctl_error(req, "%d size '%jd'", ENOSPC,
1298 (intmax_t)gpp->gpp_size);
1304 if ((g_debugflags & 16) == 0 &&
1305 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
1306 gctl_error(req, "%d", EBUSY);
1310 error = G_PART_RESIZE(table, entry, gpp);
1312 gctl_error(req, "%d", error);
1316 if (!entry->gpe_created)
1317 entry->gpe_modified = 1;
1319 /* update mediasize of changed provider */
1320 pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
1323 /* Provide feedback if so requested. */
1324 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1325 sb = sbuf_new_auto();
1326 G_PART_FULLNAME(table, entry, sb, gp->name);
1327 sbuf_cat(sb, " resized\n");
1329 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1336 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1340 struct g_part_entry *entry;
1341 struct g_part_table *table;
1346 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1347 g_topology_assert();
1351 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1352 if (entry->gpe_deleted || entry->gpe_internal)
1354 if (entry->gpe_index == gpp->gpp_index)
1357 if (entry == NULL) {
1358 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1362 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1364 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1368 /* Provide feedback if so requested. */
1369 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1370 sb = sbuf_new_auto();
1371 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
1373 G_PART_FULLNAME(table, entry, sb, gp->name);
1374 sbuf_printf(sb, "\n");
1376 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1383 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
1385 struct g_consumer *cp;
1386 struct g_provider *pp;
1388 struct g_part_entry *entry, *tmp;
1389 struct g_part_table *table;
1393 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1394 g_topology_assert();
1397 if (!table->gpt_opened) {
1398 gctl_error(req, "%d", EPERM);
1402 cp = LIST_FIRST(&gp->consumer);
1403 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1404 entry->gpe_modified = 0;
1405 if (entry->gpe_created) {
1409 entry->gpe_pp = NULL;
1410 g_wither_provider(pp, ENXIO);
1412 entry->gpe_deleted = 1;
1414 if (entry->gpe_deleted) {
1415 LIST_REMOVE(entry, gpe_entry);
1420 g_topology_unlock();
1422 reprobe = (table->gpt_scheme == &g_part_null_scheme ||
1423 table->gpt_created) ? 1 : 0;
1426 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1427 if (entry->gpe_internal)
1432 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1433 LIST_REMOVE(entry, gpe_entry);
1436 error = g_part_probe(gp, cp, table->gpt_depth);
1439 g_access(cp, -1, -1, -1);
1440 g_part_wither(gp, error);
1446 * Synthesize a disk geometry. Some partitioning schemes
1447 * depend on it and since some file systems need it even
1448 * when the partition scheme doesn't, we do it here in
1449 * scheme-independent code.
1452 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1455 error = G_PART_READ(table, cp);
1458 error = g_part_check_integrity(table, cp);
1463 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1464 if (!entry->gpe_internal)
1465 g_part_new_provider(gp, table, entry);
1468 table->gpt_opened = 0;
1469 g_access(cp, -1, -1, -1);
1474 gctl_error(req, "%d", error);
1479 g_part_wither(struct g_geom *gp, int error)
1481 struct g_part_entry *entry;
1482 struct g_part_table *table;
1485 if (table != NULL) {
1486 G_PART_DESTROY(table, NULL);
1487 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1488 LIST_REMOVE(entry, gpe_entry);
1491 if (gp->softc != NULL) {
1492 kobj_delete((kobj_t)gp->softc, M_GEOM);
1496 g_wither_geom(gp, error);
1504 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
1506 struct g_part_parms gpp;
1507 struct g_part_table *table;
1508 struct gctl_req_arg *ap;
1509 enum g_part_ctl ctlreq;
1510 unsigned int i, mparms, oparms, parm;
1511 int auto_commit, close_on_error;
1512 int error, modifies;
1514 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1515 g_topology_assert();
1517 ctlreq = G_PART_CTL_NONE;
1520 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1523 if (!strcmp(verb, "add")) {
1524 ctlreq = G_PART_CTL_ADD;
1525 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1526 G_PART_PARM_START | G_PART_PARM_TYPE;
1527 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1531 if (!strcmp(verb, "bootcode")) {
1532 ctlreq = G_PART_CTL_BOOTCODE;
1533 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
1537 if (!strcmp(verb, "commit")) {
1538 ctlreq = G_PART_CTL_COMMIT;
1539 mparms |= G_PART_PARM_GEOM;
1541 } else if (!strcmp(verb, "create")) {
1542 ctlreq = G_PART_CTL_CREATE;
1543 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1544 oparms |= G_PART_PARM_ENTRIES;
1548 if (!strcmp(verb, "delete")) {
1549 ctlreq = G_PART_CTL_DELETE;
1550 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1551 } else if (!strcmp(verb, "destroy")) {
1552 ctlreq = G_PART_CTL_DESTROY;
1553 mparms |= G_PART_PARM_GEOM;
1554 oparms |= G_PART_PARM_FORCE;
1558 if (!strcmp(verb, "modify")) {
1559 ctlreq = G_PART_CTL_MODIFY;
1560 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1561 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1562 } else if (!strcmp(verb, "move")) {
1563 ctlreq = G_PART_CTL_MOVE;
1564 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1568 if (!strcmp(verb, "recover")) {
1569 ctlreq = G_PART_CTL_RECOVER;
1570 mparms |= G_PART_PARM_GEOM;
1571 } else if (!strcmp(verb, "resize")) {
1572 ctlreq = G_PART_CTL_RESIZE;
1573 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
1578 if (!strcmp(verb, "set")) {
1579 ctlreq = G_PART_CTL_SET;
1580 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
1585 if (!strcmp(verb, "undo")) {
1586 ctlreq = G_PART_CTL_UNDO;
1587 mparms |= G_PART_PARM_GEOM;
1589 } else if (!strcmp(verb, "unset")) {
1590 ctlreq = G_PART_CTL_UNSET;
1591 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
1596 if (ctlreq == G_PART_CTL_NONE) {
1597 gctl_error(req, "%d verb '%s'", EINVAL, verb);
1601 bzero(&gpp, sizeof(gpp));
1602 for (i = 0; i < req->narg; i++) {
1605 switch (ap->name[0]) {
1607 if (!strcmp(ap->name, "arg0")) {
1609 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
1611 if (!strcmp(ap->name, "attrib"))
1612 parm = G_PART_PARM_ATTRIB;
1615 if (!strcmp(ap->name, "bootcode"))
1616 parm = G_PART_PARM_BOOTCODE;
1619 if (!strcmp(ap->name, "class"))
1623 if (!strcmp(ap->name, "entries"))
1624 parm = G_PART_PARM_ENTRIES;
1627 if (!strcmp(ap->name, "flags"))
1628 parm = G_PART_PARM_FLAGS;
1629 else if (!strcmp(ap->name, "force"))
1630 parm = G_PART_PARM_FORCE;
1633 if (!strcmp(ap->name, "index"))
1634 parm = G_PART_PARM_INDEX;
1637 if (!strcmp(ap->name, "label"))
1638 parm = G_PART_PARM_LABEL;
1641 if (!strcmp(ap->name, "output"))
1642 parm = G_PART_PARM_OUTPUT;
1645 if (!strcmp(ap->name, "scheme"))
1646 parm = G_PART_PARM_SCHEME;
1647 else if (!strcmp(ap->name, "size"))
1648 parm = G_PART_PARM_SIZE;
1649 else if (!strcmp(ap->name, "start"))
1650 parm = G_PART_PARM_START;
1653 if (!strcmp(ap->name, "type"))
1654 parm = G_PART_PARM_TYPE;
1657 if (!strcmp(ap->name, "verb"))
1659 else if (!strcmp(ap->name, "version"))
1660 parm = G_PART_PARM_VERSION;
1663 if ((parm & (mparms | oparms)) == 0) {
1664 gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1668 case G_PART_PARM_ATTRIB:
1669 error = g_part_parm_str(req, ap->name,
1672 case G_PART_PARM_BOOTCODE:
1673 error = g_part_parm_bootcode(req, ap->name,
1674 &gpp.gpp_codeptr, &gpp.gpp_codesize);
1676 case G_PART_PARM_ENTRIES:
1677 error = g_part_parm_intmax(req, ap->name,
1680 case G_PART_PARM_FLAGS:
1681 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
1683 case G_PART_PARM_FORCE:
1684 error = g_part_parm_uint32(req, ap->name,
1687 case G_PART_PARM_GEOM:
1688 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
1690 case G_PART_PARM_INDEX:
1691 error = g_part_parm_intmax(req, ap->name,
1694 case G_PART_PARM_LABEL:
1695 error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
1697 case G_PART_PARM_OUTPUT:
1698 error = 0; /* Write-only parameter */
1700 case G_PART_PARM_PROVIDER:
1701 error = g_part_parm_provider(req, ap->name,
1704 case G_PART_PARM_SCHEME:
1705 error = g_part_parm_scheme(req, ap->name,
1708 case G_PART_PARM_SIZE:
1709 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
1711 case G_PART_PARM_START:
1712 error = g_part_parm_quad(req, ap->name,
1715 case G_PART_PARM_TYPE:
1716 error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
1718 case G_PART_PARM_VERSION:
1719 error = g_part_parm_uint32(req, ap->name,
1724 gctl_error(req, "%d %s", error, ap->name);
1728 if (error == ENOATTR) {
1729 gctl_error(req, "%d param '%s'", error,
1734 gpp.gpp_parms |= parm;
1736 if ((gpp.gpp_parms & mparms) != mparms) {
1737 parm = mparms - (gpp.gpp_parms & mparms);
1738 gctl_error(req, "%d param '%x'", ENOATTR, parm);
1742 /* Obtain permissions if possible/necessary. */
1745 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1746 table = gpp.gpp_geom->softc;
1747 if (table != NULL && table->gpt_corrupt &&
1748 ctlreq != G_PART_CTL_DESTROY &&
1749 ctlreq != G_PART_CTL_RECOVER) {
1750 gctl_error(req, "%d table '%s' is corrupt",
1751 EPERM, gpp.gpp_geom->name);
1754 if (table != NULL && !table->gpt_opened) {
1755 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1758 gctl_error(req, "%d geom '%s'", error,
1759 gpp.gpp_geom->name);
1762 table->gpt_opened = 1;
1767 /* Allow the scheme to check or modify the parameters. */
1768 if (table != NULL) {
1769 error = G_PART_PRECHECK(table, ctlreq, &gpp);
1771 gctl_error(req, "%d pre-check failed", error);
1775 error = EDOOFUS; /* Prevent bogus uninit. warning. */
1778 case G_PART_CTL_NONE:
1779 panic("%s", __func__);
1780 case G_PART_CTL_ADD:
1781 error = g_part_ctl_add(req, &gpp);
1783 case G_PART_CTL_BOOTCODE:
1784 error = g_part_ctl_bootcode(req, &gpp);
1786 case G_PART_CTL_COMMIT:
1787 error = g_part_ctl_commit(req, &gpp);
1789 case G_PART_CTL_CREATE:
1790 error = g_part_ctl_create(req, &gpp);
1792 case G_PART_CTL_DELETE:
1793 error = g_part_ctl_delete(req, &gpp);
1795 case G_PART_CTL_DESTROY:
1796 error = g_part_ctl_destroy(req, &gpp);
1798 case G_PART_CTL_MODIFY:
1799 error = g_part_ctl_modify(req, &gpp);
1801 case G_PART_CTL_MOVE:
1802 error = g_part_ctl_move(req, &gpp);
1804 case G_PART_CTL_RECOVER:
1805 error = g_part_ctl_recover(req, &gpp);
1807 case G_PART_CTL_RESIZE:
1808 error = g_part_ctl_resize(req, &gpp);
1810 case G_PART_CTL_SET:
1811 error = g_part_ctl_setunset(req, &gpp, 1);
1813 case G_PART_CTL_UNDO:
1814 error = g_part_ctl_undo(req, &gpp);
1816 case G_PART_CTL_UNSET:
1817 error = g_part_ctl_setunset(req, &gpp, 0);
1821 /* Implement automatic commit. */
1823 auto_commit = (modifies &&
1824 (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1825 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
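/*
 * Flag sketch: gpart(8) normally includes 'C' in the "flags" argument,
 * so a successful modifying verb is committed right here; running e.g.
 * "gpart add -f x ..." omits it, leaving the change pending until an
 * explicit "gpart commit".
 */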
1827 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
1829 error = g_part_ctl_commit(req, &gpp);
1834 if (error && close_on_error) {
1835 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1836 table->gpt_opened = 0;
1841 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1845 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1846 g_topology_assert();
1848 g_part_wither(gp, EINVAL);
1852 static struct g_geom *
1853 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1855 struct g_consumer *cp;
1857 struct g_part_entry *entry;
1858 struct g_part_table *table;
1859 struct root_hold_token *rht;
1863 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
1864 g_topology_assert();
1866 /* Skip providers that are already open for writing. */
1871 * Create a GEOM with consumer and hook it up to the provider.
1872 * With that we become part of the topology. Obtain read access
1875 gp = g_new_geomf(mp, "%s", pp->name);
1876 cp = g_new_consumer(gp);
1877 error = g_attach(cp, pp);
1879 error = g_access(cp, 1, 0, 0);
1883 g_destroy_consumer(cp);
1888 rht = root_mount_hold(mp->name);
1889 g_topology_unlock();
1892 * Short-circuit the whole probing galore when there's no
1895 if (pp->mediasize == 0 || pp->sectorsize == 0) {
1900 /* Make sure we can nest and if so, determine our depth. */
1901 error = g_getattr("PART::isleaf", cp, &attr);
1902 if (!error && attr) {
1906 error = g_getattr("PART::depth", cp, &attr);
1907 depth = (!error) ? attr + 1 : 0;
1909 error = g_part_probe(gp, cp, depth);
1916 * Synthesize a disk geometry. Some partitioning schemes
1917 * depend on it and since some file systems need it even
1918 * when the partition scheme doesn't, we do it here in
1919 * scheme-independent code.
1921 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1923 error = G_PART_READ(table, cp);
1926 error = g_part_check_integrity(table, cp);
1931 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1932 if (!entry->gpe_internal)
1933 g_part_new_provider(gp, table, entry);
1936 root_mount_rel(rht);
1937 g_access(cp, -1, 0, 0);
1942 root_mount_rel(rht);
1943 g_access(cp, -1, 0, 0);
1945 g_destroy_consumer(cp);
1955 g_part_access(struct g_provider *pp, int dr, int dw, int de)
1957 struct g_consumer *cp;
1959 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
1962 cp = LIST_FIRST(&pp->geom->consumer);
1964 /* We always gain write-exclusive access. */
1965 return (g_access(cp, dr, dw, dw + de));
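/*
 * Example: opening a partition with (dr, dw, de) = (1, 1, 0) requests
 * (1, 1, 1) from the consumer below us, so any write access to a
 * partition excludes foreign writers on the underlying disk.
 */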
1969 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1970 struct g_consumer *cp, struct g_provider *pp)
1973 struct g_part_entry *entry;
1974 struct g_part_table *table;
1976 KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
1979 if (indent == NULL) {
1980 KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
1981 entry = pp->private;
1984 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
1985 (uintmax_t)entry->gpe_offset,
1986 G_PART_TYPE(table, entry, buf, sizeof(buf)));
1988 * libdisk compatibility quirk - the scheme dumps the
1989 * slicer name and partition type in a way that is
1990 * compatible with libdisk. When libdisk is not used
1991 * anymore, this should go away.
1993 G_PART_DUMPCONF(table, entry, sb, indent);
1994 } else if (cp != NULL) { /* Consumer configuration. */
1995 KASSERT(pp == NULL, ("%s", __func__));
1997 } else if (pp != NULL) { /* Provider configuration. */
1998 entry = pp->private;
2001 sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
2002 (uintmax_t)entry->gpe_start);
2003 sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
2004 (uintmax_t)entry->gpe_end);
2005 sbuf_printf(sb, "%s<index>%u</index>\n", indent,
2007 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
2008 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2009 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
2010 (uintmax_t)entry->gpe_offset);
2011 sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
2012 (uintmax_t)pp->mediasize);
2013 G_PART_DUMPCONF(table, entry, sb, indent);
2014 } else { /* Geom configuration. */
2015 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
2016 table->gpt_scheme->name);
2017 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
2018 table->gpt_entries);
2019 sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
2020 (uintmax_t)table->gpt_first);
2021 sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
2022 (uintmax_t)table->gpt_last);
2023 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
2024 table->gpt_sectors);
2025 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
2027 sbuf_printf(sb, "%s<state>%s</state>\n", indent,
2028 table->gpt_corrupt ? "CORRUPT": "OK");
2029 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
2030 table->gpt_opened ? "true": "false");
2031 G_PART_DUMPCONF(table, NULL, sb, indent);
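/*
 * These branches contribute fragments of the kern.geom.confxml tree
 * consumed by gpart(8) and libgeom; the provider branch above emits
 * roughly (values illustrative):
 *   <start>40</start><end>2097191</end><index>1</index>
 *   <type>freebsd-ufs</type><offset>20480</offset><length>1073741824</length>
 */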
2036 g_part_orphan(struct g_consumer *cp)
2038 struct g_provider *pp;
2039 struct g_part_table *table;
2042 KASSERT(pp != NULL, ("%s", __func__));
2043 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
2044 g_topology_assert();
2046 KASSERT(pp->error != 0, ("%s", __func__));
2047 table = cp->geom->softc;
2048 if (table != NULL && table->gpt_opened)
2049 g_access(cp, -1, -1, -1);
2050 g_part_wither(cp->geom, pp->error);
2054 g_part_spoiled(struct g_consumer *cp)
2057 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2058 g_topology_assert();
2060 cp->flags |= G_CF_ORPHAN;
2061 g_part_wither(cp->geom, ENXIO);
2065 g_part_start(struct bio *bp)
2068 struct g_consumer *cp;
2070 struct g_part_entry *entry;
2071 struct g_part_table *table;
2072 struct g_kerneldump *gkd;
2073 struct g_provider *pp;
2079 cp = LIST_FIRST(&gp->consumer);
2081 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
2084 entry = pp->private;
2085 if (entry == NULL) {
2086 g_io_deliver(bp, ENXIO);
2090 switch(bp->bio_cmd) {
2094 if (bp->bio_offset >= pp->mediasize) {
2095 g_io_deliver(bp, EIO);
2098 bp2 = g_clone_bio(bp);
2100 g_io_deliver(bp, ENOMEM);
2103 if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
2104 bp2->bio_length = pp->mediasize - bp2->bio_offset;
2105 bp2->bio_done = g_std_done;
2106 bp2->bio_offset += entry->gpe_offset;
2107 g_io_request(bp2, cp);
2112 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
2114 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
2116 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
2118 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
2120 if (g_handleattr_str(bp, "PART::scheme",
2121 table->gpt_scheme->name))
2123 if (g_handleattr_str(bp, "PART::type",
2124 G_PART_TYPE(table, entry, buf, sizeof(buf))))
2126 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
2128 * Check that the partition is suitable for kernel
2129 * dumps. Typically only swap partitions should be
2130 * used. If the request comes from the nested scheme
2131 * we allow dumping there as well.
2133 if ((bp->bio_from == NULL ||
2134 bp->bio_from->geom->class != &g_part_class) &&
2135 G_PART_DUMPTO(table, entry) == 0) {
2136 g_io_deliver(bp, ENODEV);
2137 printf("GEOM_PART: Partition '%s' not suitable"
2138 " for kernel dumps (wrong type?)\n",
2142 gkd = (struct g_kerneldump *)bp->bio_data;
2143 if (gkd->offset >= pp->mediasize) {
2144 g_io_deliver(bp, EIO);
2147 if (gkd->offset + gkd->length > pp->mediasize)
2148 gkd->length = pp->mediasize - gkd->offset;
2149 gkd->offset += entry->gpe_offset;
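/*
 * E.g. "dumpon /dev/ada0p3" (a freebsd-swap partition) sends a
 * GEOM::kerneldump request down this path; the dump window is clipped
 * to the partition and shifted by gpe_offset so the kernel writes to
 * the right place on the underlying disk.
 */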
2153 g_io_deliver(bp, EOPNOTSUPP);
2157 bp2 = g_clone_bio(bp);
2159 g_io_deliver(bp, ENOMEM);
2162 bp2->bio_done = g_std_done;
2163 g_io_request(bp2, cp);
2167 g_part_init(struct g_class *mp)
2170 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
2174 g_part_fini(struct g_class *mp)
2177 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
2181 g_part_unload_event(void *arg, int flag)
2183 struct g_consumer *cp;
2185 struct g_provider *pp;
2186 struct g_part_scheme *scheme;
2187 struct g_part_table *table;
2191 if (flag == EV_CANCEL)
2196 scheme = (void *)(*xchg);
2198 g_topology_assert();
2200 LIST_FOREACH(gp, &g_part_class.geom, geom) {
2202 if (table->gpt_scheme != scheme)
2206 LIST_FOREACH(pp, &gp->provider, provider)
2207 acc += pp->acr + pp->acw + pp->ace;
2208 LIST_FOREACH(cp, &gp->consumer, consumer)
2209 acc += cp->acr + cp->acw + cp->ace;
2212 g_part_wither(gp, ENOSYS);
2218 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
2224 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
2226 struct g_part_scheme *iter;
2233 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
2234 if (scheme == iter) {
2235 printf("GEOM_PART: scheme %s is already "
2236 "registered!\n", scheme->name);
2241 TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
2243 g_retaste(&g_part_class);
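/*
 * Registration sketch: individual schemes reach g_part_modevent()
 * through a module event handler, typically via the
 * G_PART_SCHEME_DECLARE() macro from g_part.h (e.g.
 * G_PART_SCHEME_DECLARE(g_part_gpt) in g_part_gpt.c), which lands in
 * the MOD_LOAD case above and triggers a retaste.
 */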
2247 arg = (uintptr_t)scheme;
2248 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,