/*-
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/diskmbr.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#ifndef _PATH_DEV
#define	_PATH_DEV	"/dev/"
#endif

static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat16", G_PART_ALIAS_MS_FAT16 },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "vmware-vmfs", G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
	{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
	{ "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
};

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
static u_int check_integrity = 1;
TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity);
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RW | CTLFLAG_TUN, &check_integrity, 1,
    "Enable integrity checking");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

static struct g_class g_part_class = {
	.name = G_PART_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}
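
/*
 * Find the best head count for a disk of the given size at a fixed
 * sectors-per-track count.  Candidate head counts are tried in increasing
 * order; a candidate is usable only when the resulting cylinder count fits
 * in the CHS cylinder field (<= 1023) and is not smaller than the head and
 * sector counts.  The candidate that covers the most blocks wins; on a
 * tie, a multi-head geometry is preferred over a single-head one.
 */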
static void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}

#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}
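
/*
 * Sanity-check a partition table against the provider it was read from:
 * the usable LBA range must lie within the media, every entry must lie
 * within that range, and no two entries may overlap.  Misalignment with
 * the provider's stripe size is only reported, never counted as a
 * failure.  When a check fails and kern.geom.part.check_integrity is
 * set, EINVAL is returned and the table is rejected; otherwise the table
 * is merely marked corrupt, which restricts further control requests to
 * "destroy" and "recover".
 */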
static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d is not aligned on %u "
				    "bytes\n", e1->gpe_index, pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}

struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_deleted = 0;
	entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}
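
/*
 * Create (or refresh) the GEOM provider for a table entry.  The provider
 * name comes from the scheme's G_PART_FULLNAME method; its mediasize is
 * derived from the entry's LBA range, shrunk by any extra offset the
 * scheme imposed, and the parent's stripe parameters are translated so
 * that alignment information stays valid for nested consumers.
 */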
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}

static struct g_geom *
g_part_find_geom(const char *name)
{
	struct g_geom *gp;
	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if (!strcmp(name, gp->name))
			break;
	}
	return (gp);
}
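
/*
 * gctl parameter helpers.  Each fetches one request argument, validates
 * it and reports malformed input through gctl_error().  Geom and provider
 * names may be given with or without a leading "/dev/"; the prefix is
 * stripped before lookup.
 */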
static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	if ((gp->flags & G_GEOM_WITHER) != 0) {
		gctl_error(req, "%d %s", ENXIO, gname);
		return (ENXIO);
	}
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}
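
/*
 * Ask every registered scheme to probe the consumer and keep the best
 * match.  G_PART_PROBE returns 0 for a perfect match, a negative value
 * for a partial match (closer to 0 is better) and a positive value on
 * error, so the loop tracks the largest non-positive priority and can
 * stop early on an exact match.
 */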
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %u bytes\n", pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
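
/*
 * Commit an open transaction to disk.  Sectors queued for scrubbing in
 * the gpt_smhead and gpt_smtail bitmaps (counted from the start and the
 * end of the media, respectively) are zeroed first, then the scheme's
 * G_PART_WRITE method writes the metadata proper.  Committing a "null"
 * table finalizes a destroy by withering the geom.
 */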
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
				continue;
			if (entry->gpe_index == gpp->gpp_index)
				break;
		}
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
			    gpp->gpp_index);
			return (ENOENT);
		}
	} else
		entry = NULL;

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		if (entry)
			G_PART_FULLNAME(table, entry, sb, gp->name);
		else
			sbuf_cat(sb, gp->name);
		sbuf_cat(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		G_PART_DESTROY(table, NULL);
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}
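
/*
 * Handle a control request from userland (typically gpart(8); for
 * example, "gpart create -s gpt da0" arrives here as verb "create" with
 * scheme and provider arguments).  The verb is mapped to a G_PART_CTL_*
 * code together with its mandatory and optional parameter masks, the
 * arguments are parsed into a g_part_parms structure, and the request is
 * dispatched to the matching g_part_ctl_* function.  A 'C' in the flags
 * argument requests an automatic commit after a successful modification.
 */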
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

 out:
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
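
/*
 * Taste method: called for every new provider so that g_part can decide
 * whether it carries a recognizable partition table.  The probe, read
 * and integrity check run with the topology lock dropped; a root mount
 * hold keeps the boot process from mounting root before the partition
 * providers exist.
 */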
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		if (cp->provider)
			g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}

static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}

static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}

static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);
}
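
/*
 * I/O dispatch.  Read, write and delete requests are cloned and passed
 * to the consumer with bio_offset shifted by the entry's byte offset,
 * which is how a partition becomes a window onto the parent provider.
 * GETATTR requests answer geometry and PART::* attributes locally and
 * sanitize kernel dump requests before passing them down.
 */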
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	char buf[64];

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used. If the request comes from the nested scheme
			 * we allow dumping there as well.
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}

static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
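
/*
 * Deferred unload handler for scheme modules.  Run from the GEOM event
 * queue so the topology is stable: if any geom still using the scheme
 * has an open provider or consumer, the unload fails with EBUSY;
 * otherwise those geoms are withered and the scheme is removed from the
 * scheme list.  The result is passed back through the event argument.
 */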
static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	*xchg = error;
}

int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				error = EEXIST;
				break;
			}
		}
		if (error == 0) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);
		if (error == 0)
			error = arg;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}