/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"
#define	_PATH_DEV	"/dev/"
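/*
 * The "null" scheme is a stub without methods of its own. It is
 * attached to a geom while no real partition table exists on it:
 * during 'gpart create', and again after 'gpart destroy'.
 */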
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};
static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
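/*
 * Table of partition type aliases, mapping the human-readable lexemes
 * used by gpart(8) (e.g. "gpart add -t freebsd-ufs ...") to their
 * scheme-independent enum values. Kept sorted by lexeme.
 */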
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
74 { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
75 { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
76 { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
77 { "apple-label", G_PART_ALIAS_APPLE_LABEL },
78 { "apple-raid", G_PART_ALIAS_APPLE_RAID },
79 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
80 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
81 { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
82 { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
83 { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
84 { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
85 { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
86 { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
87 { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
88 { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
89 { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
90 { "dragonfly-label32", G_PART_ALIAS_DFBSD },
91 { "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
92 { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
93 { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
94 { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
95 { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
96 { "ebr", G_PART_ALIAS_EBR },
97 { "efi", G_PART_ALIAS_EFI },
98 { "fat16", G_PART_ALIAS_MS_FAT16 },
99 { "fat32", G_PART_ALIAS_MS_FAT32 },
100 { "freebsd", G_PART_ALIAS_FREEBSD },
101 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
102 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
103 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
104 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
105 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
106 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
107 { "linux-data", G_PART_ALIAS_LINUX_DATA },
108 { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
109 { "linux-raid", G_PART_ALIAS_LINUX_RAID },
110 { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
111 { "mbr", G_PART_ALIAS_MBR },
112 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
113 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
114 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
115 { "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
116 { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
117 { "ms-spaces", G_PART_ALIAS_MS_SPACES },
118 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
119 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
120 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
121 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
122 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
123 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
124 { "ntfs", G_PART_ALIAS_MS_NTFS },
125 { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
126 { "prep-boot", G_PART_ALIAS_PREP_BOOT },
127 { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
128 { "vmware-vmfs", G_PART_ALIAS_VMFS },
129 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
130 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
    "GEOM_PART stuff");
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");
/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;
static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};
DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);
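/*
 * DECLARE_GEOM_CLASS() hooks the class into GEOM at load time, and
 * MODULE_VERSION() is what the per-scheme modules depend on: the
 * G_PART_SCHEME_DECLARE() macro in g_part.h emits a matching
 * MODULE_DEPEND(..., g_part, 0, 0, 0).
 */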
/*
 * Support functions.
 */

static void g_part_wither(struct g_geom *, int);
const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}

	return (NULL);
}
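/*
 * Example (hypothetical caller): translating an alias back to its
 * canonical lexeme:
 *
 *	const char *name = g_part_alias_name(G_PART_ALIAS_FREEBSD_UFS);
 *
 * yields "freebsd-ufs"; unknown aliases yield NULL.
 */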
static void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}
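/*
 * Validate a parsed table: gpt_first/gpt_last must be ordered and on
 * the media, every entry must lie within [gpt_first, gpt_last], and no
 * two entries may overlap. With the check_integrity knob turned off a
 * bad table is merely marked corrupt (gpt_corrupt) instead of being
 * rejected with EINVAL.
 */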
static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %u bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	}
	entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}
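/*
 * Create (or refresh) the GEOM provider for a single table entry. The
 * provider is named after the scheme-specific "full name" (e.g. ada0p2
 * for entry 2 of a GPT on ada0) and its mediasize is the entry's LBA
 * range translated to bytes, minus any scheme-imposed offset slack.
 */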
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	struct g_geom_alias *gap;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		/*
		 * Add aliases to the geom before we create the provider so that
		 * geom_dev can taste it with all the aliases in place so all
		 * the aliased dev_t instances get created for each partition
		 * (eg foo5p7 gets created for bar5p7 when foo is an alias of bar).
		 */
		LIST_FOREACH(gap, &table->gpt_gp->aliases, ga_next) {
			sb = sbuf_new_auto();
			G_PART_FULLNAME(table, entry, sb, gap->ga_alias);
			sbuf_finish(sb);
			g_geom_add_alias(gp, sbuf_data(sb));
			sbuf_delete(sb);
		}
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}
static struct g_geom*
g_part_find_geom(const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)
			break;
	}
	return (gp);
}
static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name,
		    (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}
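/*
 * Probing. Every registered scheme gets to look at the consumer; the
 * G_PART_PROBE() method returns a priority where 0 is a perfect match,
 * negative values are progressively weaker matches and positive values
 * indicate an error. The scheme with the best (highest non-positive)
 * priority wins and its table is kept in gp->softc.
 */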
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}
/*
 * Control request functions.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %u bytes\n", pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed. */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
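/*
 * The 'move' verb is parsed and dispatched like the others, but it has
 * never been implemented; it fails uniformly with ENOSYS.
 */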
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;
	off_t mediasize;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d%s", error, error != EBUSY ? "":
		    " resizing will lead to unexpected shrinking"
		    " due to alignment");
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	entry = NULL;
	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
				continue;
			if (entry->gpe_index == gpp->gpp_index)
				break;
		}
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
			    gpp->gpp_index);
			return (ENOENT);
		}
	}

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		if (entry)
			G_PART_FULLNAME(table, entry, sb, gp->name);
		else
			sbuf_cat(sb, gp->name);
		sbuf_cat(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		G_PART_DESTROY(table, NULL);
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}
/*
 * Class methods.
 */

static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();
	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

out:
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
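/*
 * Tasting: attach read-only to the new provider, probe for a scheme,
 * read the table and publish one provider per partition entry. The
 * root-mount hold makes the boot process wait for tasting to complete
 * before the root file system is mounted.
 */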
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	struct g_geom_alias *gap;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	LIST_FOREACH(gap, &pp->geom->aliases, ga_next)
		g_geom_add_alias(gp, gap->ga_alias);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		if (cp->provider)
			g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
/*
 * This start routine is only called for non-trivial requests, all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, this means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
 */
static int
g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
{
	struct g_part_table *table;

	table = pp->geom->softc;
	return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
}
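/*
 * Called by GEOM when the underlying provider has grown (e.g. a disk
 * image or virtual disk was enlarged). If the kern.geom.part.auto_resize
 * knob is set, the scheme may adapt its metadata in-core; the result
 * still has to be committed or undone with gpart(8), as the message
 * below points out.
 */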
static void
g_part_resize(struct g_consumer *cp)
{
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	if (auto_resize == 0)
		return;

	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		if (g_access(cp, 1, 1, 1) != 0)
			return;
		table->gpt_opened = 1;
	}
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    " Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);
	}
}
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);
}
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	char buf[64];

	biotrack(bp, __func__);

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used. If the request comes from the nested scheme
			 * we allow dumping there as well.
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
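/*
 * Scheme module unload runs through a GEOM event so that it executes
 * with the topology lock held. The scheme pointer is handed in through
 * *arg and the result (0, or EBUSY if any geom still uses the scheme)
 * is passed back out through the same word.
 */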
static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	*xchg = error;
}
int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				break;
			}
		}
		if (iter == NULL) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,