/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

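/*
 * The "null" scheme is an empty placeholder: it is installed on a geom
 * while a real table is being created, or after one has been destroyed
 * but the change has not been committed yet.
 */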
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};

static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);

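/*
 * Translation table between the partition type aliases that gpart(8)
 * understands and their enum values; consumed by g_part_alias_name().
 */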
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
70 { "apple-apfs", G_PART_ALIAS_APPLE_APFS },
71 { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
72 { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
73 { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
74 { "apple-label", G_PART_ALIAS_APPLE_LABEL },
75 { "apple-raid", G_PART_ALIAS_APPLE_RAID },
76 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
77 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
78 { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
79 { "apple-zfs", G_PART_ALIAS_APPLE_ZFS },
80 { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
81 { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
82 { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
83 { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
84 { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
85 { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
86 { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
87 { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
88 { "dragonfly-label32", G_PART_ALIAS_DFBSD },
89 { "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
90 { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
91 { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
92 { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
93 { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
94 { "ebr", G_PART_ALIAS_EBR },
95 { "efi", G_PART_ALIAS_EFI },
96 { "fat16", G_PART_ALIAS_MS_FAT16 },
97 { "fat32", G_PART_ALIAS_MS_FAT32 },
98 { "fat32lba", G_PART_ALIAS_MS_FAT32LBA },
99 { "freebsd", G_PART_ALIAS_FREEBSD },
100 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
101 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
102 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
103 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
104 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
105 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
106 { "hifive-fsbl", G_PART_ALIAS_HIFIVE_FSBL },
107 { "hifive-bbl", G_PART_ALIAS_HIFIVE_BBL },
108 { "linux-data", G_PART_ALIAS_LINUX_DATA },
109 { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
110 { "linux-raid", G_PART_ALIAS_LINUX_RAID },
111 { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
112 { "mbr", G_PART_ALIAS_MBR },
113 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
114 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
115 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
116 { "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
117 { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
118 { "ms-spaces", G_PART_ALIAS_MS_SPACES },
119 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
120 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
121 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
122 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
123 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
124 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
125 { "ntfs", G_PART_ALIAS_MS_NTFS },
126 { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
127 { "prep-boot", G_PART_ALIAS_PREP_BOOT },
128 { "solaris-boot", G_PART_ALIAS_SOLARIS_BOOT },
129 { "solaris-root", G_PART_ALIAS_SOLARIS_ROOT },
130 { "solaris-swap", G_PART_ALIAS_SOLARIS_SWAP },
131 { "solaris-backup", G_PART_ALIAS_SOLARIS_BACKUP },
132 { "solaris-var", G_PART_ALIAS_SOLARIS_VAR },
133 { "solaris-home", G_PART_ALIAS_SOLARIS_HOME },
134 { "solaris-altsec", G_PART_ALIAS_SOLARIS_ALTSEC },
135 { "solaris-reserved", G_PART_ALIAS_SOLARIS_RESERVED },
136 { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
137 { "vmware-vmfs", G_PART_ALIAS_VMFS },
138 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
139 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_PART stuff");
u_int geom_part_check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &geom_part_check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");
static u_int allow_nesting = 0;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, allow_nesting,
    CTLFLAG_RWTUN, &allow_nesting, 0,
    "Allow additional levels of nesting");
char g_part_separator[MAXPATHLEN] = "";
SYSCTL_STRING(_kern_geom_part, OID_AUTO, separator,
    CTLFLAG_RDTUN, &g_part_separator, sizeof(g_part_separator),
    "Partition name separator");

/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,

	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

static void g_part_wither(struct g_geom *, int);

const char *
g_part_alias_name(enum g_part_alias alias)
{
	int i;

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
			continue;
		return (g_part_alias_list[i].lexeme);
	}
	return (NULL);
}

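/*
 * Find the head count, from a fixed list of candidates, that yields the
 * largest addressable C/H/S capacity for the given sector count while
 * keeping the cylinder count at or below 1023.
 */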
static void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;
	u_int heads;
	int idx;

	*bestchs = 0;
	*bestheads = 0;
	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = heads;
		}
	}
}

static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}

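/*
 * Completion handler for GEOM::physpath requests: append the partition
 * name to the physical path reported by the parent provider.
 */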
static void
g_part_get_physpath_done(struct bio *bp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;
	struct bio *pbp;

	pbp = bp->bio_parent;
	pp = pbp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	entry = pp->private;

	if (bp->bio_error == 0) {
		char *end;
		size_t len, remainder;
		len = strlcat(bp->bio_data, "/", bp->bio_length);
		if (len < bp->bio_length) {
			end = bp->bio_data + len;
			remainder = bp->bio_length - len;
			G_PART_NAME(table, entry, end, remainder);
		}
	}
	g_std_done(bp);
}

#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\
}

static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;
	off_t offset;
	int failed;

	failed = 0;
	pp = cp->provider;
	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
		failed++;
	}
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
		failed++;
	}
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
			failed++;
		}
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
			failed++;
		}
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
			failed++;
		}
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %ju bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    (uintmax_t)pp->stripesize);
				/* Don't treat this as a critical failure */
			}
		}
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
				failed++;
			}
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
				failed++;
			}
		}
	}
	if (failed != 0) {
		printf("GEOM_PART: integrity check failed (%s, %s)\n",
		    pp->name, table->gpt_scheme->name);
		if (geom_part_check_integrity != 0)
			return (EINVAL);
		table->gpt_corrupt = 1;
	}
	return (0);
}

struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,
    quad_t end)
{
	struct g_part_entry *entry, *last;

	last = NULL;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
			break;
		if (entry->gpe_index > index) {
			entry = NULL;
			break;
		}
		last = entry;
	}
	if (entry == NULL) {
		entry = g_malloc(table->gpt_scheme->gps_entrysz,
		    M_WAITOK | M_ZERO);
		entry->gpe_index = index;
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
	} else
		entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;
	return (entry);
}

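/*
 * Create (or refresh) the provider that exposes a partition entry,
 * inheriting sector size, stripe parameters and unmapped-I/O support
 * from the parent provider.
 */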
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom_alias *gap;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		entry->gpe_pp = G_PART_NEW_PROVIDER(table, gp, entry, gp->name);
		/*
		 * If our parent provider had any aliases, then copy them to our
		 * provider so when geom DEV tastes things later, they will be
		 * there for it to create the aliases with those names used in
		 * place of the geom's name we use to create the provider. The
		 * kobj interface that generates names makes this awkward.
		 */
		LIST_FOREACH(gap, &pp->aliases, ga_next)
			G_PART_ADD_ALIAS(table, entry->gpe_pp, entry, gap->ga_alias);
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
}

static struct g_geom*
g_part_find_geom(const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)
			break;
	}
	return (gp);
}

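/*
 * gctl parameter parsers: each helper fetches one request argument,
 * validates it, and returns ENOATTR when the parameter is absent.
 */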
static int
g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
{
	struct g_geom *gp;
	const char *gname;

	gname = gctl_get_asciiparam(req, name);
	if (gname == NULL)
		return (ENOATTR);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	if (gp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
		return (EINVAL);
	}
	*v = gp;
	return (0);
}

static int
g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)
{
	struct g_provider *pp;
	const char *pname;

	pname = gctl_get_asciiparam(req, name);
	if (pname == NULL)
		return (ENOATTR);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	if (pp == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
		return (EINVAL);
	}
	*v = pp;
	return (0);
}

static int
g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
{
	const char *p;
	char *x;
	quad_t q;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = q;
	return (0);
}

static int
g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)
{
	struct g_part_scheme *s;
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
			continue;
		if (!strcasecmp(s->name, p))
			break;
	}
	if (s == NULL) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = s;
	return (0);
}

static int
g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
{
	const char *p;

	p = gctl_get_asciiparam(req, name);
	if (p == NULL)
		return (ENOATTR);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
		return (EINVAL);
	}
	*v = p;
	return (0);
}

static int
g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
{
	intmax_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
{
	uint32_t *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
		return (EINVAL);
	}
	*v = (u_int)*p;
	return (0);
}

static int
g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
    unsigned int *s)
{
	const void *p;
	int size;

	p = gctl_get_param(req, name, &size);
	if (p == NULL)
		return (ENOATTR);
	*v = p;
	*s = size;
	return (0);
}

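/*
 * Probe all registered schemes against the consumer and attach the
 * best-matching table to the geom; the scheme whose probe method
 * reports the value closest to zero wins, zero being a definite match.
 */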
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (probe == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}

/*
 * Control request functions.
 */

static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %ju bytes\n", (uintmax_t)pp->stripesize);
		else
			sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	gctl_error(req, "%d", error);
	return (error);
}

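/*
 * Write all pending changes to disk: scrub the sectors recorded in the
 * head and tail shadow maps, call the scheme's write method and clear
 * the created/modified/deleted state of all entries.
 */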
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed. */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}

static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}

static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{

	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}

static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error == 0)
			error = g_part_check_integrity(table,
			    LIST_FIRST(&gp->consumer));
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;
	off_t mediasize;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	pp = entry->gpe_pp;
	if ((g_debugflags & G_F_FOOTSHOOTING) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d%s", error, error != EBUSY ? "":
		    " resizing will lead to unexpected shrinking"
		    " due to alignment");
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	entry = NULL;
	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
				continue;
			if (entry->gpe_index == gpp->gpp_index)
				break;
		}
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
			    gpp->gpp_index);
			return (ENOENT);
		}
	}

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		    (set) ? "" : "un");
		if (entry)
			G_PART_FULLNAME(table, entry, sb, gp->name);
		else
			sbuf_cat(sb, gp->name);
		sbuf_cat(sb, "\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}

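/*
 * Revert all uncommitted changes: destroy freshly created entries,
 * resurrect deleted ones and, if the table itself was newly created or
 * destroyed, re-probe and re-read the on-disk table.
 */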
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}

static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;

	table = gp->softc;
	if (table != NULL) {
		gp->softc = NULL;
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			pp = entry->gpe_pp;
			entry->gpe_pp = NULL;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, error);
			}
			g_free(entry);
		}
		G_PART_DESTROY(table, NULL);
		kobj_delete((kobj_t)table, M_GEOM);
	}
	g_wither_geom(gp, error);
}

static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
			oparms |= G_PART_PARM_SKIP_DSN;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			else if (!strcmp(ap->name, "skip_dsn"))
				parm = G_PART_PARM_SKIP_DSN;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_SKIP_DSN:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_skip_dsn);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER &&
		    geom_part_check_integrity) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

out:
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}

static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}

static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		if (cp->provider)
			g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}

static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}

static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}

/*
 * This start routine is only called for non-trivial requests, all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, this means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
 */
static int
g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
{
	struct g_part_table *table;

	table = pp->geom->softc;
	return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
}

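/*
 * Called when the underlying provider changes size. With the
 * kern.geom.part.auto_resize sysctl enabled, ask the scheme to grow the
 * table to the new media size; the change is left uncommitted.
 */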
static void
g_part_resize(struct g_consumer *cp)
{
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	if (auto_resize == 0)
		return;

	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		if (g_access(cp, 1, 1, 1) != 0)
			return;
		table->gpt_opened = 1;
	}
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    " Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);
	}
}

static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}

static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);
}

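/*
 * I/O dispatch: translate a partition-relative bio into a
 * media-relative one by adding gpe_offset, clip it to the partition
 * boundaries and pass it down to the parent consumer.
 */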
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	void (*done_func)(struct bio *) = g_std_done;
	char buf[64];

	biotrack(bp, __func__);

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		/*
		 * allow_nesting overrides "isleaf" to false _unless_ the
		 * provider offset is zero, since otherwise we would recurse.
		 */
		if (g_handleattr_int(bp, "PART::isleaf",
		    table->gpt_isleaf &&
		    (allow_nesting == 0 || entry->gpe_offset == 0)))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
			return;
		if (!strcmp("GEOM::physpath", bp->bio_attribute)) {
			done_func = g_part_get_physpath_done;
			break;
		}
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used. If the request comes from the nested scheme
			 * we allow dumping there as well.
			 */
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = done_func;
	g_io_request(bp2, cp);
}

static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}

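/*
 * One-shot GEOM event run at scheme module unload: wither every geom
 * still using the scheme unless one of them is open, then deregister
 * the scheme.
 */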
static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	*xchg = error;
}

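/*
 * Helper for a scheme module's MOD_LOAD/MOD_UNLOAD events: register the
 * scheme and retaste on load; wait for the unload event on unload.
 */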
int
g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
{
	struct g_part_scheme *iter;
	uintptr_t arg;
	int error;

	error = 0;
	switch (type) {
	case MOD_LOAD:
		TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
			if (scheme == iter) {
				printf("GEOM_PART: scheme %s is already "
				    "registered!\n", scheme->name);
				break;
			}
		}
		if (iter == NULL) {
			TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
			    scheme_list);
			g_retaste(&g_part_class);
		}
		break;
	case MOD_UNLOAD:
		arg = (uintptr_t)scheme;
		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
		    NULL);