2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
37 #include <sys/limits.h>
39 #include <sys/malloc.h>
40 #include <sys/mutex.h>
41 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/systm.h>
46 #include <geom/geom.h>
47 #include <geom/geom_ctl.h>
48 #include <geom/geom_int.h>
49 #include <geom/part/g_part.h>
51 #include "g_part_if.h"
/*
 * NOTE(review): this file is a garbled extraction of the GEOM PART class
 * (g_part.c): original line numbers are fused onto every line and many
 * lines are missing (array bodies, braces, declarations).  Comments below
 * describe only what the visible code shows.
 */
/* Prefix stripped from user-supplied geom/provider names. */
54 #define _PATH_DEV "/dev/"
/* kobj method table for the placeholder "null" scheme (body elided here). */
57 static kobj_method_t g_part_null_methods[] = {
/* Placeholder scheme installed when no real partition table exists. */
61 static struct g_part_scheme g_part_null_scheme = {
64 sizeof(struct g_part_table),
/* Global list of all registered partitioning schemes. */
67 TAILQ_HEAD(, g_part_scheme) g_part_schemes =
68 TAILQ_HEAD_INITIALIZER(g_part_schemes);
/*
 * Table mapping partition-type alias strings (as used by gpart(8)) to
 * their g_part_alias enum values.  Kept sorted by lexeme; searched
 * linearly by g_part_alias_name().
 */
70 struct g_part_alias_list {
72 enum g_part_alias alias;
73 } g_part_alias_list[G_PART_ALIAS_COUNT] = {
74 { "apple-apfs", G_PART_ALIAS_APPLE_APFS },
75 { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
76 { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
77 { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
78 { "apple-label", G_PART_ALIAS_APPLE_LABEL },
79 { "apple-raid", G_PART_ALIAS_APPLE_RAID },
80 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
81 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
82 { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
83 { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
84 { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
85 { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
86 { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
87 { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
88 { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
89 { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
90 { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
91 { "dragonfly-label32", G_PART_ALIAS_DFBSD },
92 { "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
93 { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
94 { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
95 { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
96 { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
97 { "ebr", G_PART_ALIAS_EBR },
98 { "efi", G_PART_ALIAS_EFI },
99 { "fat16", G_PART_ALIAS_MS_FAT16 },
100 { "fat32", G_PART_ALIAS_MS_FAT32 },
101 { "fat32lba", G_PART_ALIAS_MS_FAT32LBA },
102 { "freebsd", G_PART_ALIAS_FREEBSD },
103 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
104 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
105 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
106 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
107 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
108 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
109 { "linux-data", G_PART_ALIAS_LINUX_DATA },
110 { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
111 { "linux-raid", G_PART_ALIAS_LINUX_RAID },
112 { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
113 { "mbr", G_PART_ALIAS_MBR },
114 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
115 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
116 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
117 { "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
118 { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
119 { "ms-spaces", G_PART_ALIAS_MS_SPACES },
120 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
121 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
122 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
123 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
124 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
125 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
126 { "ntfs", G_PART_ALIAS_MS_NTFS },
127 { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
128 { "prep-boot", G_PART_ALIAS_PREP_BOOT },
129 { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
130 { "vmware-vmfs", G_PART_ALIAS_VMFS },
131 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
132 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
/*
 * sysctl knobs under kern.geom.part.  Both are RWTUN: settable as
 * loader tunables and at runtime.
 */
135 SYSCTL_DECL(_kern_geom);
136 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
/* When non-zero, a failed table integrity check rejects the table. */
138 static u_int check_integrity = 1;
139 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
140 CTLFLAG_RWTUN, &check_integrity, 1,
141 "Enable integrity checking");
/* When non-zero, tables may be auto-resized when the media grows. */
142 static u_int auto_resize = 1;
143 SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
144 CTLFLAG_RWTUN, &auto_resize, 1,
145 "Enable auto resize");
148 * The GEOM partitioning class.
/* Prototypes for the class-method implementations registered below. */
150 static g_ctl_req_t g_part_ctlreq;
151 static g_ctl_destroy_geom_t g_part_destroy_geom;
152 static g_fini_t g_part_fini;
153 static g_init_t g_part_init;
154 static g_taste_t g_part_taste;
156 static g_access_t g_part_access;
157 static g_dumpconf_t g_part_dumpconf;
158 static g_orphan_t g_part_orphan;
159 static g_spoiled_t g_part_spoiled;
160 static g_start_t g_part_start;
161 static g_resize_t g_part_resize;
162 static g_ioctl_t g_part_ioctl;
/* Class descriptor tying the method implementations into GEOM. */
164 static struct g_class g_part_class = {
166 .version = G_VERSION,
168 .ctlreq = g_part_ctlreq,
169 .destroy_geom = g_part_destroy_geom,
172 .taste = g_part_taste,
174 .access = g_part_access,
175 .dumpconf = g_part_dumpconf,
176 .orphan = g_part_orphan,
177 .spoiled = g_part_spoiled,
178 .start = g_part_start,
179 .resize = g_part_resize,
180 .ioctl = g_part_ioctl,
/* Register the class with GEOM and declare the module version. */
183 DECLARE_GEOM_CLASS(g_part_class, g_part);
184 MODULE_VERSION(g_part, 0);
/* Forward declaration: tear down a g_part geom and its providers. */
190 static void g_part_wither(struct g_geom *, int);
/*
 * Return the lexeme (string name) for a partition-type alias by linear
 * search of g_part_alias_list.  (Fallthrough/return for a miss is in
 * lines elided from this extraction.)
 */
193 g_part_alias_name(enum g_part_alias alias)
197 for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
198 if (g_part_alias_list[i].alias != alias)
200 return (g_part_alias_list[i].lexeme);
/*
 * For a given sector count, try each candidate head count and remember
 * (via *bestchs/*bestheads) the geometry whose cylinders*heads*sectors
 * covers the most blocks while keeping cylinders <= 1023 and at least
 * as large as heads and sectors.
 */
207 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
/* Head counts considered, terminated by 0. */
210 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
211 off_t chs, cylinders;
217 for (idx = 0; candidate_heads[idx] != 0; idx++) {
218 heads = candidate_heads[idx];
219 cylinders = blocks / heads / sectors;
/* Degenerate geometries (too few cylinders) are rejected. */
220 if (cylinders < heads || cylinders < sectors)
222 if (cylinders > 1023)
224 chs = cylinders * heads * sectors;
/* Prefer larger coverage; break ties against a 1-head geometry. */
225 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
/*
 * Synthesize a CHS disk geometry for the table.  If the consumer
 * reports firmware geometry (GEOM::fwsectors/fwheads), use it as a
 * fixed geometry; otherwise search candidate sector counts for the
 * best fit via g_part_geometry_heads().
 */
233 g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
236 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
238 u_int heads, sectors;
/* No usable firmware geometry: compute one. */
241 if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
242 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
243 table->gpt_fixgeom = 0;
244 table->gpt_heads = 0;
245 table->gpt_sectors = 0;
247 for (idx = 0; candidate_sectors[idx] != 0; idx++) {
248 sectors = candidate_sectors[idx];
249 g_part_geometry_heads(blocks, sectors, &chs, &heads);
253 * Prefer a geometry with sectors > 1, but only if
254 * it doesn't bump down the number of heads to 1.
256 if (chs > bestchs || (chs == bestchs && heads > 1 &&
257 table->gpt_sectors == 1)) {
259 table->gpt_heads = heads;
260 table->gpt_sectors = sectors;
264 * If we didn't find a geometry at all, then the disk is
265 * too big. This means we can use the maximum number of
/* Fall back to the conventional 255/63 maximal geometry. */
269 table->gpt_heads = 255;
270 table->gpt_sectors = 63;
/* Firmware geometry present: record it as fixed. */
273 table->gpt_fixgeom = 1;
274 table->gpt_heads = heads;
275 table->gpt_sectors = sectors;
/*
 * Completion handler for a GEOM::physpath attribute request: on
 * success, append "/" plus the partition's name to the physpath
 * string returned in bio_data (bounded by bio_length).
 */
280 g_part_get_physpath_done(struct bio *bp)
283 struct g_part_entry *entry;
284 struct g_part_table *table;
285 struct g_provider *pp;
288 pbp = bp->bio_parent;
294 if (bp->bio_error == 0) {
296 size_t len, remainder;
/* strlcat returns the intended length; only append the partition
 * name if the "/" actually fit in the buffer. */
297 len = strlcat(bp->bio_data, "/", bp->bio_length);
298 if (len < bp->bio_length) {
299 end = bp->bio_data + len;
300 remainder = bp->bio_length - len;
301 G_PART_NAME(table, entry, end, remainder);
/* Diagnostic printf, emitted only when booting verbosely. */
308 #define DPRINTF(...) if (bootverbose) { \
309 printf("GEOM_PART: " __VA_ARGS__); \
/*
 * Validate a partition table: gpt_first/gpt_last must be ordered and
 * within the media, every entry must lie within [gpt_first, gpt_last]
 * with start <= end, and no two entries may overlap.  Misalignment to
 * the stripe size is only warned about.  On failure, the table is
 * marked corrupt; the check is fatal only if check_integrity is set.
 */
313 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
315 struct g_part_entry *e1, *e2;
316 struct g_provider *pp;
322 if (table->gpt_last < table->gpt_first) {
323 DPRINTF("last LBA is below first LBA: %jd < %jd\n",
324 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
327 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
328 DPRINTF("last LBA extends beyond mediasize: "
329 "%jd > %jd\n", (intmax_t)table->gpt_last,
330 (intmax_t)pp->mediasize / pp->sectorsize - 1);
/* Per-entry range checks; deleted/internal entries are skipped. */
333 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
334 if (e1->gpe_deleted || e1->gpe_internal)
336 if (e1->gpe_start < table->gpt_first) {
337 DPRINTF("partition %d has start offset below first "
338 "LBA: %jd < %jd\n", e1->gpe_index,
339 (intmax_t)e1->gpe_start,
340 (intmax_t)table->gpt_first);
343 if (e1->gpe_start > table->gpt_last) {
344 DPRINTF("partition %d has start offset beyond last "
345 "LBA: %jd > %jd\n", e1->gpe_index,
346 (intmax_t)e1->gpe_start,
347 (intmax_t)table->gpt_last);
350 if (e1->gpe_end < e1->gpe_start) {
351 DPRINTF("partition %d has end offset below start "
352 "offset: %jd < %jd\n", e1->gpe_index,
353 (intmax_t)e1->gpe_end,
354 (intmax_t)e1->gpe_start);
357 if (e1->gpe_end > table->gpt_last) {
358 DPRINTF("partition %d has end offset beyond last "
359 "LBA: %jd > %jd\n", e1->gpe_index,
360 (intmax_t)e1->gpe_end,
361 (intmax_t)table->gpt_last);
/* Stripe-alignment check: warn only, never fail. */
364 if (pp->stripesize > 0) {
365 offset = e1->gpe_start * pp->sectorsize;
366 if (e1->gpe_offset > offset)
367 offset = e1->gpe_offset;
368 if ((offset + pp->stripeoffset) % pp->stripesize) {
369 DPRINTF("partition %d on (%s, %s) is not "
370 "aligned on %ju bytes\n", e1->gpe_index,
371 pp->name, table->gpt_scheme->name,
372 (uintmax_t)pp->stripesize);
373 /* Don't treat this as a critical failure */
/* Pairwise overlap checks against every later entry. */
377 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
378 if (e2->gpe_deleted || e2->gpe_internal)
380 if (e1->gpe_start >= e2->gpe_start &&
381 e1->gpe_start <= e2->gpe_end) {
382 DPRINTF("partition %d has start offset inside "
383 "partition %d: start[%d] %jd >= start[%d] "
384 "%jd <= end[%d] %jd\n",
385 e1->gpe_index, e2->gpe_index,
386 e2->gpe_index, (intmax_t)e2->gpe_start,
387 e1->gpe_index, (intmax_t)e1->gpe_start,
388 e2->gpe_index, (intmax_t)e2->gpe_end);
391 if (e1->gpe_end >= e2->gpe_start &&
392 e1->gpe_end <= e2->gpe_end) {
393 DPRINTF("partition %d has end offset inside "
394 "partition %d: start[%d] %jd >= end[%d] "
395 "%jd <= end[%d] %jd\n",
396 e1->gpe_index, e2->gpe_index,
397 e2->gpe_index, (intmax_t)e2->gpe_start,
398 e1->gpe_index, (intmax_t)e1->gpe_end,
399 e2->gpe_index, (intmax_t)e2->gpe_end);
402 if (e1->gpe_start < e2->gpe_start &&
403 e1->gpe_end > e2->gpe_end) {
404 DPRINTF("partition %d contains partition %d: "
405 "start[%d] %jd > start[%d] %jd, end[%d] "
406 "%jd < end[%d] %jd\n",
407 e1->gpe_index, e2->gpe_index,
408 e1->gpe_index, (intmax_t)e1->gpe_start,
409 e2->gpe_index, (intmax_t)e2->gpe_start,
410 e2->gpe_index, (intmax_t)e2->gpe_end,
411 e1->gpe_index, (intmax_t)e1->gpe_end);
/* Failure path: report, mark corrupt, fail only if enforcing. */
417 printf("GEOM_PART: integrity check failed (%s, %s)\n",
418 pp->name, table->gpt_scheme->name);
419 if (check_integrity != 0)
421 table->gpt_corrupt = 1;
/*
 * Allocate and insert a new table entry with the given index and
 * LBA range, keeping the gpt_entry list sorted by index.  If an
 * entry with that index already exists, it is reused (the early
 * loop break and reuse path are partly elided in this extraction).
 */
427 struct g_part_entry *
428 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
431 struct g_part_entry *entry, *last;
/* Find the insertion point: last = last entry with smaller index. */
434 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
435 if (entry->gpe_index == index)
437 if (entry->gpe_index > index) {
/* Scheme-specific entry size: schemes embed extra data after the
 * generic g_part_entry header. */
444 entry = g_malloc(table->gpt_scheme->gps_entrysz,
446 entry->gpe_index = index;
448 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
450 LIST_INSERT_AFTER(last, entry, gpe_entry);
452 entry->gpe_offset = 0;
453 entry->gpe_start = start;
454 entry->gpe_end = end;
/*
 * Create (or refresh) the GEOM provider for a table entry: compute the
 * byte offset from the start LBA, create the provider with all geom
 * aliases in place, and derive mediasize/sectorsize/stripe parameters
 * from the parent provider.
 */
459 g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
460 struct g_part_entry *entry)
462 struct g_consumer *cp;
463 struct g_provider *pp;
465 struct g_geom_alias *gap;
468 cp = LIST_FIRST(&gp->consumer);
/* gpe_offset is at least the entry's start LBA in bytes; schemes may
 * have set a larger offset already. */
471 offset = entry->gpe_start * pp->sectorsize;
472 if (entry->gpe_offset < offset)
473 entry->gpe_offset = offset;
475 if (entry->gpe_pp == NULL) {
477 * Add aliases to the geom before we create the provider so that
478 * geom_dev can taste it with all the aliases in place so all
479 * the aliased dev_t instances get created for each partition
480 * (eg foo5p7 gets created for bar5p7 when foo is an alias of bar).
482 LIST_FOREACH(gap, &table->gpt_gp->aliases, ga_next) {
483 sb = sbuf_new_auto();
484 G_PART_FULLNAME(table, entry, sb, gap->ga_alias);
486 g_geom_add_alias(gp, sbuf_data(sb));
/* Provider name is the scheme-formatted full partition name. */
489 sb = sbuf_new_auto();
490 G_PART_FULLNAME(table, entry, sb, gp->name);
492 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
494 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
495 entry->gpe_pp->private = entry; /* Close the circle. */
497 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */
/* Size in bytes, shrunk by any scheme-imposed leading offset. */
498 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
500 entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
501 entry->gpe_pp->sectorsize = pp->sectorsize;
502 entry->gpe_pp->stripesize = pp->stripesize;
503 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
504 if (pp->stripesize > 0)
505 entry->gpe_pp->stripeoffset %= pp->stripesize;
506 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
507 g_error_provider(entry->gpe_pp, 0);
/*
 * Look up a live (non-withering) g_part geom by name; NULL-return on
 * miss is in lines elided from this extraction.
 */
510 static struct g_geom*
511 g_part_find_geom(const char *name)
514 LIST_FOREACH(gp, &g_part_class.geom, geom) {
515 if ((gp->flags & G_GEOM_WITHER) == 0 &&
516 strcmp(name, gp->name) == 0)
/*
 * Parse a gctl request parameter naming a g_part geom.  A leading
 * "/dev/" prefix is stripped before lookup; on failure an EINVAL
 * error string is set on the request.
 */
523 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
528 gname = gctl_get_asciiparam(req, name);
531 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
532 gname += sizeof(_PATH_DEV) - 1;
533 gp = g_part_find_geom(gname);
535 gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
/*
 * Parse a gctl request parameter naming a provider; strips a leading
 * "/dev/" prefix, resolves via g_provider_by_name(), and reports
 * EINVAL on failure.
 */
543 g_part_parm_provider(struct gctl_req *req, const char *name,
544 struct g_provider **v)
546 struct g_provider *pp;
549 pname = gctl_get_asciiparam(req, name);
552 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
553 pname += sizeof(_PATH_DEV) - 1;
554 pp = g_provider_by_name(pname);
556 gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
/*
 * Parse a non-negative quad_t request parameter with strtoq(); any
 * trailing garbage or a negative value is rejected with EINVAL.
 */
564 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
570 p = gctl_get_asciiparam(req, name);
573 q = strtoq(p, &x, 0);
574 if (*x != '\0' || q < 0) {
575 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
/*
 * Parse a request parameter naming a partitioning scheme: case-
 * insensitive match against the registered schemes, excluding the
 * internal null scheme.  EINVAL on no match.
 */
583 g_part_parm_scheme(struct gctl_req *req, const char *name,
584 struct g_part_scheme **v)
586 struct g_part_scheme *s;
589 p = gctl_get_asciiparam(req, name);
592 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
593 if (s == &g_part_null_scheme)
595 if (!strcasecmp(s->name, p))
599 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
/*
 * Parse a string request parameter.  Empty strings are rejected with
 * EINVAL except for the "label" parameter, where empty is valid.
 */
607 g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
611 p = gctl_get_asciiparam(req, name);
614 /* An empty label is always valid. */
615 if (strcmp(name, "label") != 0 && p[0] == '\0') {
616 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
/*
 * Parse a binary intmax-typed parameter into a u_int, rejecting
 * wrong-sized payloads and values outside [0, INT_MAX].
 */
624 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
629 p = gctl_get_param(req, name, &size);
632 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
633 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
/*
 * Parse a binary 32-bit unsigned parameter into a u_int, rejecting
 * wrong-sized payloads and values above INT_MAX.
 */
641 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
646 p = gctl_get_param(req, name, &size);
649 if (size != sizeof(*p) || *p > INT_MAX) {
650 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
/*
 * Fetch a raw bootcode parameter blob and its size from the request
 * (validation of the size is in lines elided from this extraction).
 */
658 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
664 p = gctl_get_param(req, name, &size);
/*
 * Probe all registered schemes against the consumer and keep the one
 * reporting the best (highest non-positive) priority.  An existing
 * table, if any, competes as the incumbent; losing tables are freed
 * with kobj_delete().  Returns ENXIO when no scheme matched.
 */
673 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
675 struct g_part_scheme *iter, *scheme;
676 struct g_part_table *table;
/* Incumbent: re-probe the current table (if any) as the baseline. */
680 scheme = (table != NULL) ? table->gpt_scheme : NULL;
681 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
684 if (pri > 0) { /* error */
689 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
690 if (iter == &g_part_null_scheme)
/* Each scheme gets a fresh kobj-backed table to probe with. */
692 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
695 table->gpt_scheme = iter;
696 table->gpt_depth = depth;
697 probe = G_PART_PROBE(table, cp);
/* probe <= 0 is success; closer to 0 is a better match. */
698 if (probe <= 0 && probe > pri) {
701 if (gp->softc != NULL)
702 kobj_delete((kobj_t)gp->softc, M_GEOM);
707 kobj_delete((kobj_t)table, M_GEOM);
711 return ((scheme == NULL) ? ENXIO : 0);
715 * Control request functions.
/*
 * "add" verb: add a partition entry.  Validates the requested start,
 * size and index against the table bounds and existing entries, then
 * delegates to the scheme's G_PART_ADD method, links the entry into
 * the sorted list (reusing a same-index deleted entry if available),
 * creates its provider, and optionally reports the result.
 */
719 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
722 struct g_provider *pp;
723 struct g_part_entry *delent, *last, *entry;
724 struct g_part_table *table;
731 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
734 pp = LIST_FIRST(&gp->consumer)->provider;
736 end = gpp->gpp_start + gpp->gpp_size - 1;
/* Range checks against the table's usable LBA window. */
738 if (gpp->gpp_start < table->gpt_first ||
739 gpp->gpp_start > table->gpt_last) {
740 gctl_error(req, "%d start '%jd'", EINVAL,
741 (intmax_t)gpp->gpp_start);
744 if (end < gpp->gpp_start || end > table->gpt_last) {
745 gctl_error(req, "%d size '%jd'", EINVAL,
746 (intmax_t)gpp->gpp_size);
749 if (gpp->gpp_index > table->gpt_entries) {
750 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
/* Walk existing entries: find a free index, a reusable deleted
 * entry (delent), the insertion point (last), and reject overlap. */
754 delent = last = NULL;
755 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
756 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
757 if (entry->gpe_deleted) {
758 if (entry->gpe_index == index)
762 if (entry->gpe_index == index)
763 index = entry->gpe_index + 1;
764 if (entry->gpe_index < index)
766 if (entry->gpe_internal)
768 if (gpp->gpp_start >= entry->gpe_start &&
769 gpp->gpp_start <= entry->gpe_end) {
770 gctl_error(req, "%d start '%jd'", ENOSPC,
771 (intmax_t)gpp->gpp_start);
774 if (end >= entry->gpe_start && end <= entry->gpe_end) {
775 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
778 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
779 gctl_error(req, "%d size '%jd'", ENOSPC,
780 (intmax_t)gpp->gpp_size);
/* An explicitly requested index must still be free. */
784 if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
785 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
788 if (index > table->gpt_entries) {
789 gctl_error(req, "%d index '%d'", ENOSPC, index);
/* Reuse the deleted entry if found; otherwise allocate. */
793 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
794 M_WAITOK | M_ZERO) : delent;
795 entry->gpe_index = index;
796 entry->gpe_start = gpp->gpp_start;
797 entry->gpe_end = end;
798 error = G_PART_ADD(table, entry, gpp);
800 gctl_error(req, "%d", error);
805 if (delent == NULL) {
807 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
809 LIST_INSERT_AFTER(last, entry, gpe_entry);
810 entry->gpe_created = 1;
812 entry->gpe_deleted = 0;
813 entry->gpe_modified = 1;
815 g_part_new_provider(gp, table, entry);
817 /* Provide feedback if so requested. */
818 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
819 sb = sbuf_new_auto();
820 G_PART_FULLNAME(table, entry, sb, gp->name);
821 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
822 sbuf_printf(sb, " added, but partition is not "
823 "aligned on %ju bytes\n", (uintmax_t)pp->stripesize);
825 sbuf_cat(sb, " added\n");
827 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
/*
 * "bootcode" verb: install boot code via the scheme's G_PART_BOOTCODE
 * method after checking the supplied blob fits the scheme's maximum
 * bootcode size.
 */
834 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
837 struct g_part_table *table;
842 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
846 sz = table->gpt_scheme->gps_bootcodesz;
851 if (gpp->gpp_codesize > sz) {
856 error = G_PART_BOOTCODE(table, gpp);
860 /* Provide feedback if so requested. */
861 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
862 sb = sbuf_new_auto();
863 sbuf_printf(sb, "bootcode written to %s\n", gp->name);
865 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
871 gctl_error(req, "%d", error);
/*
 * "commit" verb: write pending changes to disk.  Requires an open
 * table (EPERM otherwise).  First zeroes the scrub-map sectors at the
 * head (gpt_smhead) and tail (gpt_smtail) of the media, then either
 * withers the geom (null scheme, i.e. a destroy being committed) or
 * calls the scheme's G_PART_WRITE.  Finally clears created/modified
 * flags, reaps deleted entries, and drops the exclusive access.
 */
876 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
878 struct g_consumer *cp;
880 struct g_provider *pp;
881 struct g_part_entry *entry, *tmp;
882 struct g_part_table *table;
887 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
891 if (!table->gpt_opened) {
892 gctl_error(req, "%d", EPERM);
898 cp = LIST_FIRST(&gp->consumer);
899 if ((table->gpt_smhead | table->gpt_smtail) != 0) {
/* One zeroed sector buffer reused for every scrub write. */
901 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
902 while (table->gpt_smhead != 0) {
903 i = ffs(table->gpt_smhead) - 1;
904 error = g_write_data(cp, i * pp->sectorsize, buf,
910 table->gpt_smhead &= ~(1 << i);
912 while (table->gpt_smtail != 0) {
913 i = ffs(table->gpt_smtail) - 1;
/* Tail bits count sectors backwards from the end of the media. */
914 error = g_write_data(cp, pp->mediasize - (i + 1) *
915 pp->sectorsize, buf, pp->sectorsize);
920 table->gpt_smtail &= ~(1 << i);
/* Committing a destroyed table: drop access and wither. */
925 if (table->gpt_scheme == &g_part_null_scheme) {
927 g_access(cp, -1, -1, -1);
928 g_part_wither(gp, ENXIO);
932 error = G_PART_WRITE(table, cp);
936 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
937 if (!entry->gpe_deleted) {
938 /* Notify consumers that provider might be changed. */
939 if (entry->gpe_modified && (
940 entry->gpe_pp->acw + entry->gpe_pp->ace +
941 entry->gpe_pp->acr) == 0)
942 g_media_changed(entry->gpe_pp, M_NOWAIT);
943 entry->gpe_created = 0;
944 entry->gpe_modified = 0;
947 LIST_REMOVE(entry, gpe_entry);
950 table->gpt_created = 0;
951 table->gpt_opened = 0;
954 g_access(cp, -1, -1, -1);
959 gctl_error(req, "%d", error);
/*
 * "create" verb: create a new partition table on a provider.  Rejects
 * providers that already carry a non-null g_part table, validates the
 * requested entry count against the scheme's min/max, builds the geom,
 * kobj table and exclusive consumer, synthesizes a geometry, and calls
 * the scheme's G_PART_CREATE.  An existing null-scheme geom (from a
 * prior destroy) is taken over instead of creating a fresh one.
 */
964 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
966 struct g_consumer *cp;
968 struct g_provider *pp;
969 struct g_part_scheme *scheme;
970 struct g_part_table *null, *table;
974 pp = gpp->gpp_provider;
975 scheme = gpp->gpp_scheme;
976 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
979 /* Check that there isn't already a g_part geom on the provider. */
980 gp = g_part_find_geom(pp->name);
983 if (null->gpt_scheme != &g_part_null_scheme) {
984 gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
990 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
991 (gpp->gpp_entries < scheme->gps_minent ||
992 gpp->gpp_entries > scheme->gps_maxent)) {
993 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
/* Fresh geom: kobj-create the scheme-specific table in softc. */
998 gp = g_new_geomf(&g_part_class, "%s", pp->name);
999 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
1003 table->gpt_scheme = gpp->gpp_scheme;
1004 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
1005 gpp->gpp_entries : scheme->gps_minent;
1006 LIST_INIT(&table->gpt_entry);
/* New geom needs its own consumer with exclusive (1,1,1) access. */
1008 cp = g_new_consumer(gp);
1009 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
1010 error = g_attach(cp, pp);
1012 error = g_access(cp, 1, 1, 1);
1014 g_part_wither(gp, error);
1015 gctl_error(req, "%d geom '%s'", error, pp->name);
1018 table->gpt_opened = 1;
/* Takeover path: inherit state from the old null-scheme table. */
1020 cp = LIST_FIRST(&gp->consumer);
1021 table->gpt_opened = null->gpt_opened;
1022 table->gpt_smhead = null->gpt_smhead;
1023 table->gpt_smtail = null->gpt_smtail;
1026 g_topology_unlock();
1028 /* Make sure the provider has media. */
1029 if (pp->mediasize == 0 || pp->sectorsize == 0) {
1034 /* Make sure we can nest and if so, determine our depth. */
1035 error = g_getattr("PART::isleaf", cp, &attr);
1036 if (!error && attr) {
1040 error = g_getattr("PART::depth", cp, &attr);
1041 table->gpt_depth = (!error) ? attr + 1 : 0;
1044 * Synthesize a disk geometry. Some partitioning schemes
1045 * depend on it and since some file systems need it even
1046 * when the partitition scheme doesn't, we do it here in
1047 * scheme-independent code.
1049 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1051 error = G_PART_CREATE(table, gpp);
1057 table->gpt_created = 1;
1059 kobj_delete((kobj_t)null, M_GEOM);
1062 * Support automatic commit by filling in the gpp_geom
1065 gpp->gpp_parms |= G_PART_PARM_GEOM;
1068 /* Provide feedback if so requested. */
1069 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1070 sb = sbuf_new_auto();
1071 sbuf_printf(sb, "%s created\n", gp->name);
1073 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
/* Error unwinding: drop access, wither the geom, free the table. */
1081 g_access(cp, -1, -1, -1);
1082 g_part_wither(gp, error);
1084 kobj_delete((kobj_t)gp->softc, M_GEOM);
1087 gctl_error(req, "%d provider", error);
/*
 * "delete" verb: delete the entry with the requested index.  Fails
 * with ENOENT if no such live entry exists and EBUSY if its provider
 * is open.  A freshly created (uncommitted) entry is unlinked
 * outright; otherwise the entry is only flagged deleted so a later
 * commit can write the change.
 */
1092 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
1095 struct g_provider *pp;
1096 struct g_part_entry *entry;
1097 struct g_part_table *table;
1101 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1102 g_topology_assert();
1106 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1107 if (entry->gpe_deleted || entry->gpe_internal)
1109 if (entry->gpe_index == gpp->gpp_index)
1112 if (entry == NULL) {
1113 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
/* Refuse to delete a partition whose provider is open. */
1119 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
1120 gctl_error(req, "%d", EBUSY);
1125 entry->gpe_pp = NULL;
1129 g_wither_provider(pp, ENXIO);
1131 /* Provide feedback if so requested. */
1132 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1133 sb = sbuf_new_auto();
1134 G_PART_FULLNAME(table, entry, sb, gp->name);
1135 sbuf_cat(sb, " deleted\n");
1137 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1141 if (entry->gpe_created) {
1142 LIST_REMOVE(entry, gpe_entry);
1145 entry->gpe_modified = 0;
1146 entry->gpe_deleted = 1;
/*
 * "destroy" verb: destroy the partition table.  Without gpp_force, any
 * open partition provider makes this fail with EBUSY; with force, all
 * providers are withered first.  On success the scheme table is
 * replaced by a null-scheme table that inherits depth/opened/scrub-map
 * state, so a later commit can finalize the destruction on disk.
 */
1152 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
1154 struct g_consumer *cp;
1156 struct g_provider *pp;
1157 struct g_part_entry *entry, *tmp;
1158 struct g_part_table *null, *table;
1163 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1164 g_topology_assert();
1167 /* Check for busy providers. */
1168 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1169 if (entry->gpe_deleted || entry->gpe_internal)
1171 if (gpp->gpp_force) {
1175 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
1178 gctl_error(req, "%d", EBUSY);
1182 if (gpp->gpp_force) {
1183 /* Destroy all providers. */
1184 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1188 g_wither_provider(pp, ENXIO);
1190 LIST_REMOVE(entry, gpe_entry);
1195 error = G_PART_DESTROY(table, gpp);
1197 gctl_error(req, "%d", error);
/* Replace softc with a fresh null-scheme table. */
1201 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
1205 null->gpt_scheme = &g_part_null_scheme;
1206 LIST_INIT(&null->gpt_entry);
1208 cp = LIST_FIRST(&gp->consumer);
1210 null->gpt_last = pp->mediasize / pp->sectorsize - 1;
1212 null->gpt_depth = table->gpt_depth;
1213 null->gpt_opened = table->gpt_opened;
1214 null->gpt_smhead = table->gpt_smhead;
1215 null->gpt_smtail = table->gpt_smtail;
/* Drain and free the old table's entries, then the table itself. */
1217 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1218 LIST_REMOVE(entry, gpe_entry);
1221 kobj_delete((kobj_t)table, M_GEOM);
1223 /* Provide feedback if so requested. */
1224 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1225 sb = sbuf_new_auto();
1226 sbuf_printf(sb, "%s destroyed\n", gp->name);
1228 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
/*
 * "modify" verb: change attributes (type/label) of the entry with the
 * requested index via the scheme's G_PART_MODIFY.  Entries that were
 * created in this session stay "created" rather than "modified".
 */
1235 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
1238 struct g_part_entry *entry;
1239 struct g_part_table *table;
1244 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1245 g_topology_assert();
1249 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1250 if (entry->gpe_deleted || entry->gpe_internal)
1252 if (entry->gpe_index == gpp->gpp_index)
1255 if (entry == NULL) {
1256 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1260 error = G_PART_MODIFY(table, entry, gpp);
1262 gctl_error(req, "%d", error);
1266 if (!entry->gpe_created)
1267 entry->gpe_modified = 1;
1269 /* Provide feedback if so requested. */
1270 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1271 sb = sbuf_new_auto();
1272 G_PART_FULLNAME(table, entry, sb, gp->name);
1273 sbuf_cat(sb, " modified\n");
1275 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
/* "move" verb: not implemented; always reports ENOSYS. */
1282 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
1284 gctl_error(req, "%d verb 'move'", ENOSYS);
/*
 * "recover" verb: if the table is flagged corrupt, run the scheme's
 * G_PART_RECOVER and re-check integrity; otherwise report that no
 * recovery is needed.
 */
1289 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
1291 struct g_part_table *table;
1294 int error, recovered;
1297 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1298 g_topology_assert();
1300 error = recovered = 0;
1302 if (table->gpt_corrupt) {
1303 error = G_PART_RECOVER(table);
1305 error = g_part_check_integrity(table,
1306 LIST_FIRST(&gp->consumer));
1308 gctl_error(req, "%d recovering '%s' failed",
1314 /* Provide feedback if so requested. */
1315 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1316 sb = sbuf_new_auto();
1318 sbuf_printf(sb, "%s recovered\n", gp->name);
1320 sbuf_printf(sb, "%s recovering is not needed\n",
1323 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
/*
 * "resize" verb: change the size of the entry with the requested
 * index.  Validates the new end LBA against the table and the other
 * entries, refuses to shrink an open partition (unless the g_debug
 * "foot-shooting" flag 16 is set), then calls the scheme's
 * G_PART_RESIZE and resizes the provider to match.
 */
1330 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
1333 struct g_provider *pp;
1334 struct g_part_entry *pe, *entry;
1335 struct g_part_table *table;
1342 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1343 g_topology_assert();
1346 /* check gpp_index */
1347 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1348 if (entry->gpe_deleted || entry->gpe_internal)
1350 if (entry->gpe_index == gpp->gpp_index)
1353 if (entry == NULL) {
1354 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1358 /* check gpp_size */
1359 end = entry->gpe_start + gpp->gpp_size - 1;
1360 if (gpp->gpp_size < 1 || end > table->gpt_last) {
1361 gctl_error(req, "%d size '%jd'", EINVAL,
1362 (intmax_t)gpp->gpp_size);
/* The grown range must not collide with any other live entry. */
1366 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
1367 if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
1369 if (end >= pe->gpe_start && end <= pe->gpe_end) {
1370 gctl_error(req, "%d end '%jd'", ENOSPC,
1374 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
1375 gctl_error(req, "%d size '%jd'", ENOSPC,
1376 (intmax_t)gpp->gpp_size);
1382 if ((g_debugflags & 16) == 0 &&
1383 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
1384 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
1385 /* Deny shrinking of an opened partition. */
1386 gctl_error(req, "%d", EBUSY);
1391 error = G_PART_RESIZE(table, entry, gpp);
1393 gctl_error(req, "%d%s", error, error != EBUSY ? "":
1394 " resizing will lead to unexpected shrinking"
1395 " due to alignment");
1399 if (!entry->gpe_created)
1400 entry->gpe_modified = 1;
1402 /* update mediasize of changed provider */
1403 mediasize = (entry->gpe_end - entry->gpe_start + 1) *
1405 g_resize_provider(pp, mediasize);
1407 /* Provide feedback if so requested. */
1408 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1409 sb = sbuf_new_auto();
1410 G_PART_FULLNAME(table, entry, sb, gp->name);
1411 sbuf_cat(sb, " resized\n");
1413 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
/*
 * "set"/"unset" verbs (selected by the 'set' flag): toggle a named
 * attribute, either on one entry (when G_PART_PARM_INDEX is given)
 * or on the table as a whole, via the scheme's G_PART_SETUNSET.
 */
1420 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1424 struct g_part_entry *entry;
1425 struct g_part_table *table;
1430 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1431 g_topology_assert();
1435 if (gpp->gpp_parms & G_PART_PARM_INDEX) {
1436 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1437 if (entry->gpe_deleted || entry->gpe_internal)
1439 if (entry->gpe_index == gpp->gpp_index)
1442 if (entry == NULL) {
1443 gctl_error(req, "%d index '%d'", ENOENT,
1450 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1452 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1456 /* Provide feedback if so requested. */
1457 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1458 sb = sbuf_new_auto();
1459 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
/* Name either the single entry or the whole geom in the output. */
1462 G_PART_FULLNAME(table, entry, sb, gp->name);
1464 sbuf_cat(sb, gp->name);
1467 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
/*
 * "undo" verb: discard all uncommitted changes.  Requires an open
 * table (EPERM otherwise).  Created entries lose their providers and
 * are unlinked; if the table itself was just created (or is the null
 * scheme), the on-disk table is re-probed and re-read from scratch,
 * recreating providers for the re-read entries.
 */
1474 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
1476 struct g_consumer *cp;
1477 struct g_provider *pp;
1479 struct g_part_entry *entry, *tmp;
1480 struct g_part_table *table;
1484 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1485 g_topology_assert();
1488 if (!table->gpt_opened) {
1489 gctl_error(req, "%d", EPERM);
1493 cp = LIST_FIRST(&gp->consumer);
1494 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1495 entry->gpe_modified = 0;
1496 if (entry->gpe_created) {
1500 entry->gpe_pp = NULL;
1501 g_wither_provider(pp, ENXIO);
1503 entry->gpe_deleted = 1;
1505 if (entry->gpe_deleted) {
1506 LIST_REMOVE(entry, gpe_entry);
1511 g_topology_unlock();
/* Re-probe only when there is no committed on-disk table to keep. */
1513 reprobe = (table->gpt_scheme == &g_part_null_scheme ||
1514 table->gpt_created) ? 1 : 0;
1517 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1518 if (entry->gpe_internal)
1523 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1524 LIST_REMOVE(entry, gpe_entry);
1527 error = g_part_probe(gp, cp, table->gpt_depth);
1530 g_access(cp, -1, -1, -1);
1531 g_part_wither(gp, error);
1537 * Synthesize a disk geometry. Some partitioning schemes
1538 * depend on it and since some file systems need it even
1539 * when the partitition scheme doesn't, we do it here in
1540 * scheme-independent code.
1543 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1546 error = G_PART_READ(table, cp);
1549 error = g_part_check_integrity(table, cp);
/* Recreate providers for the freshly read, non-internal entries. */
1554 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1555 if (!entry->gpe_internal)
1556 g_part_new_provider(gp, table, entry);
1559 table->gpt_opened = 0;
1560 g_access(cp, -1, -1, -1);
1565 gctl_error(req, "%d", error);
/*
 * Tear down a partitioning geom: detach and wither all entry providers,
 * destroy the table object, then wither the geom itself with 'error'
 * delivered to pending consumers.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
1570 g_part_wither(struct g_geom *gp, int error)
1572 struct g_part_entry *entry;
1573 struct g_part_table *table;
1574 struct g_provider *pp;
1577 if (table != NULL) {
/* Unlink every entry and orphan its provider, if any. */
1579 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1580 LIST_REMOVE(entry, gpe_entry);
1582 entry->gpe_pp = NULL;
1585 g_wither_provider(pp, error);
/* Scheme-specific teardown, then free the kobj-backed table. */
1589 G_PART_DESTROY(table, NULL);
1590 kobj_delete((kobj_t)table, M_GEOM);
1592 g_wither_geom(gp, error);
/*
 * Top-level gctl request dispatcher for the PART class. Maps the verb
 * string to a G_PART_CTL_* request, validates mandatory (mparms) vs.
 * optional (oparms) parameters, parses each request argument into 'gpp',
 * acquires access on the geom when the verb modifies it, gives the scheme
 * a chance to pre-check, then dispatches to the per-verb handler.
 * Also implements the 'C' flag ("auto commit") and closes the table again
 * on error when this call was the one that opened it.
 * NOTE(review): elided chunk -- the numeric prefixes are original file
 * line numbers; gaps between them are missing source lines (notably the
 * 'break's/'continue's and some case labels of the switches below).
 */
1600 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
1602 struct g_part_parms gpp;
1603 struct g_part_table *table;
1604 struct gctl_req_arg *ap;
1605 enum g_part_ctl ctlreq;
1606 unsigned int i, mparms, oparms, parm;
1607 int auto_commit, close_on_error;
1608 int error, modifies;
1610 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1611 g_topology_assert();
/* Step 1: translate the verb into a ctlreq + parameter masks. */
1613 ctlreq = G_PART_CTL_NONE;
1616 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1619 if (!strcmp(verb, "add")) {
1620 ctlreq = G_PART_CTL_ADD;
1621 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1622 G_PART_PARM_START | G_PART_PARM_TYPE;
1623 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1627 if (!strcmp(verb, "bootcode")) {
1628 ctlreq = G_PART_CTL_BOOTCODE;
1629 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
1633 if (!strcmp(verb, "commit")) {
1634 ctlreq = G_PART_CTL_COMMIT;
1635 mparms |= G_PART_PARM_GEOM;
1637 } else if (!strcmp(verb, "create")) {
1638 ctlreq = G_PART_CTL_CREATE;
1639 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1640 oparms |= G_PART_PARM_ENTRIES;
1644 if (!strcmp(verb, "delete")) {
1645 ctlreq = G_PART_CTL_DELETE;
1646 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1647 } else if (!strcmp(verb, "destroy")) {
1648 ctlreq = G_PART_CTL_DESTROY;
1649 mparms |= G_PART_PARM_GEOM;
1650 oparms |= G_PART_PARM_FORCE;
1654 if (!strcmp(verb, "modify")) {
1655 ctlreq = G_PART_CTL_MODIFY;
1656 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1657 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1658 } else if (!strcmp(verb, "move")) {
1659 ctlreq = G_PART_CTL_MOVE;
1660 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1664 if (!strcmp(verb, "recover")) {
1665 ctlreq = G_PART_CTL_RECOVER;
1666 mparms |= G_PART_PARM_GEOM;
1667 } else if (!strcmp(verb, "resize")) {
1668 ctlreq = G_PART_CTL_RESIZE;
1669 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
1674 if (!strcmp(verb, "set")) {
1675 ctlreq = G_PART_CTL_SET;
1676 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1677 oparms |= G_PART_PARM_INDEX;
1681 if (!strcmp(verb, "undo")) {
1682 ctlreq = G_PART_CTL_UNDO;
1683 mparms |= G_PART_PARM_GEOM;
1685 } else if (!strcmp(verb, "unset")) {
1686 ctlreq = G_PART_CTL_UNSET;
1687 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1688 oparms |= G_PART_PARM_INDEX;
1692 if (ctlreq == G_PART_CTL_NONE) {
1693 gctl_error(req, "%d verb '%s'", EINVAL, verb);
/* Step 2: parse every request argument into the gpp parameter struct. */
1697 bzero(&gpp, sizeof(gpp));
1698 for (i = 0; i < req->narg; i++) {
/* First character of the name selects the candidate set (cheap dispatch). */
1701 switch (ap->name[0]) {
1703 if (!strcmp(ap->name, "arg0")) {
/* "arg0" doubles as geom or provider depending on the verb's mask. */
1705 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
1707 if (!strcmp(ap->name, "attrib"))
1708 parm = G_PART_PARM_ATTRIB;
1711 if (!strcmp(ap->name, "bootcode"))
1712 parm = G_PART_PARM_BOOTCODE;
1715 if (!strcmp(ap->name, "class"))
1719 if (!strcmp(ap->name, "entries"))
1720 parm = G_PART_PARM_ENTRIES;
1723 if (!strcmp(ap->name, "flags"))
1724 parm = G_PART_PARM_FLAGS;
1725 else if (!strcmp(ap->name, "force"))
1726 parm = G_PART_PARM_FORCE;
1729 if (!strcmp(ap->name, "index"))
1730 parm = G_PART_PARM_INDEX;
1733 if (!strcmp(ap->name, "label"))
1734 parm = G_PART_PARM_LABEL;
1737 if (!strcmp(ap->name, "output"))
1738 parm = G_PART_PARM_OUTPUT;
1741 if (!strcmp(ap->name, "scheme"))
1742 parm = G_PART_PARM_SCHEME;
1743 else if (!strcmp(ap->name, "size"))
1744 parm = G_PART_PARM_SIZE;
1745 else if (!strcmp(ap->name, "start"))
1746 parm = G_PART_PARM_START;
1749 if (!strcmp(ap->name, "type"))
1750 parm = G_PART_PARM_TYPE;
1753 if (!strcmp(ap->name, "verb"))
1755 else if (!strcmp(ap->name, "version"))
1756 parm = G_PART_PARM_VERSION;
/* Reject parameters that this verb neither requires nor accepts. */
1759 if ((parm & (mparms | oparms)) == 0) {
1760 gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1764 case G_PART_PARM_ATTRIB:
1765 error = g_part_parm_str(req, ap->name,
1768 case G_PART_PARM_BOOTCODE:
1769 error = g_part_parm_bootcode(req, ap->name,
1770 &gpp.gpp_codeptr, &gpp.gpp_codesize);
1772 case G_PART_PARM_ENTRIES:
1773 error = g_part_parm_intmax(req, ap->name,
1776 case G_PART_PARM_FLAGS:
1777 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
1779 case G_PART_PARM_FORCE:
1780 error = g_part_parm_uint32(req, ap->name,
1783 case G_PART_PARM_GEOM:
1784 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
1786 case G_PART_PARM_INDEX:
1787 error = g_part_parm_intmax(req, ap->name,
1790 case G_PART_PARM_LABEL:
1791 error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
1793 case G_PART_PARM_OUTPUT:
1794 error = 0; /* Write-only parameter */
1796 case G_PART_PARM_PROVIDER:
1797 error = g_part_parm_provider(req, ap->name,
1800 case G_PART_PARM_SCHEME:
1801 error = g_part_parm_scheme(req, ap->name,
1804 case G_PART_PARM_SIZE:
1805 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
1807 case G_PART_PARM_START:
1808 error = g_part_parm_quad(req, ap->name,
1811 case G_PART_PARM_TYPE:
1812 error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
1814 case G_PART_PARM_VERSION:
1815 error = g_part_parm_uint32(req, ap->name,
1820 gctl_error(req, "%d %s", error, ap->name);
/* ENOATTR here means a mandatory parameter was present but unfetchable. */
1824 if (error == ENOATTR) {
1825 gctl_error(req, "%d param '%s'", error,
1830 gpp.gpp_parms |= parm;
/* Step 3: verify all mandatory parameters were supplied. */
1832 if ((gpp.gpp_parms & mparms) != mparms) {
1833 parm = mparms - (gpp.gpp_parms & mparms);
1834 gctl_error(req, "%d param '%x'", ENOATTR, parm);
1838 /* Obtain permissions if possible/necessary. */
1841 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1842 table = gpp.gpp_geom->softc;
/* A corrupt table only accepts "destroy" and "recover". */
1843 if (table != NULL && table->gpt_corrupt &&
1844 ctlreq != G_PART_CTL_DESTROY &&
1845 ctlreq != G_PART_CTL_RECOVER) {
1846 gctl_error(req, "%d table '%s' is corrupt",
1847 EPERM, gpp.gpp_geom->name);
/* Open the table (exclusive access) if nobody did yet. */
1850 if (table != NULL && !table->gpt_opened) {
1851 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1854 gctl_error(req, "%d geom '%s'", error,
1855 gpp.gpp_geom->name);
1858 table->gpt_opened = 1;
1863 /* Allow the scheme to check or modify the parameters. */
1864 if (table != NULL) {
1865 error = G_PART_PRECHECK(table, ctlreq, &gpp);
1867 gctl_error(req, "%d pre-check failed", error);
1871 error = EDOOFUS; /* Prevent bogus uninit. warning. */
/* Step 4: dispatch to the per-verb handler. */
1874 case G_PART_CTL_NONE:
1875 panic("%s", __func__);
1876 case G_PART_CTL_ADD:
1877 error = g_part_ctl_add(req, &gpp);
1879 case G_PART_CTL_BOOTCODE:
1880 error = g_part_ctl_bootcode(req, &gpp);
1882 case G_PART_CTL_COMMIT:
1883 error = g_part_ctl_commit(req, &gpp);
1885 case G_PART_CTL_CREATE:
1886 error = g_part_ctl_create(req, &gpp);
1888 case G_PART_CTL_DELETE:
1889 error = g_part_ctl_delete(req, &gpp);
1891 case G_PART_CTL_DESTROY:
1892 error = g_part_ctl_destroy(req, &gpp);
1894 case G_PART_CTL_MODIFY:
1895 error = g_part_ctl_modify(req, &gpp);
1897 case G_PART_CTL_MOVE:
1898 error = g_part_ctl_move(req, &gpp);
1900 case G_PART_CTL_RECOVER:
1901 error = g_part_ctl_recover(req, &gpp);
1903 case G_PART_CTL_RESIZE:
1904 error = g_part_ctl_resize(req, &gpp);
1906 case G_PART_CTL_SET:
1907 error = g_part_ctl_setunset(req, &gpp, 1);
1909 case G_PART_CTL_UNDO:
1910 error = g_part_ctl_undo(req, &gpp);
1912 case G_PART_CTL_UNSET:
1913 error = g_part_ctl_setunset(req, &gpp, 0);
1917 /* Implement automatic commit. */
/* The 'C' character in the flags string requests commit-after-modify. */
1919 auto_commit = (modifies &&
1920 (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1921 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1923 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
1925 error = g_part_ctl_commit(req, &gpp);
/* If we opened the table above and the verb failed, close it again. */
1930 if (error && close_on_error) {
1931 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1932 table->gpt_opened = 0;
/*
 * GEOM class destroy_geom method: unconditionally withers the geom.
 * EINVAL is the error delivered to remaining consumers/providers.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
1937 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1941 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1942 g_topology_assert();
1944 g_part_wither(gp, EINVAL);
/*
 * GEOM taste method: probe a provider for a recognized partition table.
 * On success the new geom (with per-entry providers) is returned; on any
 * failure the temporary geom/consumer is torn down and NULL is returned
 * (the return statements themselves are elided from this chunk).
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers;
 * gaps are missing source lines.
 */
1948 static struct g_geom *
1949 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1951 struct g_consumer *cp;
1953 struct g_part_entry *entry;
1954 struct g_part_table *table;
1955 struct root_hold_token *rht;
1956 struct g_geom_alias *gap;
1960 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
1961 g_topology_assert();
1963 /* Skip providers that are already open for writing. */
1968 * Create a GEOM with consumer and hook it up to the provider.
1969 * With that we become part of the topology. Obtain read access
1972 gp = g_new_geomf(mp, "%s", pp->name);
/* Propagate any aliases of the underlying geom to ours. */
1973 LIST_FOREACH(gap, &pp->geom->aliases, ga_next)
1974 g_geom_add_alias(gp, gap->ga_alias);
1975 cp = g_new_consumer(gp);
1976 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
1977 error = g_attach(cp, pp);
1979 error = g_access(cp, 1, 0, 0);
1983 g_destroy_consumer(cp);
/* Hold root mount while probing: the root device may live here. */
1988 rht = root_mount_hold(mp->name);
1989 g_topology_unlock();
1992 * Short-circuit the whole probing galore when there's no
1995 if (pp->mediasize == 0 || pp->sectorsize == 0) {
2000 /* Make sure we can nest and if so, determine our depth. */
2001 error = g_getattr("PART::isleaf", cp, &attr);
2002 if (!error && attr) {
2006 error = g_getattr("PART::depth", cp, &attr);
/* No PART::depth attribute means we sit directly on the disk: depth 0. */
2007 depth = (!error) ? attr + 1 : 0;
2009 error = g_part_probe(gp, cp, depth);
2016 * Synthesize a disk geometry. Some partitioning schemes
2017 * depend on it and since some file systems need it even
2018 * when the partitition scheme doesn't, we do it here in
2019 * scheme-independent code.
2021 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
2023 error = G_PART_READ(table, cp);
2026 error = g_part_check_integrity(table, cp);
/* Publish a provider for every non-internal entry that was read. */
2031 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
2032 if (!entry->gpe_internal)
2033 g_part_new_provider(gp, table, entry);
/* Success path: release the root-mount hold and our read access. */
2036 root_mount_rel(rht);
2037 g_access(cp, -1, 0, 0);
/* Failure path (label elided): same release, then full teardown. */
2042 root_mount_rel(rht);
2043 g_access(cp, -1, 0, 0);
2045 g_destroy_consumer(cp);
/*
 * GEOM access method for partition providers: forward the access deltas
 * to our single consumer, folding the write delta into the exclusive
 * count so that writers to a partition also hold the disk exclusively.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2055 g_part_access(struct g_provider *pp, int dr, int dw, int de)
2057 struct g_consumer *cp;
2059 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
2062 cp = LIST_FIRST(&pp->geom->consumer);
2064 /* We always gain write-exclusive access. */
2065 return (g_access(cp, dr, dw, dw + de));
/*
 * GEOM dumpconf method: emit configuration for sysctl kern.geom.conf*.
 * Four modes, selected by the (indent, cp, pp) combination:
 *   indent == NULL          -> terse one-line ("conftxt") provider dump;
 *   cp != NULL              -> consumer config (nothing scheme-specific);
 *   pp != NULL              -> XML provider (partition entry) config;
 *   otherwise               -> XML geom (table-wide) config.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2069 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2070 struct g_consumer *cp, struct g_provider *pp)
2073 struct g_part_entry *entry;
2074 struct g_part_table *table;
2076 KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
2079 if (indent == NULL) {
2080 KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
2081 entry = pp->private;
2084 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
2085 (uintmax_t)entry->gpe_offset,
2086 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2088 * libdisk compatibility quirk - the scheme dumps the
2089 * slicer name and partition type in a way that is
2090 * compatible with libdisk. When libdisk is not used
2091 * anymore, this should go away.
2093 G_PART_DUMPCONF(table, entry, sb, indent);
2094 } else if (cp != NULL) { /* Consumer configuration. */
2095 KASSERT(pp == NULL, ("%s", __func__));
2097 } else if (pp != NULL) { /* Provider configuration. */
2098 entry = pp->private;
2101 sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
2102 (uintmax_t)entry->gpe_start);
2103 sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
2104 (uintmax_t)entry->gpe_end);
2105 sbuf_printf(sb, "%s<index>%u</index>\n", indent,
2107 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
2108 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2109 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
2110 (uintmax_t)entry->gpe_offset);
2111 sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
2112 (uintmax_t)pp->mediasize);
/* Let the scheme append its own per-entry elements. */
2113 G_PART_DUMPCONF(table, entry, sb, indent);
2114 } else { /* Geom configuration. */
2115 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
2116 table->gpt_scheme->name);
2117 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
2118 table->gpt_entries);
2119 sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
2120 (uintmax_t)table->gpt_first);
2121 sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
2122 (uintmax_t)table->gpt_last);
2123 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
2124 table->gpt_sectors);
2125 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
2127 sbuf_printf(sb, "%s<state>%s</state>\n", indent,
2128 table->gpt_corrupt ? "CORRUPT": "OK");
2129 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
2130 table->gpt_opened ? "true": "false");
2131 G_PART_DUMPCONF(table, NULL, sb, indent);
2136 * This start routine is only called for non-trivial requests, all the
2137 * trivial ones are handled autonomously by the slice code.
2138 * For requests we handle here, we must call the g_io_deliver() on the
2139 * bio, and return non-zero to indicate to the slice code that we did so.
2140 * This code executes in the "DOWN" I/O path, this means:
2142 * * Don't grab the topology lock.
2143 * * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
/*
 * GEOM ioctl method: delegate directly to the scheme's IOCTL handler
 * for the table owning this provider's geom.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2146 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
2148 struct g_part_table *table;
2150 table = pp->geom->softc;
2151 return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
/*
 * GEOM resize method: called when the underlying provider changed size.
 * If auto-resize is enabled, open the table (if needed), let the scheme
 * adjust itself via G_PART_RESIZE(table, NULL, NULL), and re-verify
 * integrity; on integrity failure the whole geom is withered.
 * Changes are left uncommitted -- the printf instructs the admin to run
 * "gpart commit" or "gpart undo".
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2155 g_part_resize(struct g_consumer *cp)
2157 struct g_part_table *table;
2159 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2160 g_topology_assert();
/* Honor the (presumably sysctl-controlled) auto_resize knob. */
2162 if (auto_resize == 0)
2165 table = cp->geom->softc;
2166 if (table->gpt_opened == 0) {
2167 if (g_access(cp, 1, 1, 1) != 0)
2169 table->gpt_opened = 1;
2171 if (G_PART_RESIZE(table, NULL, NULL) == 0)
2172 printf("GEOM_PART: %s was automatically resized.\n"
2173 " Use `gpart commit %s` to save changes or "
2174 "`gpart undo %s` to revert them.\n", cp->geom->name,
2175 cp->geom->name, cp->geom->name);
2176 if (g_part_check_integrity(table, cp) != 0) {
/* Table no longer fits the media: drop access and destroy the geom. */
2177 g_access(cp, -1, -1, -1);
2178 table->gpt_opened = 0;
2179 g_part_wither(table->gpt_gp, ENXIO);
/*
 * GEOM orphan method: the provider below us went away. Release any
 * access an opened table still holds, then wither our geom with the
 * provider's error code.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2184 g_part_orphan(struct g_consumer *cp)
2186 struct g_provider *pp;
2187 struct g_part_table *table;
2190 KASSERT(pp != NULL, ("%s", __func__));
2191 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
2192 g_topology_assert();
/* Orphaning implies the provider already carries a non-zero error. */
2194 KASSERT(pp->error != 0, ("%s", __func__));
2195 table = cp->geom->softc;
2196 if (table != NULL && table->gpt_opened)
2197 g_access(cp, -1, -1, -1);
2198 g_part_wither(cp->geom, pp->error);
/*
 * GEOM spoiled method: someone wrote to the underlying provider behind
 * our back, so our cached table is untrustworthy -- mark the consumer
 * orphaned and wither the geom with ENXIO.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2202 g_part_spoiled(struct g_consumer *cp)
2205 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2206 g_topology_assert();
2208 cp->flags |= G_CF_ORPHAN;
2209 g_part_wither(cp->geom, ENXIO);
/*
 * GEOM start method ("DOWN" path): handle I/O for a partition provider.
 * Reads/writes/deletes are cloned and shifted by the entry's offset, then
 * forwarded to the consumer. GETATTR answers firmware-geometry and
 * PART::* nesting attributes locally; GEOM::physpath and GEOM::kerneldump
 * are rewritten/validated and forwarded.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers;
 * the switch case labels and several break/return statements are missing.
 */
2213 g_part_start(struct bio *bp)
2216 struct g_consumer *cp;
2218 struct g_part_entry *entry;
2219 struct g_part_table *table;
2220 struct g_kerneldump *gkd;
2221 struct g_provider *pp;
/* Default completion handler; physpath GETATTR substitutes its own. */
2222 void (*done_func)(struct bio *) = g_std_done;
2225 biotrack(bp, __func__);
2230 cp = LIST_FIRST(&gp->consumer);
2232 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
2235 entry = pp->private;
/* Provider with no backing entry: it is going away. */
2236 if (entry == NULL) {
2237 g_io_deliver(bp, ENXIO);
2241 switch(bp->bio_cmd) {
/* Read/write/delete path (case labels elided): clamp and translate. */
2245 if (bp->bio_offset >= pp->mediasize) {
2246 g_io_deliver(bp, EIO);
2249 bp2 = g_clone_bio(bp);
2251 g_io_deliver(bp, ENOMEM);
2254 if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
2255 bp2->bio_length = pp->mediasize - bp2->bio_offset;
2256 bp2->bio_done = g_std_done;
/* Translate partition-relative offset to disk-relative. */
2257 bp2->bio_offset += entry->gpe_offset;
2258 g_io_request(bp2, cp);
/* GETATTR path: answer what we know without going down the stack. */
2263 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
2265 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
2267 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
2269 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
2271 if (g_handleattr_str(bp, "PART::scheme",
2272 table->gpt_scheme->name))
2274 if (g_handleattr_str(bp, "PART::type",
2275 G_PART_TYPE(table, entry, buf, sizeof(buf))))
2277 if (!strcmp("GEOM::physpath", bp->bio_attribute)) {
2278 done_func = g_part_get_physpath_done;
2281 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
2283 * Check that the partition is suitable for kernel
2284 * dumps. Typically only swap partitions should be
2285 * used. If the request comes from the nested scheme
2286 * we allow dumping there as well.
2288 if ((bp->bio_from == NULL ||
2289 bp->bio_from->geom->class != &g_part_class) &&
2290 G_PART_DUMPTO(table, entry) == 0) {
2291 g_io_deliver(bp, ENODEV);
2292 printf("GEOM_PART: Partition '%s' not suitable"
2293 " for kernel dumps (wrong type?)\n",
/* Clamp the dump window to the partition, then translate it. */
2297 gkd = (struct g_kerneldump *)bp->bio_data;
2298 if (gkd->offset >= pp->mediasize) {
2299 g_io_deliver(bp, EIO);
2302 if (gkd->offset + gkd->length > pp->mediasize)
2303 gkd->length = pp->mediasize - gkd->offset;
2304 gkd->offset += entry->gpe_offset;
/* Anything else is unsupported (default case elided). */
2308 g_io_deliver(bp, EOPNOTSUPP);
/* Forward the (possibly rewritten) GETATTR down the stack. */
2312 bp2 = g_clone_bio(bp);
2314 g_io_deliver(bp, ENOMEM);
2317 bp2->bio_done = done_func;
2318 g_io_request(bp2, cp);
/*
 * GEOM class init method: register the built-in null scheme at the head
 * of the scheme list so an empty/unknown table always has a fallback.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2322 g_part_init(struct g_class *mp)
2325 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
/*
 * GEOM class fini method: unregister the built-in null scheme
 * (counterpart to g_part_init).
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2329 g_part_fini(struct g_class *mp)
2332 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
/*
 * GEOM event handler run when a scheme module unloads: refuse (by leaving
 * the scheme registered -- exact error path elided) if any geom using the
 * scheme is still open, otherwise wither those geoms and remove the scheme
 * from the registry. 'arg' carries a pointer-exchange cell holding the
 * scheme pointer; presumably it is cleared to signal completion -- TODO
 * confirm, the xchg-handling lines are elided.
 * NOTE(review): elided chunk -- numeric prefixes are original line numbers.
 */
2336 g_part_unload_event(void *arg, int flag)
2338 struct g_consumer *cp;
2340 struct g_provider *pp;
2341 struct g_part_scheme *scheme;
2342 struct g_part_table *table;
2346 if (flag == EV_CANCEL)
2351 scheme = (void *)(*xchg);
2353 g_topology_assert();
2355 LIST_FOREACH(gp, &g_part_class.geom, geom) {
2357 if (table->gpt_scheme != scheme)
/* Sum all access counts; any non-zero count blocks the unload. */
2361 LIST_FOREACH(pp, &gp->provider, provider)
2362 acc += pp->acr + pp->acw + pp->ace;
2363 LIST_FOREACH(cp, &gp->consumer, consumer)
2364 acc += cp->acr + cp->acw + cp->ace;
2367 g_part_wither(gp, ENOSYS);
2373 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
2379 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
2381 struct g_part_scheme *iter;
2388 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
2389 if (scheme == iter) {
2390 printf("GEOM_PART: scheme %s is already "
2391 "registered!\n", scheme->name);
2396 TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
2398 g_retaste(&g_part_class);
2402 arg = (uintptr_t)scheme;
2403 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,