 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <geom/geom.h>
#include <geom/geom_ctl.h>
#include <geom/geom_int.h>
#include <geom/part/g_part.h>

#include "g_part_if.h"

#define	_PATH_DEV	"/dev/"
static kobj_method_t g_part_null_methods[] = {

static struct g_part_scheme g_part_null_scheme = {
	sizeof(struct g_part_table),

TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
struct g_part_alias_list {
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-apfs", G_PART_ALIAS_APPLE_APFS },
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
	{ "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
	{ "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
	{ "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
	{ "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
	{ "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
	{ "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
	{ "dragonfly-label32", G_PART_ALIAS_DFBSD },
	{ "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
	{ "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
	{ "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
	{ "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
	{ "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat16", G_PART_ALIAS_MS_FAT16 },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "fat32lba", G_PART_ALIAS_MS_FAT32LBA },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ms-spaces", G_PART_ALIAS_MS_SPACES },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
	{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
	{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
	{ "vmware-vmfs", G_PART_ALIAS_VMFS },
	{ "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
	{ "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
static u_int check_integrity = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
    CTLFLAG_RWTUN, &check_integrity, 1,
    "Enable integrity checking");
static u_int auto_resize = 1;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
    CTLFLAG_RWTUN, &auto_resize, 1,
    "Enable auto resize");
static u_int allow_nesting = 0;
SYSCTL_UINT(_kern_geom_part, OID_AUTO, allow_nesting,
    CTLFLAG_RWTUN, &allow_nesting, 0,
    "Allow additional levels of nesting");
char g_part_separator[MAXPATHLEN] = "";
SYSCTL_STRING(_kern_geom_part, OID_AUTO, separator,
    CTLFLAG_RDTUN, &g_part_separator, sizeof(g_part_separator),
    "Partition name separator");

 * The GEOM partitioning class.
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;
static g_resize_t g_part_resize;
static g_ioctl_t g_part_ioctl;

static struct g_class g_part_class = {
	.version = G_VERSION,
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.taste = g_part_taste,
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
	.resize = g_part_resize,
	.ioctl = g_part_ioctl,

DECLARE_GEOM_CLASS(g_part_class, g_part);
MODULE_VERSION(g_part, 0);

static void g_part_wither(struct g_geom *, int);

g_part_alias_name(enum g_part_alias alias)

	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
		if (g_part_alias_list[i].alias != alias)
		return (g_part_alias_list[i].lexeme);

g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,

	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t chs, cylinders;

	for (idx = 0; candidate_heads[idx] != 0; idx++) {
		heads = candidate_heads[idx];
		cylinders = blocks / heads / sectors;
		if (cylinders < heads || cylinders < sectors)
		if (cylinders > 1023)
		chs = cylinders * heads * sectors;
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {

g_part_geometry(struct g_part_table *table, struct g_consumer *cp,

	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	u_int heads, sectors;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
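
/*
 * Editorial worked example (not in the original source): for a 2 GiB
 * disk with 512-byte sectors (4194304 blocks) and 63 sectors per track,
 * 64 heads gives 1040 cylinders (> 1023, rejected) while 128 heads
 * gives 520 cylinders and a CHS capacity of 520 * 128 * 63 = 4193280
 * blocks, which beats the 255-head candidate (261 * 255 * 63 = 4192965);
 * the synthesized geometry therefore ends up as 128 heads, 63 sectors.
 */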

g_part_get_physpath_done(struct bio *bp)

	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;

	pbp = bp->bio_parent;
	if (bp->bio_error == 0) {
		size_t len, remainder;
		len = strlcat(bp->bio_data, "/", bp->bio_length);
		if (len < bp->bio_length) {
			end = bp->bio_data + len;
			remainder = bp->bio_length - len;
			G_PART_NAME(table, entry, end, remainder);

#define	DPRINTF(...)	if (bootverbose) {	\
	printf("GEOM_PART: " __VA_ARGS__);	\

g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)

	struct g_part_entry *e1, *e2;
	struct g_provider *pp;

	if (table->gpt_last < table->gpt_first) {
		DPRINTF("last LBA is below first LBA: %jd < %jd\n",
		    (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
	if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
		DPRINTF("last LBA extends beyond mediasize: "
		    "%jd > %jd\n", (intmax_t)table->gpt_last,
		    (intmax_t)pp->mediasize / pp->sectorsize - 1);
	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
		if (e1->gpe_start < table->gpt_first) {
			DPRINTF("partition %d has start offset below first "
			    "LBA: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_first);
		if (e1->gpe_start > table->gpt_last) {
			DPRINTF("partition %d has start offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_start,
			    (intmax_t)table->gpt_last);
		if (e1->gpe_end < e1->gpe_start) {
			DPRINTF("partition %d has end offset below start "
			    "offset: %jd < %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)e1->gpe_start);
		if (e1->gpe_end > table->gpt_last) {
			DPRINTF("partition %d has end offset beyond last "
			    "LBA: %jd > %jd\n", e1->gpe_index,
			    (intmax_t)e1->gpe_end,
			    (intmax_t)table->gpt_last);
		if (pp->stripesize > 0) {
			offset = e1->gpe_start * pp->sectorsize;
			if (e1->gpe_offset > offset)
				offset = e1->gpe_offset;
			if ((offset + pp->stripeoffset) % pp->stripesize) {
				DPRINTF("partition %d on (%s, %s) is not "
				    "aligned on %ju bytes\n", e1->gpe_index,
				    pp->name, table->gpt_scheme->name,
				    (uintmax_t)pp->stripesize);
				/* Don't treat this as a critical failure */
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end) {
				DPRINTF("partition %d has start offset inside "
				    "partition %d: start[%d] %jd >= start[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end) {
				DPRINTF("partition %d has end offset inside "
				    "partition %d: start[%d] %jd >= end[%d] "
				    "%jd <= end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e1->gpe_index, (intmax_t)e1->gpe_end,
				    e2->gpe_index, (intmax_t)e2->gpe_end);
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end) {
				DPRINTF("partition %d contains partition %d: "
				    "start[%d] %jd > start[%d] %jd, end[%d] "
				    "%jd < end[%d] %jd\n",
				    e1->gpe_index, e2->gpe_index,
				    e1->gpe_index, (intmax_t)e1->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_start,
				    e2->gpe_index, (intmax_t)e2->gpe_end,
				    e1->gpe_index, (intmax_t)e1->gpe_end);
	printf("GEOM_PART: integrity check failed (%s, %s)\n",
	    pp->name, table->gpt_scheme->name);
	if (check_integrity != 0)
	table->gpt_corrupt = 1;
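
/*
 * Editorial sketch (hypothetical helper, not part of the driver): the
 * pairwise checks above reduce to the classic interval-overlap test
 * between two LBA ranges.
 */
static int __unused
g_part_example_overlap(quad_t s1, quad_t e1, quad_t s2, quad_t e2)
{

	return ((s1 >= s2 && s1 <= e2) ||	/* start falls inside */
	    (e1 >= s2 && e1 <= e2) ||		/* end falls inside */
	    (s1 < s2 && e1 > e2));		/* full containment */
}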

struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,

	struct g_part_entry *entry, *last;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_index == index)
		if (entry->gpe_index > index) {
	entry = g_malloc(table->gpt_scheme->gps_entrysz,
	entry->gpe_index = index;
	LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
	LIST_INSERT_AFTER(last, entry, gpe_entry);
	entry->gpe_offset = 0;
	entry->gpe_start = start;
	entry->gpe_end = end;

g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)

	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom_alias *gap;

	cp = LIST_FIRST(&gp->consumer);
	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;
	if (entry->gpe_pp == NULL) {
		 * Add aliases to the geom before we create the provider so that
		 * geom_dev can taste it with all the aliases in place so all
		 * the aliased dev_t instances get created for each partition
		 * (e.g., foo5p7 gets created for bar5p7 when foo is an alias of bar).
		LIST_FOREACH(gap, &table->gpt_gp->aliases, ga_next) {
			sb = sbuf_new_auto();
			G_PART_FULLNAME(table, entry, sb, gap->ga_alias);
			g_geom_add_alias(gp, sbuf_data(sb));
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
		entry->gpe_pp->private = entry;		/* Close the circle. */
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
	g_error_provider(entry->gpe_pp, 0);
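
/*
 * Editorial worked example (not in the original source): a partition
 * spanning LBAs 40 through 409639 on a 512-byte-sector disk yields
 * (409639 - 40 + 1) * 512 = 209715200 bytes of media, minus whatever
 * part of the first sector gpe_offset skips when the entry is not
 * sector-aligned.
 */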

static struct g_geom*
g_part_find_geom(const char *name)

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if ((gp->flags & G_GEOM_WITHER) == 0 &&
		    strcmp(name, gp->name) == 0)

g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)

	gname = gctl_get_asciiparam(req, name);
	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		gname += sizeof(_PATH_DEV) - 1;
	gp = g_part_find_geom(gname);
	gctl_error(req, "%d %s '%s'", EINVAL, name, gname);

g_part_parm_provider(struct gctl_req *req, const char *name,
    struct g_provider **v)

	struct g_provider *pp;

	pname = gctl_get_asciiparam(req, name);
	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		pname += sizeof(_PATH_DEV) - 1;
	pp = g_provider_by_name(pname);
	gctl_error(req, "%d %s '%s'", EINVAL, name, pname);

g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)

	p = gctl_get_asciiparam(req, name);
	q = strtoq(p, &x, 0);
	if (*x != '\0' || q < 0) {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);

g_part_parm_scheme(struct gctl_req *req, const char *name,
    struct g_part_scheme **v)

	struct g_part_scheme *s;

	p = gctl_get_asciiparam(req, name);
	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
		if (s == &g_part_null_scheme)
		if (!strcasecmp(s->name, p))
	gctl_error(req, "%d %s '%s'", EINVAL, name, p);

g_part_parm_str(struct gctl_req *req, const char *name, const char **v)

	p = gctl_get_asciiparam(req, name);
	/* An empty label is always valid. */
	if (strcmp(name, "label") != 0 && p[0] == '\0') {
		gctl_error(req, "%d %s '%s'", EINVAL, name, p);

g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)

	p = gctl_get_param(req, name, &size);
	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);

g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)

	p = gctl_get_param(req, name, &size);
	if (size != sizeof(*p) || *p > INT_MAX) {
		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);

g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,

	p = gctl_get_param(req, name, &size);

g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)

	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;

	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri > 0) {	/* error */
	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			kobj_delete((kobj_t)table, M_GEOM);
	return ((scheme == NULL) ? ENXIO : 0);
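
/*
 * Editorial note (not in the original source): probe methods return a
 * non-positive priority where values closer to zero win, so a scheme
 * that fully recognizes its own metadata outranks one that merely finds
 * something plausible; a positive return value is treated as an error
 * and the candidate table is discarded.
 */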

 * Control request functions.

g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	pp = LIST_FIRST(&gp->consumer)->provider;
	end = gpp->gpp_start + gpp->gpp_size - 1;
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
		if (entry->gpe_internal)
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	gctl_error(req, "%d", error);
	if (delent == NULL) {
		LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		LIST_INSERT_AFTER(last, entry, gpe_entry);
	entry->gpe_created = 1;
	entry->gpe_deleted = 0;
	entry->gpe_modified = 1;
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
			sbuf_printf(sb, " added, but partition is not "
			    "aligned on %ju bytes\n", (uintmax_t)pp->stripesize);
			sbuf_cat(sb, " added\n");
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
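
/*
 * Editorial worked example (not in the original source) for the index
 * selection loop in g_part_ctl_add() above: with existing entries at
 * indexes 1, 2 and 4 and no index requested, the walk bumps the
 * candidate from 1 to 2 to 3, entry 4 no longer matches, and the new
 * partition is created at index 3.
 */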

g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	sz = table->gpt_scheme->gps_bootcodesz;
	if (gpp->gpp_codesize > sz) {
	error = G_PART_BOOTCODE(table, gpp);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
	gctl_error(req, "%d", error);

g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			table->gpt_smhead &= ~(1 << i);
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			table->gpt_smtail &= ~(1 << i);
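
		/*
		 * Editorial worked example (not in the original source):
		 * gpt_smhead and gpt_smtail are bitmaps of metadata sectors
		 * to scrub at the start and end of the disk before
		 * committing.  With gpt_smhead == 0x5, ffs() first selects
		 * bit 0 (sector 0), the bit is cleared leaving 0x4, and the
		 * next pass zeroes sector 2.
		 */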
	if (table->gpt_scheme == &g_part_null_scheme) {
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
	error = G_PART_WRITE(table, cp);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			/* Notify consumers that provider might be changed. */
			if (entry->gpe_modified && (
			    entry->gpe_pp->acw + entry->gpe_pp->ace +
			    entry->gpe_pp->acr) == 0)
				g_media_changed(entry->gpe_pp, M_NOWAIT);
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
		LIST_REMOVE(entry, gpe_entry);
	table->gpt_created = 0;
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	gctl_error(req, "%d", error);

g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (null->gpt_scheme != &g_part_null_scheme) {
		gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	    gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
	gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	error = g_access(cp, 1, 1, 1);
	g_part_wither(gp, error);
	gctl_error(req, "%d geom '%s'", error, pp->name);
	table->gpt_opened = 1;
	cp = LIST_FIRST(&gp->consumer);
	table->gpt_opened = null->gpt_opened;
	table->gpt_smhead = null->gpt_smhead;
	table->gpt_smtail = null->gpt_smtail;
	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	table->gpt_created = 1;
	kobj_delete((kobj_t)null, M_GEOM);

	 * Support automatic commit by filling in the gpp_geom
	gpp->gpp_parms |= G_PART_PARM_GEOM;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
	g_access(cp, -1, -1, -1);
	g_part_wither(gp, error);
	kobj_delete((kobj_t)gp->softc, M_GEOM);
	gctl_error(req, "%d provider", error);

g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
		if (entry->gpe_index == gpp->gpp_index)
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
		gctl_error(req, "%d", EBUSY);
	entry->gpe_pp = NULL;
	g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
	if (entry->gpe_created) {
		LIST_REMOVE(entry, gpe_entry);
	entry->gpe_modified = 0;
	entry->gpe_deleted = 1;

g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
		if (gpp->gpp_force) {
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
		gctl_error(req, "%d", EBUSY);
	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			g_wither_provider(pp, ENXIO);
			LIST_REMOVE(entry, gpe_entry);
	error = G_PART_DESTROY(table, gpp);
	gctl_error(req, "%d", error);
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);
	cp = LIST_FIRST(&gp->consumer);
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;
	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);

g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_part_entry *entry;
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
		if (entry->gpe_index == gpp->gpp_index)
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
	error = G_PART_MODIFY(table, entry, gpp);
	gctl_error(req, "%d", error);
	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);

g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)

	gctl_error(req, "%d verb 'move'", ENOSYS);

g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_part_table *table;
	int error, recovered;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	error = recovered = 0;
	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		error = g_part_check_integrity(table,
		    LIST_FIRST(&gp->consumer));
		gctl_error(req, "%d recovering '%s' failed",

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s recovered\n", gp->name);
		sbuf_printf(sb, "%s recovering is not needed\n",
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);

g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
		if (entry->gpe_index == gpp->gpp_index)
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
	if ((g_debugflags & G_F_FOOTSHOOTING) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
			/* Deny shrinking of an opened partition. */
			gctl_error(req, "%d", EBUSY);
	error = G_PART_RESIZE(table, entry, gpp);
	gctl_error(req, "%d%s", error, error != EBUSY ? "":
	    " resizing will lead to unexpected shrinking"
	    " due to alignment");
	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	g_resize_provider(pp, mediasize);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);

g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,

	struct g_part_entry *entry;
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	if (gpp->gpp_parms & G_PART_PARM_INDEX) {
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_deleted || entry->gpe_internal)
			if (entry->gpe_index == gpp->gpp_index)
		if (entry == NULL) {
			gctl_error(req, "%d index '%d'", ENOENT,
	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, gp->name);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);

g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)

	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
	cp = LIST_FIRST(&gp->consumer);
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			entry->gpe_pp = NULL;
			g_wither_provider(pp, ENXIO);
			entry->gpe_deleted = 1;
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
	g_topology_unlock();

	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_internal)
	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
	error = g_part_probe(gp, cp, table->gpt_depth);
	g_access(cp, -1, -1, -1);
	g_part_wither(gp, error);

	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	error = g_part_check_integrity(table, cp);
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	gctl_error(req, "%d", error);

g_part_wither(struct g_geom *gp, int error)

	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_provider *pp;

	if (table != NULL) {
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			entry->gpe_pp = NULL;
			g_wither_provider(pp, error);
		G_PART_DESTROY(table, NULL);
		kobj_delete((kobj_t)table, M_GEOM);
	g_wither_geom(gp, error);

g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)

	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	ctlreq = G_PART_CTL_NONE;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
			oparms |= G_PART_PARM_SKIP_DSN;
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_INDEX;
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		switch (ap->name[0]) {
			if (!strcmp(ap->name, "arg0")) {
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			if (!strcmp(ap->name, "class"))
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			else if (!strcmp(ap->name, "skip_dsn"))
				parm = G_PART_PARM_SKIP_DSN;
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			if (!strcmp(ap->name, "verb"))
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
		case G_PART_PARM_SKIP_DSN:
			error = g_part_parm_uint32(req, ap->name,
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			gctl_error(req, "%d %s", error, ap->name);
		if (error == ENOATTR) {
			gctl_error(req, "%d param '%s'", error,
		gpp.gpp_parms |= parm;
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);

	/* Obtain permissions if possible/necessary. */
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
			table->gpt_opened = 1;

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
			gctl_error(req, "%d pre-check failed", error);
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);

	/* Implement automatic commit. */
	auto_commit = (modifies &&
	    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
	    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
		error = g_part_ctl_commit(req, &gpp);
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;

g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();
	g_part_wither(gp, EINVAL);

static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)

	struct g_consumer *cp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	struct g_geom_alias *gap;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */

	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	gp = g_new_geomf(mp, "%s", pp->name);
	LIST_FOREACH(gap, &pp->geom->aliases, ga_next)
		g_geom_add_alias(gp, gap->ga_alias);
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	error = g_access(cp, 1, 0, 0);
	g_destroy_consumer(cp);
	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	 * Short-circuit the whole probing galore when there's no
	if (pp->mediasize == 0 || pp->sectorsize == 0) {

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;
	error = g_part_probe(gp, cp, depth);

	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	error = g_part_check_integrity(table, cp);
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_destroy_consumer(cp);

g_part_access(struct g_provider *pp, int dr, int dw, int de)

	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
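
/*
 * Editorial note (not in the original source): a request of
 * (dr, dw, de) = (0, 1, 0) on a partition therefore becomes (0, 1, 1)
 * on the underlying consumer, so a writer of one partition also counts
 * as an exclusive opener of the disk, keeping other consumers from
 * writing to the raw device underneath.
 */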

g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)

	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);

 * This start routine is only called for non-trivial requests, all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, this means:
 *    * Don't grab the topology lock.
 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()

g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)

	struct g_part_table *table;

	table = pp->geom->softc;
	return G_PART_IOCTL(table, pp, cmd, data, fflag, td);

g_part_resize(struct g_consumer *cp)

	struct g_part_table *table;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	if (auto_resize == 0)
	table = cp->geom->softc;
	if (table->gpt_opened == 0) {
		if (g_access(cp, 1, 1, 1) != 0)
		table->gpt_opened = 1;
	if (G_PART_RESIZE(table, NULL, NULL) == 0)
		printf("GEOM_PART: %s was automatically resized.\n"
		    "  Use `gpart commit %s` to save changes or "
		    "`gpart undo %s` to revert them.\n", cp->geom->name,
		    cp->geom->name, cp->geom->name);
	if (g_part_check_integrity(table, cp) != 0) {
		g_access(cp, -1, -1, -1);
		table->gpt_opened = 0;
		g_part_wither(table->gpt_gp, ENXIO);

g_part_orphan(struct g_consumer *cp)

	struct g_provider *pp;
	struct g_part_table *table;

	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);

g_part_spoiled(struct g_consumer *cp)

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	cp->flags |= G_CF_ORPHAN;
	g_part_wither(cp->geom, ENXIO);

g_part_start(struct bio *bp)

	struct g_consumer *cp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	void (*done_func)(struct bio *) = g_std_done;

	biotrack(bp, __func__);
	cp = LIST_FIRST(&gp->consumer);
	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
	switch(bp->bio_cmd) {
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
		bp2 = g_clone_bio(bp);
			g_io_deliver(bp, ENOMEM);
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
		 * allow_nesting overrides "isleaf" to false _unless_ the
		 * provider offset is zero, since otherwise we would recurse.
		if (g_handleattr_int(bp, "PART::isleaf",
		    table->gpt_isleaf &&
		    (allow_nesting == 0 || entry->gpe_offset == 0)))
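		/*
		 * Editorial note (not in the original source): with
		 * kern.geom.part.allow_nesting=1 a partition that starts at
		 * a non-zero offset reports PART::isleaf as false, so
		 * another partition table can be tasted inside it; a
		 * partition starting at offset 0 still reports itself as a
		 * leaf, since a nested table there would cover the same
		 * sectors and recurse.
		 */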
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
		if (g_handleattr_str(bp, "PART::type",
		    G_PART_TYPE(table, entry, buf, sizeof(buf))))
		if (!strcmp("GEOM::physpath", bp->bio_attribute)) {
			done_func = g_part_get_physpath_done;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used. If the request comes from the nested scheme
			 * we allow dumping there as well.
			if ((bp->bio_from == NULL ||
			    bp->bio_from->geom->class != &g_part_class) &&
			    G_PART_DUMPTO(table, entry) == 0) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		g_io_deliver(bp, EOPNOTSUPP);
		bp2 = g_clone_bio(bp);
			g_io_deliver(bp, ENOMEM);
		bp2->bio_done = done_func;
		g_io_request(bp2, cp);

g_part_init(struct g_class *mp)

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);

g_part_fini(struct g_class *mp)

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);

g_part_unload_event(void *arg, int flag)

	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;

	if (flag == EV_CANCEL)
	scheme = (void *)(*xchg);
	g_topology_assert();
	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		if (table->gpt_scheme != scheme)
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;
		g_part_wither(gp, ENOSYS);
	TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)

	struct g_part_scheme *iter;

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (scheme == iter) {
			printf("GEOM_PART: scheme %s is already "
			    "registered!\n", scheme->name);
	TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
	g_retaste(&g_part_class);
	arg = (uintptr_t)scheme;
	error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,