2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
32 #include <sys/endian.h>
33 #include <sys/kernel.h>
35 #include <sys/limits.h>
37 #include <sys/malloc.h>
38 #include <sys/mutex.h>
39 #include <sys/queue.h>
41 #include <sys/sysctl.h>
42 #include <sys/systm.h>
44 #include <geom/geom.h>
45 #include <geom/geom_ctl.h>
46 #include <geom/geom_int.h>
47 #include <geom/part/g_part.h>
49 #include "g_part_if.h"
52 #define _PATH_DEV "/dev/"
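/*
 * User-supplied geom and provider names may carry a leading "/dev/";
 * the parameter parsers below strip it using this prefix.
 */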
55 static kobj_method_t g_part_null_methods[] = {
59 static struct g_part_scheme g_part_null_scheme = {
62 sizeof(struct g_part_table),
65 TAILQ_HEAD(, g_part_scheme) g_part_schemes =
66 TAILQ_HEAD_INITIALIZER(g_part_schemes);
68 struct g_part_alias_list {
70 enum g_part_alias alias;
71 } g_part_alias_list[G_PART_ALIAS_COUNT] = {
72 { "apple-boot", G_PART_ALIAS_APPLE_BOOT },
73 { "apple-core-storage", G_PART_ALIAS_APPLE_CORE_STORAGE },
74 { "apple-hfs", G_PART_ALIAS_APPLE_HFS },
75 { "apple-label", G_PART_ALIAS_APPLE_LABEL },
76 { "apple-raid", G_PART_ALIAS_APPLE_RAID },
77 { "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
78 { "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
79 { "apple-ufs", G_PART_ALIAS_APPLE_UFS },
80 { "bios-boot", G_PART_ALIAS_BIOS_BOOT },
81 { "chromeos-firmware", G_PART_ALIAS_CHROMEOS_FIRMWARE },
82 { "chromeos-kernel", G_PART_ALIAS_CHROMEOS_KERNEL },
83 { "chromeos-reserved", G_PART_ALIAS_CHROMEOS_RESERVED },
84 { "chromeos-root", G_PART_ALIAS_CHROMEOS_ROOT },
85 { "dragonfly-ccd", G_PART_ALIAS_DFBSD_CCD },
86 { "dragonfly-hammer", G_PART_ALIAS_DFBSD_HAMMER },
87 { "dragonfly-hammer2", G_PART_ALIAS_DFBSD_HAMMER2 },
88 { "dragonfly-label32", G_PART_ALIAS_DFBSD },
89 { "dragonfly-label64", G_PART_ALIAS_DFBSD64 },
90 { "dragonfly-legacy", G_PART_ALIAS_DFBSD_LEGACY },
91 { "dragonfly-swap", G_PART_ALIAS_DFBSD_SWAP },
92 { "dragonfly-ufs", G_PART_ALIAS_DFBSD_UFS },
93 { "dragonfly-vinum", G_PART_ALIAS_DFBSD_VINUM },
94 { "ebr", G_PART_ALIAS_EBR },
95 { "efi", G_PART_ALIAS_EFI },
96 { "fat16", G_PART_ALIAS_MS_FAT16 },
97 { "fat32", G_PART_ALIAS_MS_FAT32 },
98 { "freebsd", G_PART_ALIAS_FREEBSD },
99 { "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
100 { "freebsd-nandfs", G_PART_ALIAS_FREEBSD_NANDFS },
101 { "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
102 { "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
103 { "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
104 { "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
105 { "linux-data", G_PART_ALIAS_LINUX_DATA },
106 { "linux-lvm", G_PART_ALIAS_LINUX_LVM },
107 { "linux-raid", G_PART_ALIAS_LINUX_RAID },
108 { "linux-swap", G_PART_ALIAS_LINUX_SWAP },
109 { "mbr", G_PART_ALIAS_MBR },
110 { "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
111 { "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
112 { "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
113 { "ms-recovery", G_PART_ALIAS_MS_RECOVERY },
114 { "ms-reserved", G_PART_ALIAS_MS_RESERVED },
115 { "ms-spaces", G_PART_ALIAS_MS_SPACES },
116 { "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
117 { "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
118 { "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
119 { "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
120 { "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
121 { "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
122 { "ntfs", G_PART_ALIAS_MS_NTFS },
123 { "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
124 { "prep-boot", G_PART_ALIAS_PREP_BOOT },
125 { "vmware-reserved", G_PART_ALIAS_VMRESERVED },
126 { "vmware-vmfs", G_PART_ALIAS_VMFS },
127 { "vmware-vmkdiag", G_PART_ALIAS_VMKDIAG },
128 { "vmware-vsanhdr", G_PART_ALIAS_VMVSANHDR },
131 SYSCTL_DECL(_kern_geom);
132 SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0,
134 static u_int check_integrity = 1;
135 SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity,
136 CTLFLAG_RWTUN, &check_integrity, 1,
137 "Enable integrity checking");
138 static u_int auto_resize = 1;
139 SYSCTL_UINT(_kern_geom_part, OID_AUTO, auto_resize,
140 CTLFLAG_RWTUN, &auto_resize, 1,
141 "Enable auto resize");
144 * The GEOM partitioning class.
146 static g_ctl_req_t g_part_ctlreq;
147 static g_ctl_destroy_geom_t g_part_destroy_geom;
148 static g_fini_t g_part_fini;
149 static g_init_t g_part_init;
150 static g_taste_t g_part_taste;
152 static g_access_t g_part_access;
153 static g_dumpconf_t g_part_dumpconf;
154 static g_orphan_t g_part_orphan;
155 static g_spoiled_t g_part_spoiled;
156 static g_start_t g_part_start;
157 static g_resize_t g_part_resize;
158 static g_ioctl_t g_part_ioctl;
160 static struct g_class g_part_class = {
162 .version = G_VERSION,
164 .ctlreq = g_part_ctlreq,
165 .destroy_geom = g_part_destroy_geom,
168 .taste = g_part_taste,
170 .access = g_part_access,
171 .dumpconf = g_part_dumpconf,
172 .orphan = g_part_orphan,
173 .spoiled = g_part_spoiled,
174 .start = g_part_start,
175 .resize = g_part_resize,
176 .ioctl = g_part_ioctl,
179 DECLARE_GEOM_CLASS(g_part_class, g_part);
180 MODULE_VERSION(g_part, 0);
186 static void g_part_wither(struct g_geom *, int);
189 g_part_alias_name(enum g_part_alias alias)
193 for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
194 if (g_part_alias_list[i].alias != alias)
196 return (g_part_alias_list[i].lexeme);
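/*
 * Given a fixed sectors-per-track value, pick the head count from the
 * candidate list below that maximizes the CHS-addressable capacity while
 * keeping the cylinder count at or below 1023.  Illustrative example:
 * 4194304 blocks (2 GiB of 512-byte sectors) at 63 sectors/track yields
 * 128 heads and 520 cylinders, i.e. 520 * 128 * 63 = 4193280 addressable
 * blocks.
 */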
203 g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
206 static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
207 off_t chs, cylinders;
213 for (idx = 0; candidate_heads[idx] != 0; idx++) {
214 heads = candidate_heads[idx];
215 cylinders = blocks / heads / sectors;
216 if (cylinders < heads || cylinders < sectors)
218 if (cylinders > 1023)
220 chs = cylinders * heads * sectors;
221 if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
229 g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
232 static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
234 u_int heads, sectors;
237 if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
238 g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
239 table->gpt_fixgeom = 0;
240 table->gpt_heads = 0;
241 table->gpt_sectors = 0;
243 for (idx = 0; candidate_sectors[idx] != 0; idx++) {
244 sectors = candidate_sectors[idx];
245 g_part_geometry_heads(blocks, sectors, &chs, &heads);
249 * Prefer a geometry with sectors > 1, but only if
250 * it doesn't bump down the number of heads to 1.
252 if (chs > bestchs || (chs == bestchs && heads > 1 &&
253 table->gpt_sectors == 1)) {
255 table->gpt_heads = heads;
256 table->gpt_sectors = sectors;
260 * If we didn't find a geometry at all, then the disk is
261 * too big. This means we can use the maximum number of
265 table->gpt_heads = 255;
266 table->gpt_sectors = 63;
269 table->gpt_fixgeom = 1;
270 table->gpt_heads = heads;
271 table->gpt_sectors = sectors;
275 #define DPRINTF(...) if (bootverbose) { \
276 printf("GEOM_PART: " __VA_ARGS__); \
280 g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
282 struct g_part_entry *e1, *e2;
283 struct g_provider *pp;
289 if (table->gpt_last < table->gpt_first) {
290 DPRINTF("last LBA is below first LBA: %jd < %jd\n",
291 (intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
294 if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
295 DPRINTF("last LBA extends beyond mediasize: "
296 "%jd > %jd\n", (intmax_t)table->gpt_last,
297 (intmax_t)pp->mediasize / pp->sectorsize - 1);
300 LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
301 if (e1->gpe_deleted || e1->gpe_internal)
303 if (e1->gpe_start < table->gpt_first) {
304 DPRINTF("partition %d has start offset below first "
305 "LBA: %jd < %jd\n", e1->gpe_index,
306 (intmax_t)e1->gpe_start,
307 (intmax_t)table->gpt_first);
310 if (e1->gpe_start > table->gpt_last) {
311 DPRINTF("partition %d has start offset beyond last "
312 "LBA: %jd > %jd\n", e1->gpe_index,
313 (intmax_t)e1->gpe_start,
314 (intmax_t)table->gpt_last);
317 if (e1->gpe_end < e1->gpe_start) {
318 DPRINTF("partition %d has end offset below start "
319 "offset: %jd < %jd\n", e1->gpe_index,
320 (intmax_t)e1->gpe_end,
321 (intmax_t)e1->gpe_start);
324 if (e1->gpe_end > table->gpt_last) {
325 DPRINTF("partition %d has end offset beyond last "
326 "LBA: %jd > %jd\n", e1->gpe_index,
327 (intmax_t)e1->gpe_end,
328 (intmax_t)table->gpt_last);
331 if (pp->stripesize > 0) {
332 offset = e1->gpe_start * pp->sectorsize;
333 if (e1->gpe_offset > offset)
334 offset = e1->gpe_offset;
335 if ((offset + pp->stripeoffset) % pp->stripesize) {
336 DPRINTF("partition %d on (%s, %s) is not "
337 "aligned on %u bytes\n", e1->gpe_index,
338 pp->name, table->gpt_scheme->name,
340 /* Don't treat this as a critical failure */
344 while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
345 if (e2->gpe_deleted || e2->gpe_internal)
347 if (e1->gpe_start >= e2->gpe_start &&
348 e1->gpe_start <= e2->gpe_end) {
349 DPRINTF("partition %d has start offset inside "
350 "partition %d: start[%d] %jd >= start[%d] "
351 "%jd <= end[%d] %jd\n",
352 e1->gpe_index, e2->gpe_index,
353 e2->gpe_index, (intmax_t)e2->gpe_start,
354 e1->gpe_index, (intmax_t)e1->gpe_start,
355 e2->gpe_index, (intmax_t)e2->gpe_end);
358 if (e1->gpe_end >= e2->gpe_start &&
359 e1->gpe_end <= e2->gpe_end) {
360 DPRINTF("partition %d has end offset inside "
361 "partition %d: start[%d] %jd >= end[%d] "
362 "%jd <= end[%d] %jd\n",
363 e1->gpe_index, e2->gpe_index,
364 e2->gpe_index, (intmax_t)e2->gpe_start,
365 e1->gpe_index, (intmax_t)e1->gpe_end,
366 e2->gpe_index, (intmax_t)e2->gpe_end);
369 if (e1->gpe_start < e2->gpe_start &&
370 e1->gpe_end > e2->gpe_end) {
371 DPRINTF("partition %d contains partition %d: "
372 "start[%d] %jd > start[%d] %jd, end[%d] "
373 "%jd < end[%d] %jd\n",
374 e1->gpe_index, e2->gpe_index,
375 e1->gpe_index, (intmax_t)e1->gpe_start,
376 e2->gpe_index, (intmax_t)e2->gpe_start,
377 e2->gpe_index, (intmax_t)e2->gpe_end,
378 e1->gpe_index, (intmax_t)e1->gpe_end);
384 printf("GEOM_PART: integrity check failed (%s, %s)\n",
385 pp->name, table->gpt_scheme->name);
386 if (check_integrity != 0)
388 table->gpt_corrupt = 1;
394 struct g_part_entry *
395 g_part_new_entry(struct g_part_table *table, int index, quad_t start,
398 struct g_part_entry *entry, *last;
401 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
402 if (entry->gpe_index == index)
404 if (entry->gpe_index > index) {
411 entry = g_malloc(table->gpt_scheme->gps_entrysz,
413 entry->gpe_index = index;
415 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
417 LIST_INSERT_AFTER(last, entry, gpe_entry);
419 entry->gpe_offset = 0;
420 entry->gpe_start = start;
421 entry->gpe_end = end;
426 g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
427 struct g_part_entry *entry)
429 struct g_consumer *cp;
430 struct g_provider *pp;
434 cp = LIST_FIRST(&gp->consumer);
437 offset = entry->gpe_start * pp->sectorsize;
438 if (entry->gpe_offset < offset)
439 entry->gpe_offset = offset;
441 if (entry->gpe_pp == NULL) {
442 sb = sbuf_new_auto();
443 G_PART_FULLNAME(table, entry, sb, gp->name);
445 entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
447 entry->gpe_pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
448 entry->gpe_pp->private = entry; /* Close the circle. */
450 entry->gpe_pp->index = entry->gpe_index - 1; /* index is 1-based. */
451 entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
453 entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
454 entry->gpe_pp->sectorsize = pp->sectorsize;
455 entry->gpe_pp->stripesize = pp->stripesize;
456 entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
457 if (pp->stripesize > 0)
458 entry->gpe_pp->stripeoffset %= pp->stripesize;
459 entry->gpe_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED;
460 g_error_provider(entry->gpe_pp, 0);
463 static struct g_geom*
464 g_part_find_geom(const char *name)
467 LIST_FOREACH(gp, &g_part_class.geom, geom) {
468 if ((gp->flags & G_GEOM_WITHER) == 0 &&
469 strcmp(name, gp->name) == 0)
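/*
 * The g_part_parm_*() helpers below fetch and validate gctl request
 * arguments.  On error they report "<errno> <name> '<value>'" through
 * gctl_error() and return the errno, so callers can simply bail out.
 */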
476 g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
481 gname = gctl_get_asciiparam(req, name);
484 if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
485 gname += sizeof(_PATH_DEV) - 1;
486 gp = g_part_find_geom(gname);
488 gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
496 g_part_parm_provider(struct gctl_req *req, const char *name,
497 struct g_provider **v)
499 struct g_provider *pp;
502 pname = gctl_get_asciiparam(req, name);
505 if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
506 pname += sizeof(_PATH_DEV) - 1;
507 pp = g_provider_by_name(pname);
509 gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
517 g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
523 p = gctl_get_asciiparam(req, name);
526 q = strtoq(p, &x, 0);
527 if (*x != '\0' || q < 0) {
528 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
536 g_part_parm_scheme(struct gctl_req *req, const char *name,
537 struct g_part_scheme **v)
539 struct g_part_scheme *s;
542 p = gctl_get_asciiparam(req, name);
545 TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
546 if (s == &g_part_null_scheme)
548 if (!strcasecmp(s->name, p))
552 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
560 g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
564 p = gctl_get_asciiparam(req, name);
567 /* An empty label is always valid. */
568 if (strcmp(name, "label") != 0 && p[0] == '\0') {
569 gctl_error(req, "%d %s '%s'", EINVAL, name, p);
577 g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
582 p = gctl_get_param(req, name, &size);
585 if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
586 gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
594 g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
599 p = gctl_get_param(req, name, &size);
602 if (size != sizeof(*p) || *p > INT_MAX) {
603 gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
611 g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
617 p = gctl_get_param(req, name, &size);
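/*
 * Probe the provider with every registered scheme.  G_PART_PROBE()
 * returns a priority that is zero or negative on success; the scheme
 * reporting the highest (least negative) priority wins and its kobj
 * instance becomes gp->softc.  ENXIO is returned when no scheme
 * recognizes the on-disk metadata.
 */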
626 g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
628 struct g_part_scheme *iter, *scheme;
629 struct g_part_table *table;
633 scheme = (table != NULL) ? table->gpt_scheme : NULL;
634 pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
637 if (pri > 0) { /* error */
642 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
643 if (iter == &g_part_null_scheme)
645 table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
648 table->gpt_scheme = iter;
649 table->gpt_depth = depth;
650 probe = G_PART_PROBE(table, cp);
651 if (probe <= 0 && probe > pri) {
654 if (gp->softc != NULL)
655 kobj_delete((kobj_t)gp->softc, M_GEOM);
660 kobj_delete((kobj_t)table, M_GEOM);
664 return ((scheme == NULL) ? ENXIO : 0);
668 * Control request functions.
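/*
 * Each handler below implements one gctl verb issued by gpart(8).  A
 * typical session maps onto them roughly as follows (illustrative only):
 *
 *     gpart create -s GPT da0          -> g_part_ctl_create()
 *     gpart add -t freebsd-ufs da0     -> g_part_ctl_add()
 *     gpart commit da0                 -> g_part_ctl_commit()
 */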
672 g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
675 struct g_provider *pp;
676 struct g_part_entry *delent, *last, *entry;
677 struct g_part_table *table;
684 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
687 pp = LIST_FIRST(&gp->consumer)->provider;
689 end = gpp->gpp_start + gpp->gpp_size - 1;
691 if (gpp->gpp_start < table->gpt_first ||
692 gpp->gpp_start > table->gpt_last) {
693 gctl_error(req, "%d start '%jd'", EINVAL,
694 (intmax_t)gpp->gpp_start);
697 if (end < gpp->gpp_start || end > table->gpt_last) {
698 gctl_error(req, "%d size '%jd'", EINVAL,
699 (intmax_t)gpp->gpp_size);
702 if (gpp->gpp_index > table->gpt_entries) {
703 gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
707 delent = last = NULL;
708 index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
709 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
710 if (entry->gpe_deleted) {
711 if (entry->gpe_index == index)
715 if (entry->gpe_index == index)
716 index = entry->gpe_index + 1;
717 if (entry->gpe_index < index)
719 if (entry->gpe_internal)
721 if (gpp->gpp_start >= entry->gpe_start &&
722 gpp->gpp_start <= entry->gpe_end) {
723 gctl_error(req, "%d start '%jd'", ENOSPC,
724 (intmax_t)gpp->gpp_start);
727 if (end >= entry->gpe_start && end <= entry->gpe_end) {
728 gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
731 if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
732 gctl_error(req, "%d size '%jd'", ENOSPC,
733 (intmax_t)gpp->gpp_size);
737 if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
738 gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
741 if (index > table->gpt_entries) {
742 gctl_error(req, "%d index '%d'", ENOSPC, index);
746 entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
747 M_WAITOK | M_ZERO) : delent;
748 entry->gpe_index = index;
749 entry->gpe_start = gpp->gpp_start;
750 entry->gpe_end = end;
751 error = G_PART_ADD(table, entry, gpp);
753 gctl_error(req, "%d", error);
758 if (delent == NULL) {
760 LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
762 LIST_INSERT_AFTER(last, entry, gpe_entry);
763 entry->gpe_created = 1;
765 entry->gpe_deleted = 0;
766 entry->gpe_modified = 1;
768 g_part_new_provider(gp, table, entry);
770 /* Provide feedback if so requested. */
771 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
772 sb = sbuf_new_auto();
773 G_PART_FULLNAME(table, entry, sb, gp->name);
774 if (pp->stripesize > 0 && entry->gpe_pp->stripeoffset != 0)
775 sbuf_printf(sb, " added, but partition is not "
776 "aligned on %u bytes\n", pp->stripesize);
778 sbuf_cat(sb, " added\n");
780 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
787 g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
790 struct g_part_table *table;
795 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
799 sz = table->gpt_scheme->gps_bootcodesz;
804 if (gpp->gpp_codesize > sz) {
809 error = G_PART_BOOTCODE(table, gpp);
813 /* Provide feedback if so requested. */
814 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
815 sb = sbuf_new_auto();
816 sbuf_printf(sb, "bootcode written to %s\n", gp->name);
818 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
824 gctl_error(req, "%d", error);
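/*
 * Write the modified table back to disk.  Scratch sectors recorded in
 * gpt_smhead/gpt_smtail (bitmaps of sectors to wipe at the start and end
 * of the media) are zeroed first, then the scheme's write method runs and
 * all pending entry changes are finalized.
 */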
829 g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
831 struct g_consumer *cp;
833 struct g_provider *pp;
834 struct g_part_entry *entry, *tmp;
835 struct g_part_table *table;
840 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
844 if (!table->gpt_opened) {
845 gctl_error(req, "%d", EPERM);
851 cp = LIST_FIRST(&gp->consumer);
852 if ((table->gpt_smhead | table->gpt_smtail) != 0) {
854 buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
855 while (table->gpt_smhead != 0) {
856 i = ffs(table->gpt_smhead) - 1;
857 error = g_write_data(cp, i * pp->sectorsize, buf,
863 table->gpt_smhead &= ~(1 << i);
865 while (table->gpt_smtail != 0) {
866 i = ffs(table->gpt_smtail) - 1;
867 error = g_write_data(cp, pp->mediasize - (i + 1) *
868 pp->sectorsize, buf, pp->sectorsize);
873 table->gpt_smtail &= ~(1 << i);
878 if (table->gpt_scheme == &g_part_null_scheme) {
880 g_access(cp, -1, -1, -1);
881 g_part_wither(gp, ENXIO);
885 error = G_PART_WRITE(table, cp);
889 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
890 if (!entry->gpe_deleted) {
891 /* Notify consumers that provider might be changed. */
892 if (entry->gpe_modified && (
893 entry->gpe_pp->acw + entry->gpe_pp->ace) == 0)
894 g_media_changed(entry->gpe_pp, M_NOWAIT);
895 entry->gpe_created = 0;
896 entry->gpe_modified = 0;
899 LIST_REMOVE(entry, gpe_entry);
902 table->gpt_created = 0;
903 table->gpt_opened = 0;
906 g_access(cp, -1, -1, -1);
911 gctl_error(req, "%d", error);
916 g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
918 struct g_consumer *cp;
920 struct g_provider *pp;
921 struct g_part_scheme *scheme;
922 struct g_part_table *null, *table;
926 pp = gpp->gpp_provider;
927 scheme = gpp->gpp_scheme;
928 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
931 /* Check that there isn't already a g_part geom on the provider. */
932 gp = g_part_find_geom(pp->name);
935 if (null->gpt_scheme != &g_part_null_scheme) {
936 gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
942 if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
943 (gpp->gpp_entries < scheme->gps_minent ||
944 gpp->gpp_entries > scheme->gps_maxent)) {
945 gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
950 gp = g_new_geomf(&g_part_class, "%s", pp->name);
951 gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
955 table->gpt_scheme = gpp->gpp_scheme;
956 table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
957 gpp->gpp_entries : scheme->gps_minent;
958 LIST_INIT(&table->gpt_entry);
960 cp = g_new_consumer(gp);
961 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
962 error = g_attach(cp, pp);
964 error = g_access(cp, 1, 1, 1);
966 g_part_wither(gp, error);
967 gctl_error(req, "%d geom '%s'", error, pp->name);
970 table->gpt_opened = 1;
972 cp = LIST_FIRST(&gp->consumer);
973 table->gpt_opened = null->gpt_opened;
974 table->gpt_smhead = null->gpt_smhead;
975 table->gpt_smtail = null->gpt_smtail;
980 /* Make sure the provider has media. */
981 if (pp->mediasize == 0 || pp->sectorsize == 0) {
986 /* Make sure we can nest and if so, determine our depth. */
987 error = g_getattr("PART::isleaf", cp, &attr);
988 if (!error && attr) {
992 error = g_getattr("PART::depth", cp, &attr);
993 table->gpt_depth = (!error) ? attr + 1 : 0;
996 * Synthesize a disk geometry. Some partitioning schemes
997 * depend on it and since some file systems need it even
998 * when the partition scheme doesn't, we do it here in
999 * scheme-independent code.
1001 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1003 error = G_PART_CREATE(table, gpp);
1009 table->gpt_created = 1;
1011 kobj_delete((kobj_t)null, M_GEOM);
1014 * Support automatic commit by filling in the gpp_geom
1017 gpp->gpp_parms |= G_PART_PARM_GEOM;
1020 /* Provide feedback if so requested. */
1021 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1022 sb = sbuf_new_auto();
1023 sbuf_printf(sb, "%s created\n", gp->name);
1025 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1033 g_access(cp, -1, -1, -1);
1034 g_part_wither(gp, error);
1036 kobj_delete((kobj_t)gp->softc, M_GEOM);
1039 gctl_error(req, "%d provider", error);
1044 g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
1047 struct g_provider *pp;
1048 struct g_part_entry *entry;
1049 struct g_part_table *table;
1053 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1054 g_topology_assert();
1058 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1059 if (entry->gpe_deleted || entry->gpe_internal)
1061 if (entry->gpe_index == gpp->gpp_index)
1064 if (entry == NULL) {
1065 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1071 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
1072 gctl_error(req, "%d", EBUSY);
1077 entry->gpe_pp = NULL;
1081 g_wither_provider(pp, ENXIO);
1083 /* Provide feedback if so requested. */
1084 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1085 sb = sbuf_new_auto();
1086 G_PART_FULLNAME(table, entry, sb, gp->name);
1087 sbuf_cat(sb, " deleted\n");
1089 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1093 if (entry->gpe_created) {
1094 LIST_REMOVE(entry, gpe_entry);
1097 entry->gpe_modified = 0;
1098 entry->gpe_deleted = 1;
1104 g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
1106 struct g_consumer *cp;
1108 struct g_provider *pp;
1109 struct g_part_entry *entry, *tmp;
1110 struct g_part_table *null, *table;
1115 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1116 g_topology_assert();
1119 /* Check for busy providers. */
1120 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1121 if (entry->gpe_deleted || entry->gpe_internal)
1123 if (gpp->gpp_force) {
1127 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
1130 gctl_error(req, "%d", EBUSY);
1134 if (gpp->gpp_force) {
1135 /* Destroy all providers. */
1136 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1140 g_wither_provider(pp, ENXIO);
1142 LIST_REMOVE(entry, gpe_entry);
1147 error = G_PART_DESTROY(table, gpp);
1149 gctl_error(req, "%d", error);
1153 gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
1157 null->gpt_scheme = &g_part_null_scheme;
1158 LIST_INIT(&null->gpt_entry);
1160 cp = LIST_FIRST(&gp->consumer);
1162 null->gpt_last = pp->mediasize / pp->sectorsize - 1;
1164 null->gpt_depth = table->gpt_depth;
1165 null->gpt_opened = table->gpt_opened;
1166 null->gpt_smhead = table->gpt_smhead;
1167 null->gpt_smtail = table->gpt_smtail;
1169 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1170 LIST_REMOVE(entry, gpe_entry);
1173 kobj_delete((kobj_t)table, M_GEOM);
1175 /* Provide feedback if so requested. */
1176 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1177 sb = sbuf_new_auto();
1178 sbuf_printf(sb, "%s destroyed\n", gp->name);
1180 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1187 g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
1190 struct g_part_entry *entry;
1191 struct g_part_table *table;
1196 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1197 g_topology_assert();
1201 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1202 if (entry->gpe_deleted || entry->gpe_internal)
1204 if (entry->gpe_index == gpp->gpp_index)
1207 if (entry == NULL) {
1208 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1212 error = G_PART_MODIFY(table, entry, gpp);
1214 gctl_error(req, "%d", error);
1218 if (!entry->gpe_created)
1219 entry->gpe_modified = 1;
1221 /* Provide feedback if so requested. */
1222 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1223 sb = sbuf_new_auto();
1224 G_PART_FULLNAME(table, entry, sb, gp->name);
1225 sbuf_cat(sb, " modified\n");
1227 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1234 g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
1236 gctl_error(req, "%d verb 'move'", ENOSYS);
1241 g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
1243 struct g_part_table *table;
1246 int error, recovered;
1249 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1250 g_topology_assert();
1252 error = recovered = 0;
1254 if (table->gpt_corrupt) {
1255 error = G_PART_RECOVER(table);
1257 error = g_part_check_integrity(table,
1258 LIST_FIRST(&gp->consumer));
1260 gctl_error(req, "%d recovering '%s' failed",
1266 /* Provide feedback if so requested. */
1267 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1268 sb = sbuf_new_auto();
1270 sbuf_printf(sb, "%s recovered\n", gp->name);
1272 sbuf_printf(sb, "%s recovering is not needed\n",
1275 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
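/*
 * Resize an existing entry in place.  The new range must not collide with
 * other entries, and shrinking a partition that is currently open is
 * refused unless the GEOM "foot shooting" debug flag
 * (kern.geom.debugflags & 16) is set.
 */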
1282 g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
1285 struct g_provider *pp;
1286 struct g_part_entry *pe, *entry;
1287 struct g_part_table *table;
1294 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1295 g_topology_assert();
1298 /* check gpp_index */
1299 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1300 if (entry->gpe_deleted || entry->gpe_internal)
1302 if (entry->gpe_index == gpp->gpp_index)
1305 if (entry == NULL) {
1306 gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1310 /* check gpp_size */
1311 end = entry->gpe_start + gpp->gpp_size - 1;
1312 if (gpp->gpp_size < 1 || end > table->gpt_last) {
1313 gctl_error(req, "%d size '%jd'", EINVAL,
1314 (intmax_t)gpp->gpp_size);
1318 LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
1319 if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
1321 if (end >= pe->gpe_start && end <= pe->gpe_end) {
1322 gctl_error(req, "%d end '%jd'", ENOSPC,
1326 if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
1327 gctl_error(req, "%d size '%jd'", ENOSPC,
1328 (intmax_t)gpp->gpp_size);
1334 if ((g_debugflags & 16) == 0 &&
1335 (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
1336 if (entry->gpe_end - entry->gpe_start + 1 > gpp->gpp_size) {
1337 /* Deny shrinking of an opened partition. */
1338 gctl_error(req, "%d", EBUSY);
1343 error = G_PART_RESIZE(table, entry, gpp);
1345 gctl_error(req, "%d%s", error, error != EBUSY ? "":
1346 " resizing will lead to unexpected shrinking"
1347 " due to alignment");
1351 if (!entry->gpe_created)
1352 entry->gpe_modified = 1;
1354 /* update mediasize of changed provider */
1355 mediasize = (entry->gpe_end - entry->gpe_start + 1) *
1357 g_resize_provider(pp, mediasize);
1359 /* Provide feedback if so requested. */
1360 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1361 sb = sbuf_new_auto();
1362 G_PART_FULLNAME(table, entry, sb, gp->name);
1363 sbuf_cat(sb, " resized\n");
1365 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1372 g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1376 struct g_part_entry *entry;
1377 struct g_part_table *table;
1382 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1383 g_topology_assert();
1387 if (gpp->gpp_parms & G_PART_PARM_INDEX) {
1388 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1389 if (entry->gpe_deleted || entry->gpe_internal)
1391 if (entry->gpe_index == gpp->gpp_index)
1394 if (entry == NULL) {
1395 gctl_error(req, "%d index '%d'", ENOENT,
1402 error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1404 gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1408 /* Provide feedback if so requested. */
1409 if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1410 sb = sbuf_new_auto();
1411 sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
1414 G_PART_FULLNAME(table, entry, sb, gp->name);
1416 sbuf_cat(sb, gp->name);
1419 gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1426 g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
1428 struct g_consumer *cp;
1429 struct g_provider *pp;
1431 struct g_part_entry *entry, *tmp;
1432 struct g_part_table *table;
1436 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1437 g_topology_assert();
1440 if (!table->gpt_opened) {
1441 gctl_error(req, "%d", EPERM);
1445 cp = LIST_FIRST(&gp->consumer);
1446 LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
1447 entry->gpe_modified = 0;
1448 if (entry->gpe_created) {
1452 entry->gpe_pp = NULL;
1453 g_wither_provider(pp, ENXIO);
1455 entry->gpe_deleted = 1;
1457 if (entry->gpe_deleted) {
1458 LIST_REMOVE(entry, gpe_entry);
1463 g_topology_unlock();
1465 reprobe = (table->gpt_scheme == &g_part_null_scheme ||
1466 table->gpt_created) ? 1 : 0;
1469 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1470 if (entry->gpe_internal)
1475 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1476 LIST_REMOVE(entry, gpe_entry);
1479 error = g_part_probe(gp, cp, table->gpt_depth);
1482 g_access(cp, -1, -1, -1);
1483 g_part_wither(gp, error);
1489 * Synthesize a disk geometry. Some partitioning schemes
1490 * depend on it and since some file systems need it even
1491 * when the partition scheme doesn't, we do it here in
1492 * scheme-independent code.
1495 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1498 error = G_PART_READ(table, cp);
1501 error = g_part_check_integrity(table, cp);
1506 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1507 if (!entry->gpe_internal)
1508 g_part_new_provider(gp, table, entry);
1511 table->gpt_opened = 0;
1512 g_access(cp, -1, -1, -1);
1517 gctl_error(req, "%d", error);
1522 g_part_wither(struct g_geom *gp, int error)
1524 struct g_part_entry *entry;
1525 struct g_part_table *table;
1528 if (table != NULL) {
1529 G_PART_DESTROY(table, NULL);
1530 while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1531 LIST_REMOVE(entry, gpe_entry);
1534 if (gp->softc != NULL) {
1535 kobj_delete((kobj_t)gp->softc, M_GEOM);
1539 g_wither_geom(gp, error);
1547 g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
1549 struct g_part_parms gpp;
1550 struct g_part_table *table;
1551 struct gctl_req_arg *ap;
1552 enum g_part_ctl ctlreq;
1553 unsigned int i, mparms, oparms, parm;
1554 int auto_commit, close_on_error;
1555 int error, modifies;
1557 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
1558 g_topology_assert();
1560 ctlreq = G_PART_CTL_NONE;
1563 oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
1566 if (!strcmp(verb, "add")) {
1567 ctlreq = G_PART_CTL_ADD;
1568 mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
1569 G_PART_PARM_START | G_PART_PARM_TYPE;
1570 oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
1574 if (!strcmp(verb, "bootcode")) {
1575 ctlreq = G_PART_CTL_BOOTCODE;
1576 mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
1580 if (!strcmp(verb, "commit")) {
1581 ctlreq = G_PART_CTL_COMMIT;
1582 mparms |= G_PART_PARM_GEOM;
1584 } else if (!strcmp(verb, "create")) {
1585 ctlreq = G_PART_CTL_CREATE;
1586 mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
1587 oparms |= G_PART_PARM_ENTRIES;
1591 if (!strcmp(verb, "delete")) {
1592 ctlreq = G_PART_CTL_DELETE;
1593 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1594 } else if (!strcmp(verb, "destroy")) {
1595 ctlreq = G_PART_CTL_DESTROY;
1596 mparms |= G_PART_PARM_GEOM;
1597 oparms |= G_PART_PARM_FORCE;
1601 if (!strcmp(verb, "modify")) {
1602 ctlreq = G_PART_CTL_MODIFY;
1603 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1604 oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
1605 } else if (!strcmp(verb, "move")) {
1606 ctlreq = G_PART_CTL_MOVE;
1607 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
1611 if (!strcmp(verb, "recover")) {
1612 ctlreq = G_PART_CTL_RECOVER;
1613 mparms |= G_PART_PARM_GEOM;
1614 } else if (!strcmp(verb, "resize")) {
1615 ctlreq = G_PART_CTL_RESIZE;
1616 mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
1621 if (!strcmp(verb, "set")) {
1622 ctlreq = G_PART_CTL_SET;
1623 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1624 oparms |= G_PART_PARM_INDEX;
1628 if (!strcmp(verb, "undo")) {
1629 ctlreq = G_PART_CTL_UNDO;
1630 mparms |= G_PART_PARM_GEOM;
1632 } else if (!strcmp(verb, "unset")) {
1633 ctlreq = G_PART_CTL_UNSET;
1634 mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM;
1635 oparms |= G_PART_PARM_INDEX;
1639 if (ctlreq == G_PART_CTL_NONE) {
1640 gctl_error(req, "%d verb '%s'", EINVAL, verb);
1644 bzero(&gpp, sizeof(gpp));
1645 for (i = 0; i < req->narg; i++) {
1648 switch (ap->name[0]) {
1650 if (!strcmp(ap->name, "arg0")) {
1652 (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
1654 if (!strcmp(ap->name, "attrib"))
1655 parm = G_PART_PARM_ATTRIB;
1658 if (!strcmp(ap->name, "bootcode"))
1659 parm = G_PART_PARM_BOOTCODE;
1662 if (!strcmp(ap->name, "class"))
1666 if (!strcmp(ap->name, "entries"))
1667 parm = G_PART_PARM_ENTRIES;
1670 if (!strcmp(ap->name, "flags"))
1671 parm = G_PART_PARM_FLAGS;
1672 else if (!strcmp(ap->name, "force"))
1673 parm = G_PART_PARM_FORCE;
1676 if (!strcmp(ap->name, "index"))
1677 parm = G_PART_PARM_INDEX;
1680 if (!strcmp(ap->name, "label"))
1681 parm = G_PART_PARM_LABEL;
1684 if (!strcmp(ap->name, "output"))
1685 parm = G_PART_PARM_OUTPUT;
1688 if (!strcmp(ap->name, "scheme"))
1689 parm = G_PART_PARM_SCHEME;
1690 else if (!strcmp(ap->name, "size"))
1691 parm = G_PART_PARM_SIZE;
1692 else if (!strcmp(ap->name, "start"))
1693 parm = G_PART_PARM_START;
1696 if (!strcmp(ap->name, "type"))
1697 parm = G_PART_PARM_TYPE;
1700 if (!strcmp(ap->name, "verb"))
1702 else if (!strcmp(ap->name, "version"))
1703 parm = G_PART_PARM_VERSION;
1706 if ((parm & (mparms | oparms)) == 0) {
1707 gctl_error(req, "%d param '%s'", EINVAL, ap->name);
1711 case G_PART_PARM_ATTRIB:
1712 error = g_part_parm_str(req, ap->name,
1715 case G_PART_PARM_BOOTCODE:
1716 error = g_part_parm_bootcode(req, ap->name,
1717 &gpp.gpp_codeptr, &gpp.gpp_codesize);
1719 case G_PART_PARM_ENTRIES:
1720 error = g_part_parm_intmax(req, ap->name,
1723 case G_PART_PARM_FLAGS:
1724 error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
1726 case G_PART_PARM_FORCE:
1727 error = g_part_parm_uint32(req, ap->name,
1730 case G_PART_PARM_GEOM:
1731 error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
1733 case G_PART_PARM_INDEX:
1734 error = g_part_parm_intmax(req, ap->name,
1737 case G_PART_PARM_LABEL:
1738 error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
1740 case G_PART_PARM_OUTPUT:
1741 error = 0; /* Write-only parameter */
1743 case G_PART_PARM_PROVIDER:
1744 error = g_part_parm_provider(req, ap->name,
1747 case G_PART_PARM_SCHEME:
1748 error = g_part_parm_scheme(req, ap->name,
1751 case G_PART_PARM_SIZE:
1752 error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
1754 case G_PART_PARM_START:
1755 error = g_part_parm_quad(req, ap->name,
1758 case G_PART_PARM_TYPE:
1759 error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
1761 case G_PART_PARM_VERSION:
1762 error = g_part_parm_uint32(req, ap->name,
1767 gctl_error(req, "%d %s", error, ap->name);
1771 if (error == ENOATTR) {
1772 gctl_error(req, "%d param '%s'", error,
1777 gpp.gpp_parms |= parm;
1779 if ((gpp.gpp_parms & mparms) != mparms) {
1780 parm = mparms - (gpp.gpp_parms & mparms);
1781 gctl_error(req, "%d param '%x'", ENOATTR, parm);
1785 /* Obtain permissions if possible/necessary. */
1788 if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
1789 table = gpp.gpp_geom->softc;
1790 if (table != NULL && table->gpt_corrupt &&
1791 ctlreq != G_PART_CTL_DESTROY &&
1792 ctlreq != G_PART_CTL_RECOVER) {
1793 gctl_error(req, "%d table '%s' is corrupt",
1794 EPERM, gpp.gpp_geom->name);
1797 if (table != NULL && !table->gpt_opened) {
1798 error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
1801 gctl_error(req, "%d geom '%s'", error,
1802 gpp.gpp_geom->name);
1805 table->gpt_opened = 1;
1810 /* Allow the scheme to check or modify the parameters. */
1811 if (table != NULL) {
1812 error = G_PART_PRECHECK(table, ctlreq, &gpp);
1814 gctl_error(req, "%d pre-check failed", error);
1818 error = EDOOFUS; /* Prevent bogus uninit. warning. */
1821 case G_PART_CTL_NONE:
1822 panic("%s", __func__);
1823 case G_PART_CTL_ADD:
1824 error = g_part_ctl_add(req, &gpp);
1826 case G_PART_CTL_BOOTCODE:
1827 error = g_part_ctl_bootcode(req, &gpp);
1829 case G_PART_CTL_COMMIT:
1830 error = g_part_ctl_commit(req, &gpp);
1832 case G_PART_CTL_CREATE:
1833 error = g_part_ctl_create(req, &gpp);
1835 case G_PART_CTL_DELETE:
1836 error = g_part_ctl_delete(req, &gpp);
1838 case G_PART_CTL_DESTROY:
1839 error = g_part_ctl_destroy(req, &gpp);
1841 case G_PART_CTL_MODIFY:
1842 error = g_part_ctl_modify(req, &gpp);
1844 case G_PART_CTL_MOVE:
1845 error = g_part_ctl_move(req, &gpp);
1847 case G_PART_CTL_RECOVER:
1848 error = g_part_ctl_recover(req, &gpp);
1850 case G_PART_CTL_RESIZE:
1851 error = g_part_ctl_resize(req, &gpp);
1853 case G_PART_CTL_SET:
1854 error = g_part_ctl_setunset(req, &gpp, 1);
1856 case G_PART_CTL_UNDO:
1857 error = g_part_ctl_undo(req, &gpp);
1859 case G_PART_CTL_UNSET:
1860 error = g_part_ctl_setunset(req, &gpp, 0);
1864 /* Implement automatic commit. */
1866 auto_commit = (modifies &&
1867 (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
1868 strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
1870 KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
1872 error = g_part_ctl_commit(req, &gpp);
1877 if (error && close_on_error) {
1878 g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
1879 table->gpt_opened = 0;
1884 g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
1888 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
1889 g_topology_assert();
1891 g_part_wither(gp, EINVAL);
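/*
 * Taste method: attach read-only to a newly announced provider, probe for
 * a supported partitioning scheme, read the table and instantiate a
 * provider for every entry.  A root mount hold is taken so that mounting
 * the root file system waits for the partitions to appear.
 */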
1895 static struct g_geom *
1896 g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1898 struct g_consumer *cp;
1900 struct g_part_entry *entry;
1901 struct g_part_table *table;
1902 struct root_hold_token *rht;
1906 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
1907 g_topology_assert();
1909 /* Skip providers that are already open for writing. */
1914 * Create a GEOM with consumer and hook it up to the provider.
1915 * With that we become part of the topology. Obtain read access
1918 gp = g_new_geomf(mp, "%s", pp->name);
1919 cp = g_new_consumer(gp);
1920 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
1921 error = g_attach(cp, pp);
1923 error = g_access(cp, 1, 0, 0);
1927 g_destroy_consumer(cp);
1932 rht = root_mount_hold(mp->name);
1933 g_topology_unlock();
1936 * Short-circuit the whole probing galore when there's no
1939 if (pp->mediasize == 0 || pp->sectorsize == 0) {
1944 /* Make sure we can nest and if so, determine our depth. */
1945 error = g_getattr("PART::isleaf", cp, &attr);
1946 if (!error && attr) {
1950 error = g_getattr("PART::depth", cp, &attr);
1951 depth = (!error) ? attr + 1 : 0;
1953 error = g_part_probe(gp, cp, depth);
1960 * Synthesize a disk geometry. Some partitioning schemes
1961 * depend on it and since some file systems need it even
1962 * when the partition scheme doesn't, we do it here in
1963 * scheme-independent code.
1965 g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
1967 error = G_PART_READ(table, cp);
1970 error = g_part_check_integrity(table, cp);
1975 LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1976 if (!entry->gpe_internal)
1977 g_part_new_provider(gp, table, entry);
1980 root_mount_rel(rht);
1981 g_access(cp, -1, 0, 0);
1986 root_mount_rel(rht);
1987 g_access(cp, -1, 0, 0);
1989 g_destroy_consumer(cp);
1999 g_part_access(struct g_provider *pp, int dr, int dw, int de)
2001 struct g_consumer *cp;
2003 G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
2006 cp = LIST_FIRST(&pp->geom->consumer);
2008 /* We always gain write-exclusive access. */
2009 return (g_access(cp, dr, dw, dw + de));
2013 g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2014 struct g_consumer *cp, struct g_provider *pp)
2017 struct g_part_entry *entry;
2018 struct g_part_table *table;
2020 KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
2023 if (indent == NULL) {
2024 KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
2025 entry = pp->private;
2028 sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
2029 (uintmax_t)entry->gpe_offset,
2030 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2032 * libdisk compatibility quirk - the scheme dumps the
2033 * slicer name and partition type in a way that is
2034 * compatible with libdisk. When libdisk is not used
2035 * anymore, this should go away.
2037 G_PART_DUMPCONF(table, entry, sb, indent);
2038 } else if (cp != NULL) { /* Consumer configuration. */
2039 KASSERT(pp == NULL, ("%s", __func__));
2041 } else if (pp != NULL) { /* Provider configuration. */
2042 entry = pp->private;
2045 sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
2046 (uintmax_t)entry->gpe_start);
2047 sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
2048 (uintmax_t)entry->gpe_end);
2049 sbuf_printf(sb, "%s<index>%u</index>\n", indent,
2051 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
2052 G_PART_TYPE(table, entry, buf, sizeof(buf)));
2053 sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
2054 (uintmax_t)entry->gpe_offset);
2055 sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
2056 (uintmax_t)pp->mediasize);
2057 G_PART_DUMPCONF(table, entry, sb, indent);
2058 } else { /* Geom configuration. */
2059 sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
2060 table->gpt_scheme->name);
2061 sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
2062 table->gpt_entries);
2063 sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
2064 (uintmax_t)table->gpt_first);
2065 sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
2066 (uintmax_t)table->gpt_last);
2067 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
2068 table->gpt_sectors);
2069 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
2071 sbuf_printf(sb, "%s<state>%s</state>\n", indent,
2072 table->gpt_corrupt ? "CORRUPT": "OK");
2073 sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
2074 table->gpt_opened ? "true": "false");
2075 G_PART_DUMPCONF(table, NULL, sb, indent);
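/*
 * The plain-text branch above feeds kern.geom.conftxt (the old libdisk
 * style dump); the XML branches feed kern.geom.confxml, which libgeom and
 * gpart(8) parse.
 */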
2080 * This start routine is only called for non-trivial requests, all the
2081 * trivial ones are handled autonomously by the slice code.
2082 * For requests we handle here, we must call g_io_deliver() on the
2083 * bio, and return non-zero to indicate to the slice code that we did so.
2084 * This code executes in the "DOWN" I/O path, which means:
2086 * * Don't grab the topology lock.
2087 * * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
2090 g_part_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
2092 struct g_part_table *table;
2094 table = pp->geom->softc;
2095 return G_PART_IOCTL(table, pp, cmd, data, fflag, td);
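/*
 * Called when the underlying provider changes size.  If auto-resize is
 * enabled, the scheme gets a chance to grow the table to the new media
 * size; the result is left uncommitted so the administrator can accept it
 * with "gpart commit" or roll it back with "gpart undo".
 */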
2099 g_part_resize(struct g_consumer *cp)
2101 struct g_part_table *table;
2103 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2104 g_topology_assert();
2106 if (auto_resize == 0)
2109 table = cp->geom->softc;
2110 if (table->gpt_opened == 0) {
2111 if (g_access(cp, 1, 1, 1) != 0)
2113 table->gpt_opened = 1;
2115 if (G_PART_RESIZE(table, NULL, NULL) == 0)
2116 printf("GEOM_PART: %s was automatically resized.\n"
2117 " Use `gpart commit %s` to save changes or "
2118 "`gpart undo %s` to revert them.\n", cp->geom->name,
2119 cp->geom->name, cp->geom->name);
2120 if (g_part_check_integrity(table, cp) != 0) {
2121 g_access(cp, -1, -1, -1);
2122 table->gpt_opened = 0;
2123 g_part_wither(table->gpt_gp, ENXIO);
2128 g_part_orphan(struct g_consumer *cp)
2130 struct g_provider *pp;
2131 struct g_part_table *table;
2134 KASSERT(pp != NULL, ("%s", __func__));
2135 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
2136 g_topology_assert();
2138 KASSERT(pp->error != 0, ("%s", __func__));
2139 table = cp->geom->softc;
2140 if (table != NULL && table->gpt_opened)
2141 g_access(cp, -1, -1, -1);
2142 g_part_wither(cp->geom, pp->error);
2146 g_part_spoiled(struct g_consumer *cp)
2149 G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
2150 g_topology_assert();
2152 cp->flags |= G_CF_ORPHAN;
2153 g_part_wither(cp->geom, ENXIO);
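/*
 * I/O start method.  Read and write requests are cloned and passed down
 * to the parent consumer with the entry's byte offset added; GETATTR
 * requests answer partition-specific attributes and kernel dump setup.
 */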
2157 g_part_start(struct bio *bp)
2160 struct g_consumer *cp;
2162 struct g_part_entry *entry;
2163 struct g_part_table *table;
2164 struct g_kerneldump *gkd;
2165 struct g_provider *pp;
2168 biotrack(bp, __func__);
2173 cp = LIST_FIRST(&gp->consumer);
2175 G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
2178 entry = pp->private;
2179 if (entry == NULL) {
2180 g_io_deliver(bp, ENXIO);
2184 switch(bp->bio_cmd) {
2188 if (bp->bio_offset >= pp->mediasize) {
2189 g_io_deliver(bp, EIO);
2192 bp2 = g_clone_bio(bp);
2194 g_io_deliver(bp, ENOMEM);
2197 if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
2198 bp2->bio_length = pp->mediasize - bp2->bio_offset;
2199 bp2->bio_done = g_std_done;
2200 bp2->bio_offset += entry->gpe_offset;
2201 g_io_request(bp2, cp);
2206 if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
2208 if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
2210 if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
2212 if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
2214 if (g_handleattr_str(bp, "PART::scheme",
2215 table->gpt_scheme->name))
2217 if (g_handleattr_str(bp, "PART::type",
2218 G_PART_TYPE(table, entry, buf, sizeof(buf))))
2220 if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
2222 * Check that the partition is suitable for kernel
2223 * dumps. Typically only swap partitions should be
2224 * used. If the request comes from the nested scheme
2225 * we allow dumping there as well.
2227 if ((bp->bio_from == NULL ||
2228 bp->bio_from->geom->class != &g_part_class) &&
2229 G_PART_DUMPTO(table, entry) == 0) {
2230 g_io_deliver(bp, ENODEV);
2231 printf("GEOM_PART: Partition '%s' not suitable"
2232 " for kernel dumps (wrong type?)\n",
2236 gkd = (struct g_kerneldump *)bp->bio_data;
2237 if (gkd->offset >= pp->mediasize) {
2238 g_io_deliver(bp, EIO);
2241 if (gkd->offset + gkd->length > pp->mediasize)
2242 gkd->length = pp->mediasize - gkd->offset;
2243 gkd->offset += entry->gpe_offset;
2247 g_io_deliver(bp, EOPNOTSUPP);
2251 bp2 = g_clone_bio(bp);
2253 g_io_deliver(bp, ENOMEM);
2256 bp2->bio_done = g_std_done;
2257 g_io_request(bp2, cp);
2261 g_part_init(struct g_class *mp)
2264 TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
2268 g_part_fini(struct g_class *mp)
2271 TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
2275 g_part_unload_event(void *arg, int flag)
2277 struct g_consumer *cp;
2279 struct g_provider *pp;
2280 struct g_part_scheme *scheme;
2281 struct g_part_table *table;
2285 if (flag == EV_CANCEL)
2290 scheme = (void *)(*xchg);
2292 g_topology_assert();
2294 LIST_FOREACH(gp, &g_part_class.geom, geom) {
2296 if (table->gpt_scheme != scheme)
2300 LIST_FOREACH(pp, &gp->provider, provider)
2301 acc += pp->acr + pp->acw + pp->ace;
2302 LIST_FOREACH(cp, &gp->consumer, consumer)
2303 acc += cp->acr + cp->acw + cp->ace;
2306 g_part_wither(gp, ENOSYS);
2312 TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
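/*
 * Scheme modules register and deregister themselves through this handler:
 * loading inserts the scheme into g_part_schemes and triggers a retaste,
 * while unloading waits for g_part_unload_event() to verify that no geom
 * of that scheme is still in use.
 */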
2318 g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
2320 struct g_part_scheme *iter;
2327 TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
2328 if (scheme == iter) {
2329 printf("GEOM_PART: scheme %s is already "
2330 "registered!\n", scheme->name);
2335 TAILQ_INSERT_TAIL(&g_part_schemes, scheme,
2337 g_retaste(&g_part_class);
2341 arg = (uintptr_t)scheme;
2342 error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,