/*-
 * Copyright (c) 2013  Peter Grehan <grehan@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/disk.h>

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <pthread_np.h>
#include <signal.h>
#include <unistd.h>

#include <machine/atomic.h>

#include "bhyverun.h"
#include "mevent.h"
#include "block_if.h"

#define BLOCKIF_SIG     0xb109b109

#define BLOCKIF_NUMTHR  8
#define BLOCKIF_MAXREQ  (64 + BLOCKIF_NUMTHR)

enum blockop {
        BOP_READ,
        BOP_WRITE,
        BOP_FLUSH,
        BOP_DELETE
};

enum blockstat {
        BST_FREE,
        BST_BLOCK,
        BST_PEND,
        BST_BUSY,
        BST_DONE
};

struct blockif_elem {
        TAILQ_ENTRY(blockif_elem) be_link;
        struct blockif_req  *be_req;
        enum blockop         be_op;
        enum blockstat       be_status;
        pthread_t            be_tid;
        off_t                be_block;
};

struct blockif_ctxt {
        int                     bc_magic;
        int                     bc_fd;
        int                     bc_ischr;
        int                     bc_isgeom;
        int                     bc_candelete;
        int                     bc_rdonly;
        off_t                   bc_size;
        int                     bc_sectsz;
        int                     bc_psectsz;
        int                     bc_psectoff;
        int                     bc_closing;
        pthread_t               bc_btid[BLOCKIF_NUMTHR];
        pthread_mutex_t         bc_mtx;
        pthread_cond_t          bc_cond;

        /* Request elements and free/pending/busy queues */
        TAILQ_HEAD(, blockif_elem) bc_freeq;
        TAILQ_HEAD(, blockif_elem) bc_pendq;
        TAILQ_HEAD(, blockif_elem) bc_busyq;
        struct blockif_elem     bc_reqs[BLOCKIF_MAXREQ];
};

static pthread_once_t blockif_once = PTHREAD_ONCE_INIT;

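/*
 * Tracks a thread waiting in blockif_cancel() for an in-flight request.
 * Entries are pushed onto a lock-free singly-linked list headed by
 * blockif_bse_head and drained by blockif_sigcont_handler().
 */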
struct blockif_sig_elem {
        pthread_mutex_t                 bse_mtx;
        pthread_cond_t                  bse_cond;
        int                             bse_pending;
        struct blockif_sig_elem         *bse_next;
};

static struct blockif_sig_elem *blockif_bse_head;

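/*
 * Grab a free request element, record the request's end offset and place
 * the element on the pending queue.  If another pending or in-flight
 * request ends exactly where this one begins, the element is marked
 * BST_BLOCK so it is not picked up until that earlier request completes
 * (see blockif_complete()), preserving ordering for adjacent I/O.
 * Returns 1 if the new element is immediately runnable, 0 if blocked.
 */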
static int
blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq,
                enum blockop op)
{
        struct blockif_elem *be, *tbe;
        off_t off;
        int i;

        be = TAILQ_FIRST(&bc->bc_freeq);
        assert(be != NULL);
        assert(be->be_status == BST_FREE);
        TAILQ_REMOVE(&bc->bc_freeq, be, be_link);
        be->be_req = breq;
        be->be_op = op;
        switch (op) {
        case BOP_READ:
        case BOP_WRITE:
        case BOP_DELETE:
                off = breq->br_offset;
                for (i = 0; i < breq->br_iovcnt; i++)
                        off += breq->br_iov[i].iov_len;
                break;
        default:
                off = OFF_MAX;
        }
        be->be_block = off;
        TAILQ_FOREACH(tbe, &bc->bc_pendq, be_link) {
                if (tbe->be_block == breq->br_offset)
                        break;
        }
        if (tbe == NULL) {
                TAILQ_FOREACH(tbe, &bc->bc_busyq, be_link) {
                        if (tbe->be_block == breq->br_offset)
                                break;
                }
        }
        if (tbe == NULL)
                be->be_status = BST_PEND;
        else
                be->be_status = BST_BLOCK;
        TAILQ_INSERT_TAIL(&bc->bc_pendq, be, be_link);
        return (be->be_status == BST_PEND);
}

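/*
 * Find the first runnable (BST_PEND) element on the pending queue, mark
 * it busy, assign it to the calling worker thread and move it to the busy
 * queue.  Returns 1 and stores the element in *bep on success, 0 if the
 * queue is empty or every pending element is blocked.
 */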
static int
blockif_dequeue(struct blockif_ctxt *bc, pthread_t t, struct blockif_elem **bep)
{
        struct blockif_elem *be;

        TAILQ_FOREACH(be, &bc->bc_pendq, be_link) {
                if (be->be_status == BST_PEND)
                        break;
                assert(be->be_status == BST_BLOCK);
        }
        if (be == NULL)
                return (0);
        TAILQ_REMOVE(&bc->bc_pendq, be, be_link);
        be->be_status = BST_BUSY;
        be->be_tid = t;
        TAILQ_INSERT_TAIL(&bc->bc_busyq, be, be_link);
        *bep = be;
        return (1);
}

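/*
 * Return a completed (or cancelled) element to the free list and mark as
 * runnable any pending requests that begin where this one ended.
 */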
static void
blockif_complete(struct blockif_ctxt *bc, struct blockif_elem *be)
{
        struct blockif_elem *tbe;

        if (be->be_status == BST_DONE || be->be_status == BST_BUSY)
                TAILQ_REMOVE(&bc->bc_busyq, be, be_link);
        else
                TAILQ_REMOVE(&bc->bc_pendq, be, be_link);
        TAILQ_FOREACH(tbe, &bc->bc_pendq, be_link) {
                if (tbe->be_req->br_offset == be->be_block)
                        tbe->be_status = BST_PEND;
        }
        be->be_tid = 0;
        be->be_status = BST_FREE;
        be->be_req = NULL;
        TAILQ_INSERT_TAIL(&bc->bc_freeq, be, be_link);
}

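/*
 * Execute a single request and invoke its completion callback.  When the
 * backing store is a GEOM device and the request carries more than one
 * iovec segment, the data is staged through the caller-supplied bounce
 * buffer (up to MAXPHYS bytes at a time) so the device sees plain
 * pread()/pwrite() calls; all other requests are issued directly with
 * preadv()/pwritev().
 */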
static void
blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
{
        struct blockif_req *br;
        off_t arg[2];
        ssize_t clen, len, off, boff, voff;
        int i, err;

        br = be->be_req;
        if (br->br_iovcnt <= 1)
                buf = NULL;
        err = 0;
        switch (be->be_op) {
        case BOP_READ:
                if (buf == NULL) {
                        if ((len = preadv(bc->bc_fd, br->br_iov, br->br_iovcnt,
                                   br->br_offset)) < 0)
                                err = errno;
                        else
                                br->br_resid -= len;
                        break;
                }
                i = 0;
                off = voff = 0;
                while (br->br_resid > 0) {
                        len = MIN(br->br_resid, MAXPHYS);
                        if (pread(bc->bc_fd, buf, len, br->br_offset +
                            off) < 0) {
                                err = errno;
                                break;
                        }
                        boff = 0;
                        do {
                                clen = MIN(len - boff, br->br_iov[i].iov_len -
                                    voff);
                                memcpy(br->br_iov[i].iov_base + voff,
                                    buf + boff, clen);
                                if (clen < br->br_iov[i].iov_len - voff)
                                        voff += clen;
                                else {
                                        i++;
                                        voff = 0;
                                }
                                boff += clen;
                        } while (boff < len);
                        off += len;
                        br->br_resid -= len;
                }
                break;
        case BOP_WRITE:
                if (bc->bc_rdonly) {
                        err = EROFS;
                        break;
                }
                if (buf == NULL) {
                        if ((len = pwritev(bc->bc_fd, br->br_iov, br->br_iovcnt,
                                    br->br_offset)) < 0)
                                err = errno;
                        else
                                br->br_resid -= len;
                        break;
                }
                i = 0;
                off = voff = 0;
                while (br->br_resid > 0) {
                        len = MIN(br->br_resid, MAXPHYS);
                        boff = 0;
                        do {
                                clen = MIN(len - boff, br->br_iov[i].iov_len -
                                    voff);
                                memcpy(buf + boff,
                                    br->br_iov[i].iov_base + voff, clen);
                                if (clen < br->br_iov[i].iov_len - voff)
                                        voff += clen;
                                else {
                                        i++;
                                        voff = 0;
                                }
                                boff += clen;
                        } while (boff < len);
                        if (pwrite(bc->bc_fd, buf, len, br->br_offset +
                            off) < 0) {
                                err = errno;
                                break;
                        }
                        off += len;
                        br->br_resid -= len;
                }
                break;
        case BOP_FLUSH:
                if (bc->bc_ischr) {
                        if (ioctl(bc->bc_fd, DIOCGFLUSH))
                                err = errno;
                } else if (fsync(bc->bc_fd))
                        err = errno;
                break;
        case BOP_DELETE:
                if (!bc->bc_candelete)
                        err = EOPNOTSUPP;
                else if (bc->bc_rdonly)
                        err = EROFS;
                else if (bc->bc_ischr) {
                        arg[0] = br->br_offset;
                        arg[1] = br->br_resid;
                        if (ioctl(bc->bc_fd, DIOCGDELETE, arg))
                                err = errno;
                        else
                                br->br_resid = 0;
                } else
                        err = EOPNOTSUPP;
                break;
        default:
                err = EINVAL;
                break;
        }

        be->be_status = BST_DONE;

        (*br->br_callback)(br, err);
}

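/*
 * Worker thread body.  Each of the BLOCKIF_NUMTHR threads loops pulling
 * runnable requests off the pending queue, dropping bc_mtx while the I/O
 * is in progress, and sleeps on bc_cond when there is nothing to do.
 * GEOM-backed contexts allocate a MAXPHYS bounce buffer for blockif_proc().
 * The loop exits once bc_closing is set by blockif_close().
 */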
static void *
blockif_thr(void *arg)
{
        struct blockif_ctxt *bc;
        struct blockif_elem *be;
        pthread_t t;
        uint8_t *buf;

        bc = arg;
        if (bc->bc_isgeom)
                buf = malloc(MAXPHYS);
        else
                buf = NULL;
        t = pthread_self();

        pthread_mutex_lock(&bc->bc_mtx);
        for (;;) {
                while (blockif_dequeue(bc, t, &be)) {
                        pthread_mutex_unlock(&bc->bc_mtx);
                        blockif_proc(bc, be, buf);
                        pthread_mutex_lock(&bc->bc_mtx);
                        blockif_complete(bc, be);
                }
                /* Check ctxt status here to see if exit requested */
                if (bc->bc_closing)
                        break;
                pthread_cond_wait(&bc->bc_cond, &bc->bc_mtx);
        }
        pthread_mutex_unlock(&bc->bc_mtx);

        if (buf)
                free(buf);
        pthread_exit(NULL);
        return (NULL);
}

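/*
 * SIGCONT handler, invoked from the mevent loop.  Drains the lock-free
 * list of blockif_sig_elem entries pushed by threads waiting in
 * blockif_cancel(), clearing each entry's pending flag and waking the
 * waiter.  The whole list is processed since the signal carries no
 * indication of which waiter it was meant for.
 */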
static void
blockif_sigcont_handler(int signal, enum ev_type type, void *arg)
{
        struct blockif_sig_elem *bse;

        for (;;) {
                /*
                 * Process the entire list even if not intended for
                 * this thread.
                 */
                do {
                        bse = blockif_bse_head;
                        if (bse == NULL)
                                return;
                } while (!atomic_cmpset_ptr((uintptr_t *)&blockif_bse_head,
                                            (uintptr_t)bse,
                                            (uintptr_t)bse->bse_next));

                pthread_mutex_lock(&bse->bse_mtx);
                bse->bse_pending = 0;
                pthread_cond_signal(&bse->bse_cond);
                pthread_mutex_unlock(&bse->bse_mtx);
        }
}

static void
blockif_init(void)
{
        mevent_add(SIGCONT, EVF_SIGNAL, blockif_sigcont_handler, NULL);
        (void) signal(SIGCONT, SIG_IGN);
}

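/*
 * Open the backing file or device named by the option string and build a
 * blockif context around it.  The option string is a comma-separated list
 * whose first element is the path; the optional "nocache" (O_DIRECT),
 * "sync" (O_SYNC) and "ro" elements may follow.  Character devices are
 * probed for their media/sector/stripe sizes, delete support and GEOM
 * provider name.  The worker threads are started before returning.
 */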
struct blockif_ctxt *
blockif_open(const char *optstr, const char *ident)
{
        char tname[MAXCOMLEN + 1];
        char name[MAXPATHLEN];
        char *nopt, *xopts;
        struct blockif_ctxt *bc;
        struct stat sbuf;
        struct diocgattr_arg arg;
        off_t size, psectsz, psectoff;
        int extra, fd, i, sectsz;
        int nocache, sync, ro, candelete, geom;

        pthread_once(&blockif_once, blockif_init);

        nocache = 0;
        sync = 0;
        ro = 0;

        /*
         * The first element in the optstring is always a pathname.
         * Optional elements follow
         */
        nopt = strdup(optstr);
        for (xopts = strtok(nopt, ",");
             xopts != NULL;
             xopts = strtok(NULL, ",")) {
                if (!strcmp(xopts, "nocache"))
                        nocache = 1;
                else if (!strcmp(xopts, "sync"))
                        sync = 1;
                else if (!strcmp(xopts, "ro"))
                        ro = 1;
        }

        extra = 0;
        if (nocache)
                extra |= O_DIRECT;
        if (sync)
                extra |= O_SYNC;

        fd = open(nopt, (ro ? O_RDONLY : O_RDWR) | extra);
        if (fd < 0 && !ro) {
                /* Retry with a read-only open if the read-write open failed */
                fd = open(nopt, O_RDONLY | extra);
                ro = 1;
        }

        if (fd < 0) {
                perror("Could not open backing file");
                return (NULL);
        }

        if (fstat(fd, &sbuf) < 0) {
                perror("Could not stat backing file");
                close(fd);
                return (NULL);
        }

        /*
         * Deal with raw devices
         */
        size = sbuf.st_size;
        sectsz = DEV_BSIZE;
        psectsz = psectoff = 0;
        candelete = geom = 0;
        if (S_ISCHR(sbuf.st_mode)) {
                if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
                    ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
                        perror("Could not fetch dev blk/sector size");
                        close(fd);
                        return (NULL);
                }
                assert(size != 0);
                assert(sectsz != 0);
                if (ioctl(fd, DIOCGSTRIPESIZE, &psectsz) == 0 && psectsz > 0)
                        ioctl(fd, DIOCGSTRIPEOFFSET, &psectoff);
                strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
                arg.len = sizeof(arg.value.i);
                if (ioctl(fd, DIOCGATTR, &arg) == 0)
                        candelete = arg.value.i;
                if (ioctl(fd, DIOCGPROVIDERNAME, name) == 0)
                        geom = 1;
        } else
                psectsz = sbuf.st_blksize;

        bc = calloc(1, sizeof(struct blockif_ctxt));
        if (bc == NULL) {
                close(fd);
                return (NULL);
        }

        bc->bc_magic = BLOCKIF_SIG;
        bc->bc_fd = fd;
        bc->bc_ischr = S_ISCHR(sbuf.st_mode);
        bc->bc_isgeom = geom;
        bc->bc_candelete = candelete;
        bc->bc_rdonly = ro;
        bc->bc_size = size;
        bc->bc_sectsz = sectsz;
        bc->bc_psectsz = psectsz;
        bc->bc_psectoff = psectoff;
        pthread_mutex_init(&bc->bc_mtx, NULL);
        pthread_cond_init(&bc->bc_cond, NULL);
        TAILQ_INIT(&bc->bc_freeq);
        TAILQ_INIT(&bc->bc_pendq);
        TAILQ_INIT(&bc->bc_busyq);
        for (i = 0; i < BLOCKIF_MAXREQ; i++) {
                bc->bc_reqs[i].be_status = BST_FREE;
                TAILQ_INSERT_HEAD(&bc->bc_freeq, &bc->bc_reqs[i], be_link);
        }

        for (i = 0; i < BLOCKIF_NUMTHR; i++) {
                pthread_create(&bc->bc_btid[i], NULL, blockif_thr, bc);
                snprintf(tname, sizeof(tname), "blk-%s-%d", ident, i);
                pthread_set_name_np(bc->bc_btid[i], tname);
        }

        return (bc);
}

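/*
 * Common entry point for the four request types below.  Enqueues the
 * request under bc_mtx and wakes one worker thread if the request is
 * immediately runnable.  Returns E2BIG when all request slots are in use;
 * callers must respect the limit advertised by blockif_queuesz().
 */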
static int
blockif_request(struct blockif_ctxt *bc, struct blockif_req *breq,
                enum blockop op)
{
        int err;

        err = 0;

        pthread_mutex_lock(&bc->bc_mtx);
        if (!TAILQ_EMPTY(&bc->bc_freeq)) {
                /*
                 * Enqueue and inform the block i/o thread
                 * that there is work available
                 */
                if (blockif_enqueue(bc, breq, op))
                        pthread_cond_signal(&bc->bc_cond);
        } else {
                /*
                 * Callers are not allowed to enqueue more than
                 * the specified blockif queue limit. Return an
                 * error to indicate that the queue length has been
                 * exceeded.
                 */
                err = E2BIG;
        }
        pthread_mutex_unlock(&bc->bc_mtx);

        return (err);
}

int
blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (blockif_request(bc, breq, BOP_READ));
}

int
blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (blockif_request(bc, breq, BOP_WRITE));
}

int
blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (blockif_request(bc, breq, BOP_FLUSH));
}

int
blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (blockif_request(bc, breq, BOP_DELETE));
}

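/*
 * Attempt to cancel an outstanding request.  A request still sitting on
 * the pending queue is retired immediately and 0 is returned.  If the
 * request is already being processed, the worker thread is interrupted
 * with SIGCONT until it is no longer busy; EBUSY is returned since the
 * completion callback may already have run.  EINVAL is returned if the
 * request cannot be found.
 */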
int
blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq)
{
        struct blockif_elem *be;

        assert(bc->bc_magic == BLOCKIF_SIG);

        pthread_mutex_lock(&bc->bc_mtx);
        /*
         * Check pending requests.
         */
        TAILQ_FOREACH(be, &bc->bc_pendq, be_link) {
                if (be->be_req == breq)
                        break;
        }
        if (be != NULL) {
                /*
                 * Found it.
                 */
                blockif_complete(bc, be);
                pthread_mutex_unlock(&bc->bc_mtx);

                return (0);
        }

        /*
         * Check in-flight requests.
         */
        TAILQ_FOREACH(be, &bc->bc_busyq, be_link) {
                if (be->be_req == breq)
                        break;
        }
        if (be == NULL) {
                /*
                 * Didn't find it.
                 */
                pthread_mutex_unlock(&bc->bc_mtx);
                return (EINVAL);
        }

        /*
         * Interrupt the processing thread to force it to return
         * prematurely via its normal callback path.
         */
        while (be->be_status == BST_BUSY) {
                struct blockif_sig_elem bse, *old_head;

                pthread_mutex_init(&bse.bse_mtx, NULL);
                pthread_cond_init(&bse.bse_cond, NULL);

                bse.bse_pending = 1;

                do {
                        old_head = blockif_bse_head;
                        bse.bse_next = old_head;
                } while (!atomic_cmpset_ptr((uintptr_t *)&blockif_bse_head,
                                            (uintptr_t)old_head,
                                            (uintptr_t)&bse));

                pthread_kill(be->be_tid, SIGCONT);

                pthread_mutex_lock(&bse.bse_mtx);
                while (bse.bse_pending)
                        pthread_cond_wait(&bse.bse_cond, &bse.bse_mtx);
                pthread_mutex_unlock(&bse.bse_mtx);
        }

        pthread_mutex_unlock(&bc->bc_mtx);

        /*
         * The processing thread has been interrupted.  Since it's not
         * clear if the callback has been invoked yet, return EBUSY.
         */
        return (EBUSY);
}

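/*
 * Shut the interface down: flag the context as closing, wake and join all
 * worker threads, then release the file descriptor and the context.
 * Requests still queued at this point are not cancelled (see the XXX
 * below).
 */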
int
blockif_close(struct blockif_ctxt *bc)
{
        void *jval;
        int err, i;

        err = 0;

        assert(bc->bc_magic == BLOCKIF_SIG);

        /*
         * Stop the block i/o threads
         */
        pthread_mutex_lock(&bc->bc_mtx);
        bc->bc_closing = 1;
        pthread_mutex_unlock(&bc->bc_mtx);
        pthread_cond_broadcast(&bc->bc_cond);
        for (i = 0; i < BLOCKIF_NUMTHR; i++)
                pthread_join(bc->bc_btid[i], &jval);

        /* XXX Cancel queued i/o's ??? */

        /*
         * Release resources
         */
        bc->bc_magic = 0;
        close(bc->bc_fd);
        free(bc);

        return (0);
}

/*
 * Return virtual C/H/S values for a given block. Use the algorithm
 * outlined in the VHD specification to calculate values.
 */
void
blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
{
        off_t sectors;          /* total sectors of the block dev */
        off_t hcyl;             /* cylinders times heads */
        uint16_t secpt;         /* sectors per track */
        uint8_t heads;

        assert(bc->bc_magic == BLOCKIF_SIG);

        sectors = bc->bc_size / bc->bc_sectsz;

        /* Clamp the size to the largest possible with CHS */
        if (sectors > 65535UL*16*255)
                sectors = 65535UL*16*255;

        if (sectors >= 65536UL*16*63) {
                secpt = 255;
                heads = 16;
                hcyl = sectors / secpt;
        } else {
                secpt = 17;
                hcyl = sectors / secpt;
                heads = (hcyl + 1023) / 1024;

                if (heads < 4)
                        heads = 4;

                if (hcyl >= (heads * 1024) || heads > 16) {
                        secpt = 31;
                        heads = 16;
                        hcyl = sectors / secpt;
                }
                if (hcyl >= (heads * 1024)) {
                        secpt = 63;
                        heads = 16;
                        hcyl = sectors / secpt;
                }
        }

        *c = hcyl / heads;
        *h = heads;
        *s = secpt;
}

/*
 * Accessors
 */
off_t
blockif_size(struct blockif_ctxt *bc)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (bc->bc_size);
}

int
blockif_sectsz(struct blockif_ctxt *bc)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (bc->bc_sectsz);
}

void
blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        *size = bc->bc_psectsz;
        *off = bc->bc_psectoff;
}

int
blockif_queuesz(struct blockif_ctxt *bc)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (BLOCKIF_MAXREQ - 1);
}

int
blockif_is_ro(struct blockif_ctxt *bc)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (bc->bc_rdonly);
}

int
blockif_candelete(struct blockif_ctxt *bc)
{

        assert(bc->bc_magic == BLOCKIF_SIG);
        return (bc->bc_candelete);
}