/*-
 * Copyright (c) 1999,2000 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 *    Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Generic driver for Compaq SMART RAID adapters.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/stat.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <sys/rman.h>

#include <geom/geom_disk.h>

#include <dev/ida/idareg.h>
#include <dev/ida/idavar.h>
#include <dev/ida/idaio.h>

/* prototypes */
static int ida_alloc_qcbs(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
static void ida_start(struct ida_softc *ida);
static void ida_startio(struct ida_softc *ida);
static void ida_startup(void *arg);
static void ida_timeout(void *arg);
static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);

static d_ioctl_t ida_ioctl;
static struct cdevsw ida_cdevsw = {
        .d_version =    D_VERSION,
        .d_ioctl =      ida_ioctl,
        .d_name =       "ida",
};

void
ida_free(struct ida_softc *ida)
{
        int i;

        if (ida->ih != NULL)
                bus_teardown_intr(ida->dev, ida->irq, ida->ih);

        mtx_lock(&ida->lock);
        callout_stop(&ida->ch);
        mtx_unlock(&ida->lock);
        callout_drain(&ida->ch);

        if (ida->buffer_dmat) {
                for (i = 0; i < IDA_QCB_MAX; i++)
                        bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);
                bus_dma_tag_destroy(ida->buffer_dmat);
        }

        if (ida->hwqcb_dmat) {
                if (ida->hwqcb_busaddr)
                        bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);
                if (ida->hwqcbs)
                        bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
                            ida->hwqcb_dmamap);
                bus_dma_tag_destroy(ida->hwqcb_dmat);
        }

        if (ida->qcbs != NULL)
                free(ida->qcbs, M_DEVBUF);

        if (ida->irq != NULL)
                bus_release_resource(ida->dev, ida->irq_res_type,
                    0, ida->irq);

        if (ida->parent_dmat != NULL)
                bus_dma_tag_destroy(ida->parent_dmat);

        if (ida->regs != NULL)
                bus_release_resource(ida->dev, ida->regs_res_type,
                    ida->regs_res_id, ida->regs);

        mtx_destroy(&ida->lock);
}

/*
 * record bus address from bus_dmamap_load
 */
static void
ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *baddr;

        baddr = (bus_addr_t *)arg;
        *baddr = segs->ds_addr;
}

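/*
 * Grab a command block from the free list and clear its hardware header
 * and request area; returns NULL when no QCBs are available.
 */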
static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
        struct ida_qcb *qcb;

        if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
                SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
                bzero(qcb->hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));
        }
        return (qcb);
}

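/*
 * Return a command block to the free list, resetting its per-command state.
 */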
static __inline void
ida_free_qcb(struct ida_softc *ida, struct ida_qcb *qcb)
{

        qcb->state = QCB_FREE;
        qcb->buf = NULL;
        qcb->error = 0;
        SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
}

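/*
 * Translate a hardware QCB's kernel virtual address into the bus address
 * handed to the controller, and map a completed bus address back to the
 * owning software QCB.
 */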
static __inline bus_addr_t
idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
{
        return (ida->hwqcb_busaddr +
            ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
}

static __inline struct ida_qcb *
idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
{
        struct ida_hardware_qcb *hwqcb;

        hwqcb = (struct ida_hardware_qcb *)
            ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
        return (hwqcb->qcb);
}

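/*
 * Create a buffer DMA map for each command block and link it with its
 * hardware QCB before placing it on the free list.
 */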
static int
ida_alloc_qcbs(struct ida_softc *ida)
{
        struct ida_qcb *qcb;
        int error, i;

        for (i = 0; i < IDA_QCB_MAX; i++) {
                qcb = &ida->qcbs[i];

                error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
                if (error != 0)
                        return (error);

                qcb->ida = ida;
                qcb->flags = QCB_FREE;
                qcb->hwqcb = &ida->hwqcbs[i];
                qcb->hwqcb->qcb = qcb;
                qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
                SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
        }
        return (0);
}

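/*
 * One-time controller setup: allocate DMA tags and command blocks, query
 * the controller for its drive count, create the child drive devices and
 * the /dev/ida%d control node.
 */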
int
ida_init(struct ida_softc *ida)
{
        struct ida_controller_info cinfo;
        device_t child;
        int error, i, unit;

        SLIST_INIT(&ida->free_qcbs);
        STAILQ_INIT(&ida->qcb_queue);
        bioq_init(&ida->bio_queue);

        ida->qcbs = (struct ida_qcb *)
            malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF,
                M_NOWAIT | M_ZERO);
        if (ida->qcbs == NULL)
                return (ENOMEM);

        /*
         * Create our DMA tags
         */

        /* DMA tag for our hardware QCB structures */
        error = bus_dma_tag_create(
                /* parent       */ ida->parent_dmat,
                /* alignment    */ 1,
                /* boundary     */ 0,
                /* lowaddr      */ BUS_SPACE_MAXADDR,
                /* highaddr     */ BUS_SPACE_MAXADDR,
                /* filter       */ NULL,
                /* filterarg    */ NULL,
                /* maxsize      */ IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
                /* nsegments    */ 1,
                /* maxsegsz     */ BUS_SPACE_MAXSIZE_32BIT,
                /* flags        */ 0,
                /* lockfunc     */ NULL,
                /* lockarg      */ NULL,
                &ida->hwqcb_dmat);
        if (error)
                return (ENOMEM);

        /* DMA tag for mapping buffers into device space */
        error = bus_dma_tag_create(
                /* parent       */ ida->parent_dmat,
                /* alignment    */ 1,
                /* boundary     */ 0,
                /* lowaddr      */ BUS_SPACE_MAXADDR,
                /* highaddr     */ BUS_SPACE_MAXADDR,
                /* filter       */ NULL,
                /* filterarg    */ NULL,
                /* maxsize      */ MAXBSIZE,
                /* nsegments    */ IDA_NSEG,
                /* maxsegsz     */ BUS_SPACE_MAXSIZE_32BIT,
                /* flags        */ 0,
                /* lockfunc     */ busdma_lock_mutex,
                /* lockarg      */ &Giant,
                &ida->buffer_dmat);
        if (error)
                return (ENOMEM);

        /* Allocation of hardware QCBs */
        /* XXX allocation is rounded to hardware page size */
        error = bus_dmamem_alloc(ida->hwqcb_dmat,
            (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
        if (error)
                return (ENOMEM);

        /* And permanently map them in */
        bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
            ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
            ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

        bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

        error = ida_alloc_qcbs(ida);
        if (error)
                return (error);

        mtx_lock(&ida->lock);
        ida->cmd.int_enable(ida, 0);

        error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
            IDA_CONTROLLER, 0, DMA_DATA_IN);
        if (error) {
                mtx_unlock(&ida->lock);
                device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
                return (error);
        }

        device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
            cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
            cinfo.firm_rev[2], cinfo.firm_rev[3]);

        if (ida->flags & IDA_FIRMWARE) {
                int data;

                error = ida_command(ida, CMD_START_FIRMWARE,
                    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
                if (error) {
                        mtx_unlock(&ida->lock);
                        device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
                        return (error);
                }
        }

        ida->cmd.int_enable(ida, 1);
        ida->flags |= IDA_ATTACHED;
        mtx_unlock(&ida->lock);

        for (i = 0; i < cinfo.num_drvs; i++) {
                child = device_add_child(ida->dev, /*"idad"*/NULL, -1);
                if (child != NULL)
                        device_set_ivars(child, (void *)(intptr_t)i);
        }

        ida->ich.ich_func = ida_startup;
        ida->ich.ich_arg = ida;
        if (config_intrhook_establish(&ida->ich) != 0) {
                device_delete_children(ida->dev);
                device_printf(ida->dev, "Cannot establish configuration hook\n");
                return (error);
        }

        unit = device_get_unit(ida->dev);
        ida->ida_dev_t = make_dev(&ida_cdevsw, unit,
                                 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
                                 "ida%d", unit);
        ida->ida_dev_t->si_drv1 = ida;

        return (0);
}

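/*
 * Configuration intrhook callback: attach the logical drive children once
 * interrupts are available.
 */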
static void
ida_startup(void *arg)
{
        struct ida_softc *ida;

        ida = arg;

        config_intrhook_disestablish(&ida->ich);

        mtx_lock(&Giant);
        bus_generic_attach(ida->dev);
        mtx_unlock(&Giant);
}

int
ida_detach(device_t dev)
{
        struct ida_softc *ida;
        int error;

        ida = (struct ida_softc *)device_get_softc(dev);

        error = bus_generic_detach(dev);
        if (error)
                return (error);
        error = device_delete_children(dev);
        if (error)
                return (error);

        /*
         * XXX
         * before detaching, we must make sure that the system is
         * quiescent; nothing mounted, no pending activity.
         */

        /*
         * XXX
         * now, how are we supposed to maintain a list of our drives?
         * iterate over our "child devices"?
         */

        destroy_dev(ida->ida_dev_t);
        ida_free(ida);
        return (error);
}

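/*
 * bus_dmamap_load callback: fill in the scatter/gather list for a mapped
 * request, sync the buffers, and queue the command to the controller.
 */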
static void
ida_data_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
        struct ida_hardware_qcb *hwqcb;
        struct ida_softc *ida;
        struct ida_qcb *qcb;
        bus_dmasync_op_t op;
        int i;

        qcb = arg;
        ida = qcb->ida;
        if (!dumping)
                mtx_assert(&ida->lock, MA_OWNED);
        if (error) {
                qcb->error = error;
                ida_done(ida, qcb);
                return;
        }

        hwqcb = qcb->hwqcb;
        hwqcb->hdr.size = htole16((sizeof(struct ida_req) +
            sizeof(struct ida_sgb) * IDA_NSEG) >> 2);

        for (i = 0; i < nsegments; i++) {
                hwqcb->seg[i].addr = htole32(segs[i].ds_addr);
                hwqcb->seg[i].length = htole32(segs[i].ds_len);
        }
        hwqcb->req.sgcount = nsegments;
        if (qcb->flags & DMA_DATA_TRANSFER) {
                switch (qcb->flags & DMA_DATA_TRANSFER) {
                case DMA_DATA_TRANSFER:
                        op = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE;
                        break;
                case DMA_DATA_IN:
                        op = BUS_DMASYNC_PREREAD;
                        break;
                default:
                        KASSERT((qcb->flags & DMA_DATA_TRANSFER) ==
                            DMA_DATA_OUT, ("bad DMA data flags"));
                        op = BUS_DMASYNC_PREWRITE;
                        break;
                }
                bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
        }
        bus_dmamap_sync(ida->hwqcb_dmat, ida->hwqcb_dmamap,
            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

        STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
        ida_start(ida);
        ida->flags &= ~IDA_QFROZEN;
}

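/*
 * Map a data buffer for DMA; if the load is deferred (EINPROGRESS), freeze
 * the queue until ida_data_cb() runs and unfreezes it.
 */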
static int
ida_map_qcb(struct ida_softc *ida, struct ida_qcb *qcb, void *data,
    bus_size_t datasize)
{
        int error, flags;

        if (ida->flags & IDA_INTERRUPTS)
                flags = BUS_DMA_WAITOK;
        else
                flags = BUS_DMA_NOWAIT;
        error = bus_dmamap_load(ida->buffer_dmat, qcb->dmamap, data, datasize,
            ida_data_cb, qcb, flags);
        if (error == EINPROGRESS) {
                ida->flags |= IDA_QFROZEN;
                error = 0;
        }
        return (error);
}

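/*
 * Issue a single controller command and wait for its completion; used for
 * configuration and ioctl requests, while regular disk I/O goes through
 * ida_submit_buf().
 */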
int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
        int drive, u_int32_t pblkno, int flags)
{
        struct ida_hardware_qcb *hwqcb;
        struct ida_qcb *qcb;
        int error;

        if (!dumping)
                mtx_assert(&ida->lock, MA_OWNED);
        qcb = ida_get_qcb(ida);

        if (qcb == NULL) {
                device_printf(ida->dev, "out of QCBs\n");
                return (EAGAIN);
        }

        qcb->flags = flags | IDA_COMMAND;
        hwqcb = qcb->hwqcb;
        hwqcb->hdr.drive = drive;
        hwqcb->req.blkno = htole32(pblkno);
        hwqcb->req.bcount = htole16(howmany(datasize, DEV_BSIZE));
        hwqcb->req.command = command;

        error = ida_map_qcb(ida, qcb, data, datasize);
        if (error == 0) {
                error = ida_wait(ida, qcb);
                /* Don't free QCB on a timeout in case it later completes. */
                if (error)
                        return (error);
                error = qcb->error;
        }

        /* XXX should have status returned here? */
        /* XXX have "status pointer" area in QCB? */

        ida_free_qcb(ida, qcb);
        return (error);
}

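/*
 * Entry point from the logical drive (idad) code: queue a bio and kick the
 * I/O engine.
 */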
void
ida_submit_buf(struct ida_softc *ida, struct bio *bp)
{
        mtx_lock(&ida->lock);
        bioq_insert_tail(&ida->bio_queue, bp);
        ida_startio(ida);
        mtx_unlock(&ida->lock);
}

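/*
 * Drain the bio queue: build a command block for each buffer and map it
 * for DMA, stopping when the queue is frozen or resources run out.
 */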
static void
ida_startio(struct ida_softc *ida)
{
        struct ida_hardware_qcb *hwqcb;
        struct ida_qcb *qcb;
        struct idad_softc *drv;
        struct bio *bp;
        int error;

        mtx_assert(&ida->lock, MA_OWNED);
        for (;;) {
                if (ida->flags & IDA_QFROZEN)
                        return;
                bp = bioq_first(&ida->bio_queue);
                if (bp == NULL)
                        return;                         /* no more buffers */

                qcb = ida_get_qcb(ida);
                if (qcb == NULL)
                        return;                         /* out of resources */

                bioq_remove(&ida->bio_queue, bp);
                qcb->buf = bp;
                qcb->flags = bp->bio_cmd == BIO_READ ? DMA_DATA_IN : DMA_DATA_OUT;

                hwqcb = qcb->hwqcb;
                drv = bp->bio_driver1;
                hwqcb->hdr.drive = drv->drive;
                hwqcb->req.blkno = bp->bio_pblkno;
                hwqcb->req.bcount = howmany(bp->bio_bcount, DEV_BSIZE);
                hwqcb->req.command = bp->bio_cmd == BIO_READ ? CMD_READ : CMD_WRITE;

                error = ida_map_qcb(ida, qcb, bp->bio_data, bp->bio_bcount);
                if (error) {
                        qcb->error = error;
                        ida_done(ida, qcb);
                }
        }
}

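/*
 * Feed queued command blocks to the controller until its command FIFO
 * fills, arming the watchdog callout for the first outstanding command.
 */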
static void
ida_start(struct ida_softc *ida)
{
        struct ida_qcb *qcb;

        if (!dumping)
                mtx_assert(&ida->lock, MA_OWNED);
        while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
                if (ida->cmd.fifo_full(ida))
                        break;
                STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
                /*
                 * XXX
                 * place the qcb on an active list?
                 */

                /* Set a timeout. */
                if (!ida->qactive && !dumping)
                        callout_reset(&ida->ch, hz * 5, ida_timeout, ida);
                ida->qactive++;

                qcb->state = QCB_ACTIVE;
                ida->cmd.submit(ida, qcb);
        }
}

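/*
 * Wait for a specific command to complete: sleep on the QCB when interrupts
 * are enabled, otherwise poll the completion FIFO.
 */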
static int
ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
{
        struct ida_qcb *qcb_done = NULL;
        bus_addr_t completed;
        int delay;

        if (!dumping)
                mtx_assert(&ida->lock, MA_OWNED);
        if (ida->flags & IDA_INTERRUPTS) {
                if (mtx_sleep(qcb, &ida->lock, PRIBIO, "idacmd", 5 * hz)) {
                        qcb->state = QCB_TIMEDOUT;
                        return (ETIMEDOUT);
                }
                return (0);
        }

again:
        delay = 5 * 1000 * 100;                 /* 5 sec delay */
        while ((completed = ida->cmd.done(ida)) == 0) {
                if (delay-- == 0) {
                        qcb->state = QCB_TIMEDOUT;
                        return (ETIMEDOUT);
                }
                DELAY(10);
        }

        qcb_done = idahwqcbptov(ida, completed & ~3);
        if (qcb_done != qcb)
                goto again;
        ida_done(ida, qcb);
        return (0);
}

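/*
 * Interrupt handler: reap completed commands from the controller's done
 * FIFO and restart queued I/O.
 */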
void
ida_intr(void *data)
{
        struct ida_softc *ida;
        struct ida_qcb *qcb;
        bus_addr_t completed;

        ida = (struct ida_softc *)data;

        mtx_lock(&ida->lock);
        if (ida->cmd.int_pending(ida) == 0) {
                mtx_unlock(&ida->lock);
                return;                         /* not our interrupt */
        }

        while ((completed = ida->cmd.done(ida)) != 0) {
                qcb = idahwqcbptov(ida, completed & ~3);

                if (qcb == NULL || qcb->state != QCB_ACTIVE) {
                        device_printf(ida->dev,
                            "ignoring completion %jx\n", (intmax_t)completed);
                        continue;
                }
                /* Handle "Bad Command List" errors. */
                if ((completed & 3) && (qcb->hwqcb->req.error == 0))
                        qcb->hwqcb->req.error = CMD_REJECTED;
                ida_done(ida, qcb);
        }
        ida_startio(ida);
        mtx_unlock(&ida->lock);
}

/*
 * should switch out command type; may be status, not just I/O.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
        bus_dmasync_op_t op;
        int active, error = 0;

        /*
         * finish up command
         */
        if (!dumping)
                mtx_assert(&ida->lock, MA_OWNED);
        active = (qcb->state != QCB_FREE);
        if (qcb->flags & DMA_DATA_TRANSFER && active) {
                switch (qcb->flags & DMA_DATA_TRANSFER) {
                case DMA_DATA_TRANSFER:
                        op = BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
                        break;
                case DMA_DATA_IN:
                        op = BUS_DMASYNC_POSTREAD;
                        break;
                default:
                        KASSERT((qcb->flags & DMA_DATA_TRANSFER) ==
                            DMA_DATA_OUT, ("bad DMA data flags"));
                        op = BUS_DMASYNC_POSTWRITE;
                        break;
                }
                bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
                bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
        }
        if (active)
                bus_dmamap_sync(ida->hwqcb_dmat, ida->hwqcb_dmamap,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        if (qcb->hwqcb->req.error & SOFT_ERROR) {
                if (qcb->buf)
                        device_printf(ida->dev, "soft %s error\n",
                                qcb->buf->bio_cmd == BIO_READ ?
                                        "read" : "write");
                else
                        device_printf(ida->dev, "soft error\n");
        }
        if (qcb->hwqcb->req.error & HARD_ERROR) {
                error = 1;
                if (qcb->buf)
                        device_printf(ida->dev, "hard %s error\n",
                                qcb->buf->bio_cmd == BIO_READ ?
                                        "read" : "write");
                else
                        device_printf(ida->dev, "hard error\n");
        }
        if (qcb->hwqcb->req.error & CMD_REJECTED) {
                error = 1;
                device_printf(ida->dev, "invalid request\n");
        }
        if (qcb->error) {
                error = 1;
                device_printf(ida->dev, "request failed to map: %d\n", qcb->error);
        }

        if (qcb->flags & IDA_COMMAND) {
                if (ida->flags & IDA_INTERRUPTS)
                        wakeup(qcb);
                if (qcb->state == QCB_TIMEDOUT)
                        ida_free_qcb(ida, qcb);
        } else {
                KASSERT(qcb->buf != NULL, ("ida_done(): qcb->buf is NULL!"));
                if (error)
                        qcb->buf->bio_flags |= BIO_ERROR;
                idad_intr(qcb->buf);
                ida_free_qcb(ida, qcb);
        }

        if (!active)
                return;

        ida->qactive--;
        /* Reschedule or cancel timeout */
        if (ida->qactive)
                callout_reset(&ida->ch, hz * 5, ida_timeout, ida);
        else
                callout_stop(&ida->ch);
}

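/*
 * Watchdog callout: report outstanding commands and dump controller
 * register state when a command appears stuck.
 */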
static void
ida_timeout(void *arg)
{
        struct ida_softc *ida;

        ida = (struct ida_softc *)arg;
        device_printf(ida->dev, "%s() qactive %d\n", __func__, ida->qactive);

        if (ida->flags & IDA_INTERRUPTS)
                device_printf(ida->dev, "IDA_INTERRUPTS\n");

        device_printf(ida->dev, "\t   R_CMD_FIFO: %08x\n"
                                "\t  R_DONE_FIFO: %08x\n"
                                "\t   R_INT_MASK: %08x\n"
                                "\t     R_STATUS: %08x\n"
                                "\tR_INT_PENDING: %08x\n",
                                        ida_inl(ida, R_CMD_FIFO),
                                        ida_inl(ida, R_DONE_FIFO),
                                        ida_inl(ida, R_INT_MASK),
                                        ida_inl(ida, R_STATUS),
                                        ida_inl(ida, R_INT_PENDING));

        return;
}

/*
 * IOCTL stuff follows.
 */
struct cmd_info {
        int     cmd;
        int     len;
        int     flags;
};
static struct cmd_info *ida_cmd_lookup(int);

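/*
 * Pass a validated IDAIO_COMMAND request from userland through to
 * ida_command().
 */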
static int
ida_ioctl (struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
{
        struct ida_softc *sc;
        struct ida_user_command *uc;
        struct cmd_info *ci;
        int len;
        int flags;
        int error;
        int data;
        void *daddr;

        sc = (struct ida_softc *)dev->si_drv1;
        uc = (struct ida_user_command *)addr;
        error = 0;

        switch (cmd) {
        case IDAIO_COMMAND:
                ci = ida_cmd_lookup(uc->command);
                if (ci == NULL) {
                        error = EINVAL;
                        break;
                }
                len = ci->len;
                flags = ci->flags;
                if (len)
                        daddr = &uc->d.buf;
                else {
                        daddr = &data;
                        len = sizeof(data);
                }
                mtx_lock(&sc->lock);
                error = ida_command(sc, uc->command, daddr, len,
                                    uc->drive, uc->blkno, flags);
                mtx_unlock(&sc->lock);
                break;
        default:
                error = ENOIOCTL;
                break;
        }
        return (error);
}

static struct cmd_info ci_list[] = {
        { CMD_GET_LOG_DRV_INFO,
                        sizeof(struct ida_drive_info), DMA_DATA_IN },
        { CMD_GET_CTRL_INFO,
                        sizeof(struct ida_controller_info), DMA_DATA_IN },
        { CMD_SENSE_DRV_STATUS,
                        sizeof(struct ida_drive_status), DMA_DATA_IN },
        { CMD_START_RECOVERY,           0, 0 },
        { CMD_GET_PHYS_DRV_INFO,
                        sizeof(struct ida_phys_drv_info), DMA_DATA_TRANSFER },
        { CMD_BLINK_DRV_LEDS,
                        sizeof(struct ida_blink_drv_leds), DMA_DATA_OUT },
        { CMD_SENSE_DRV_LEDS,
                        sizeof(struct ida_blink_drv_leds), DMA_DATA_IN },
        { CMD_GET_LOG_DRV_EXT,
                        sizeof(struct ida_drive_info_ext), DMA_DATA_IN },
        { CMD_RESET_CTRL,               0, 0 },
        { CMD_GET_CONFIG,               0, 0 },
        { CMD_SET_CONFIG,               0, 0 },
        { CMD_LABEL_LOG_DRV,
                        sizeof(struct ida_label_logical), DMA_DATA_OUT },
        { CMD_SET_SURFACE_DELAY,        0, 0 },
        { CMD_SENSE_BUS_PARAMS,         0, 0 },
        { CMD_SENSE_SUBSYS_INFO,        0, 0 },
        { CMD_SENSE_SURFACE_ATS,        0, 0 },
        { CMD_PASSTHROUGH,              0, 0 },
        { CMD_RESET_SCSI_DEV,           0, 0 },
        { CMD_PAUSE_BG_ACT,             0, 0 },
        { CMD_RESUME_BG_ACT,            0, 0 },
        { CMD_START_FIRMWARE,           0, 0 },
        { CMD_SENSE_DRV_ERR_LOG,        0, 0 },
        { CMD_START_CPM,                0, 0 },
        { CMD_SENSE_CP,                 0, 0 },
        { CMD_STOP_CPM,                 0, 0 },
        { CMD_FLUSH_CACHE,              0, 0 },
        { CMD_ACCEPT_MEDIA_EXCH,        0, 0 },
        { 0, 0, 0 }
};

static struct cmd_info *
ida_cmd_lookup (int command)
{
        struct cmd_info *ci;

        ci = ci_list;
        while (ci->cmd) {
                if (ci->cmd == command)
                        return (ci);
                ci++;
        }
        return (NULL);
}