1 /*-
2  * Copyright (c) 2000 Michael Smith
3  * Copyright (c) 2003 Paul Saab
4  * Copyright (c) 2003 Vinod Kashyap
5  * Copyright (c) 2000 BSDi
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 /*
34  * FreeBSD-specific code.
35  */
36
37 #include <dev/twe/twe_compat.h>
38 #include <dev/twe/twereg.h>
39 #include <dev/twe/tweio.h>
40 #include <dev/twe/twevar.h>
41 #include <dev/twe/twe_tables.h>
42
43 #include <vm/vm.h>
44
45 static devclass_t       twe_devclass;
46
47 #ifdef TWE_DEBUG
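/* debug-only counters of bios entering and leaving the driver; printed by twe_report() */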
48 static u_int32_t        twed_bio_in;
49 #define TWED_BIO_IN     twed_bio_in++
50 static u_int32_t        twed_bio_out;
51 #define TWED_BIO_OUT    twed_bio_out++
52 #else
53 #define TWED_BIO_IN
54 #define TWED_BIO_OUT
55 #endif
56
57 static void     twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
58 static void     twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
59
60 /********************************************************************************
61  ********************************************************************************
62                                                          Control device interface
63  ********************************************************************************
64  ********************************************************************************/
65
66 static  d_open_t                twe_open;
67 static  d_close_t               twe_close;
68 static  d_ioctl_t               twe_ioctl_wrapper;
69
70 static struct cdevsw twe_cdevsw = {
71         .d_version =    D_VERSION,
72         .d_open =       twe_open,
73         .d_close =      twe_close,
74         .d_ioctl =      twe_ioctl_wrapper,
75         .d_name =       "twe",
76 };
77
78 /********************************************************************************
79  * Accept an open operation on the control device.
80  */
81 static int
82 twe_open(struct cdev *dev, int flags, int fmt, struct thread *td)
83 {
84     struct twe_softc            *sc = (struct twe_softc *)dev->si_drv1;
85
86     TWE_IO_LOCK(sc);
87     if (sc->twe_state & TWE_STATE_DETACHING) {
88         TWE_IO_UNLOCK(sc);
89         return (ENXIO);
90     }
91     sc->twe_state |= TWE_STATE_OPEN;
92     TWE_IO_UNLOCK(sc);
93     return(0);
94 }
95
96 /********************************************************************************
97  * Accept the last close on the control device.
98  */
99 static int
100 twe_close(struct cdev *dev, int flags, int fmt, struct thread *td)
101 {
102     struct twe_softc            *sc = (struct twe_softc *)dev->si_drv1;
103
104     TWE_IO_LOCK(sc);
105     sc->twe_state &= ~TWE_STATE_OPEN;
106     TWE_IO_UNLOCK(sc);
107     return (0);
108 }
109
110 /********************************************************************************
111  * Handle controller-specific control operations.
112  */
113 static int
114 twe_ioctl_wrapper(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
115 {
116     struct twe_softc            *sc = (struct twe_softc *)dev->si_drv1;
117     
118     return(twe_ioctl(sc, cmd, addr));
119 }
120
121 /********************************************************************************
122  ********************************************************************************
123                                                              PCI device interface
124  ********************************************************************************
125  ********************************************************************************/
126
127 static int      twe_probe(device_t dev);
128 static int      twe_attach(device_t dev);
129 static void     twe_free(struct twe_softc *sc);
130 static int      twe_detach(device_t dev);
131 static int      twe_shutdown(device_t dev);
132 static int      twe_suspend(device_t dev);
133 static int      twe_resume(device_t dev);
134 static void     twe_pci_intr(void *arg);
135 static void     twe_intrhook(void *arg);
136
137 static device_method_t twe_methods[] = {
138     /* Device interface */
139     DEVMETHOD(device_probe,     twe_probe),
140     DEVMETHOD(device_attach,    twe_attach),
141     DEVMETHOD(device_detach,    twe_detach),
142     DEVMETHOD(device_shutdown,  twe_shutdown),
143     DEVMETHOD(device_suspend,   twe_suspend),
144     DEVMETHOD(device_resume,    twe_resume),
145
146     DEVMETHOD_END
147 };
148
149 static driver_t twe_pci_driver = {
150         "twe",
151         twe_methods,
152         sizeof(struct twe_softc)
153 };
154
155 DRIVER_MODULE(twe, pci, twe_pci_driver, twe_devclass, 0, 0);
156
157 /********************************************************************************
158  * Match a 3ware Escalade ATA RAID controller.
159  */
160 static int
161 twe_probe(device_t dev)
162 {
163
164     debug_called(4);
165
166     if ((pci_get_vendor(dev) == TWE_VENDOR_ID) &&
167         ((pci_get_device(dev) == TWE_DEVICE_ID) || 
168          (pci_get_device(dev) == TWE_DEVICE_ID_ASIC))) {
169         device_set_desc_copy(dev, TWE_DEVICE_NAME ". Driver version " TWE_DRIVER_VERSION_STRING);
170         return(BUS_PROBE_DEFAULT);
171     }
172     return(ENXIO);
173 }
174
175 /********************************************************************************
176  * Allocate resources, initialise the controller.
177  */
178 static int
179 twe_attach(device_t dev)
180 {
181     struct twe_softc    *sc;
182     struct sysctl_oid   *sysctl_tree;
183     int                 rid, error;
184
185     debug_called(4);
186
187     /*
188      * Initialise the softc structure.
189      */
190     sc = device_get_softc(dev);
191     sc->twe_dev = dev;
192     mtx_init(&sc->twe_io_lock, "twe I/O", NULL, MTX_DEF);
193     sx_init(&sc->twe_config_lock, "twe config");
194
195     /*
196      * XXX: This sysctl tree must stay at hw.tweX rather than using
197      * the device_get_sysctl_tree() created by new-bus because
198      * existing 3rd party binary tools such as tw_cli and 3dm2 use the
199      * existence of this sysctl node to discover controllers.
200      */
201     sysctl_tree = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
202         SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
203         device_get_nameunit(dev), CTLFLAG_RD, 0, "");
204     if (sysctl_tree == NULL) {
205         twe_printf(sc, "cannot add sysctl tree node\n");
206         return (ENXIO);
207     }
208     SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(sysctl_tree),
209         OID_AUTO, "driver_version", CTLFLAG_RD, TWE_DRIVER_VERSION_STRING, 0,
210         "TWE driver version");
211
212     /*
213      * Force the busmaster enable bit on, in case the BIOS forgot.
214      */
215     pci_enable_busmaster(dev);
216
217     /*
218      * Allocate the PCI register window.
219      */
220     rid = TWE_IO_CONFIG_REG;
221     if ((sc->twe_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 
222         RF_ACTIVE)) == NULL) {
223         twe_printf(sc, "can't allocate register window\n");
224         twe_free(sc);
225         return(ENXIO);
226     }
227
228     /*
229      * Allocate the parent bus DMA tag appropriate for PCI.
230      */
231     if (bus_dma_tag_create(bus_get_dma_tag(dev),                /* PCI parent */
232                            1, 0,                                /* alignment, boundary */
233                            BUS_SPACE_MAXADDR_32BIT,             /* lowaddr */
234                            BUS_SPACE_MAXADDR,                   /* highaddr */
235                            NULL, NULL,                          /* filter, filterarg */
236                            BUS_SPACE_MAXSIZE_32BIT,             /* maxsize */
237                            BUS_SPACE_UNRESTRICTED,              /* nsegments */
238                            BUS_SPACE_MAXSIZE_32BIT,             /* maxsegsize */
239                            0,                                   /* flags */
240                            NULL,                                /* lockfunc */
241                            NULL,                                /* lockarg */
242                            &sc->twe_parent_dmat)) {
243         twe_printf(sc, "can't allocate parent DMA tag\n");
244         twe_free(sc);
245         return(ENOMEM);
246     }
247
248     /* 
249      * Allocate and connect our interrupt.
250      */
251     rid = 0;
252     if ((sc->twe_irq = bus_alloc_resource_any(sc->twe_dev, SYS_RES_IRQ,
253         &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
254         twe_printf(sc, "can't allocate interrupt\n");
255         twe_free(sc);
256         return(ENXIO);
257     }
258     if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE,  
259                        NULL, twe_pci_intr, sc, &sc->twe_intr)) {
260         twe_printf(sc, "can't set up interrupt\n");
261         twe_free(sc);
262         return(ENXIO);
263     }
264
265     /*
266      * Create DMA tag for mapping commands into controller-addressable space.
267      */
268     if (bus_dma_tag_create(sc->twe_parent_dmat,         /* parent */
269                            1, 0,                        /* alignment, boundary */
270                            BUS_SPACE_MAXADDR_32BIT,     /* lowaddr */
271                            BUS_SPACE_MAXADDR,           /* highaddr */
272                            NULL, NULL,                  /* filter, filterarg */
273                            sizeof(TWE_Command) *
274                            TWE_Q_LENGTH, 1,             /* maxsize, nsegments */
275                            BUS_SPACE_MAXSIZE_32BIT,     /* maxsegsize */
276                            0,                           /* flags */
277                            NULL,                        /* lockfunc */
278                            NULL,                        /* lockarg */
279                            &sc->twe_cmd_dmat)) {
280         twe_printf(sc, "can't allocate command DMA tag\n");
281         twe_free(sc);
282         return(ENOMEM);
283     }
284     /*
285      * Allocate memory and make it available for DMA.
286      */
287     if (bus_dmamem_alloc(sc->twe_cmd_dmat, (void **)&sc->twe_cmd,
288                          BUS_DMA_NOWAIT, &sc->twe_cmdmap)) {
289         twe_printf(sc, "can't allocate command memory\n");
290         return(ENOMEM);
291     }
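    /* map the command block into bus space; twe_setup_request_dmamap records its bus address */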
292     bus_dmamap_load(sc->twe_cmd_dmat, sc->twe_cmdmap, sc->twe_cmd,
293                     sizeof(TWE_Command) * TWE_Q_LENGTH,
294                     twe_setup_request_dmamap, sc, 0);
295     bzero(sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH);
296
297     /*
298      * Create DMA tag for mapping data buffers into controller-addressable space.
299      */
300     if (bus_dma_tag_create(sc->twe_parent_dmat,         /* parent */
301                            1, 0,                        /* alignment, boundary */
302                            BUS_SPACE_MAXADDR_32BIT,     /* lowaddr */
303                            BUS_SPACE_MAXADDR,           /* highaddr */
304                            NULL, NULL,                  /* filter, filterarg */
305                            (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE,/* maxsize */
306                            TWE_MAX_SGL_LENGTH,          /* nsegments */
307                            BUS_SPACE_MAXSIZE_32BIT,     /* maxsegsize */
308                            BUS_DMA_ALLOCNOW,            /* flags */
309                            busdma_lock_mutex,           /* lockfunc */
310                            &sc->twe_io_lock,            /* lockarg */
311                            &sc->twe_buffer_dmat)) {
312         twe_printf(sc, "can't allocate data buffer DMA tag\n");
313         twe_free(sc);
314         return(ENOMEM);
315     }
316
317     /*
318      * Create DMA tag for mapping immediate requests into controller-addressable space.
319      */
320     if (bus_dma_tag_create(sc->twe_parent_dmat,         /* parent */
321                            1, 0,                        /* alignment, boundary */
322                            BUS_SPACE_MAXADDR_32BIT,     /* lowaddr */
323                            BUS_SPACE_MAXADDR,           /* highaddr */
324                            NULL, NULL,                  /* filter, filterarg */
325                            DFLTPHYS, 1,                 /* maxsize, nsegments */
326                            BUS_SPACE_MAXSIZE_32BIT,     /* maxsegsize */
327                            0,                           /* flags */
328                            NULL,                        /* lockfunc */
329                            NULL,                        /* lockarg */
330                            &sc->twe_immediate_dmat)) {
331         twe_printf(sc, "can't allocate immediate request DMA tag\n");
332         twe_free(sc);
333         return(ENOMEM);
334     }
335     /*
336      * Allocate memory for requests which cannot sleep or support continuation.
337      */
338     if (bus_dmamem_alloc(sc->twe_immediate_dmat, (void **)&sc->twe_immediate,
339                          BUS_DMA_NOWAIT, &sc->twe_immediate_map)) {
340         twe_printf(sc, "can't allocate memory for immediate requests\n");
341         return(ENOMEM);
342     }
343
344     /*
345      * Initialise the controller and driver core.
346      */
347     if ((error = twe_setup(sc))) {
348         twe_free(sc);
349         return(error);
350     }
351
352     /*
353      * Print some information about the controller and configuration.
354      */
355     twe_describe_controller(sc);
356
357     /*
358      * Create the control device.
359      */
360     sc->twe_dev_t = make_dev(&twe_cdevsw, device_get_unit(sc->twe_dev), UID_ROOT, GID_OPERATOR,
361                              S_IRUSR | S_IWUSR, "twe%d", device_get_unit(sc->twe_dev));
362     sc->twe_dev_t->si_drv1 = sc;
363     /*
364      * Schedule ourselves to bring the controller up once interrupts are available.
365      * This isn't strictly necessary, since we disable interrupts while probing the
366      * controller, but it is more in keeping with common practice for other disk 
367      * devices.
368      */
369     sc->twe_ich.ich_func = twe_intrhook;
370     sc->twe_ich.ich_arg = sc;
371     if (config_intrhook_establish(&sc->twe_ich) != 0) {
372         twe_printf(sc, "can't establish configuration hook\n");
373         twe_free(sc);
374         return(ENXIO);
375     }
376
377     return(0);
378 }
379
380 /********************************************************************************
381  * Free all of the resources associated with (sc).
382  *
383  * Should not be called if the controller is active.
384  */
385 static void
386 twe_free(struct twe_softc *sc)
387 {
388     struct twe_request  *tr;
389
390     debug_called(4);
391
392     /* throw away any command buffers */
393     while ((tr = twe_dequeue_free(sc)) != NULL)
394         twe_free_request(tr);
395
396     if (sc->twe_cmd != NULL) {
397         bus_dmamap_unload(sc->twe_cmd_dmat, sc->twe_cmdmap);
398         bus_dmamem_free(sc->twe_cmd_dmat, sc->twe_cmd, sc->twe_cmdmap);
399     }
400
401     if (sc->twe_immediate != NULL) {
402         bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
403         bus_dmamem_free(sc->twe_immediate_dmat, sc->twe_immediate,
404                         sc->twe_immediate_map);
405     }
406
407     if (sc->twe_immediate_dmat)
408         bus_dma_tag_destroy(sc->twe_immediate_dmat);
409
410     /* destroy the data-transfer DMA tag */
411     if (sc->twe_buffer_dmat)
412         bus_dma_tag_destroy(sc->twe_buffer_dmat);
413
414     /* disconnect the interrupt handler */
415     if (sc->twe_intr)
416         bus_teardown_intr(sc->twe_dev, sc->twe_irq, sc->twe_intr);
417     if (sc->twe_irq != NULL)
418         bus_release_resource(sc->twe_dev, SYS_RES_IRQ, 0, sc->twe_irq);
419
420     /* destroy the parent DMA tag */
421     if (sc->twe_parent_dmat)
422         bus_dma_tag_destroy(sc->twe_parent_dmat);
423
424     /* release the register window mapping */
425     if (sc->twe_io != NULL)
426         bus_release_resource(sc->twe_dev, SYS_RES_IOPORT, TWE_IO_CONFIG_REG, sc->twe_io);
427
428     /* destroy control device */
429     if (sc->twe_dev_t != (struct cdev *)NULL)
430         destroy_dev(sc->twe_dev_t);
431
432     sx_destroy(&sc->twe_config_lock);
433     mtx_destroy(&sc->twe_io_lock);
434 }
435
436 /********************************************************************************
437  * Disconnect from the controller completely, in preparation for unload.
438  */
439 static int
440 twe_detach(device_t dev)
441 {
442     struct twe_softc    *sc = device_get_softc(dev);
443
444     debug_called(4);
445
446     TWE_IO_LOCK(sc);
447     if (sc->twe_state & TWE_STATE_OPEN) {
448         TWE_IO_UNLOCK(sc);
449         return (EBUSY);
450     }
451     sc->twe_state |= TWE_STATE_DETACHING;
452     TWE_IO_UNLOCK(sc);
453
454     /*  
455      * Shut the controller down.
456      */
457     if (twe_shutdown(dev)) {
458         TWE_IO_LOCK(sc);
459         sc->twe_state &= ~TWE_STATE_DETACHING;
460         TWE_IO_UNLOCK(sc);
461         return (EBUSY);
462     }
463
464     twe_free(sc);
465
466     return(0);
467 }
468
469 /********************************************************************************
470  * Bring the controller down to a dormant state and detach all child devices.
471  *
472  * Note that we can assume that the bioq on the controller is empty, as we won't
473  * allow shutdown if any device is open.
474  */
475 static int
476 twe_shutdown(device_t dev)
477 {
478     struct twe_softc    *sc = device_get_softc(dev);
479     int                 i, error = 0;
480
481     debug_called(4);
482
483     /* 
484      * Delete all our child devices.
485      */
486     TWE_CONFIG_LOCK(sc);
487     for (i = 0; i < TWE_MAX_UNITS; i++) {
488         if (sc->twe_drive[i].td_disk != 0) {
489             if ((error = twe_detach_drive(sc, i)) != 0) {
490                 TWE_CONFIG_UNLOCK(sc);
491                 return (error);
492             }
493         }
494     }
495     TWE_CONFIG_UNLOCK(sc);
496
497     /*
498      * Bring the controller down.
499      */
500     TWE_IO_LOCK(sc);
501     twe_deinit(sc);
502     TWE_IO_UNLOCK(sc);
503
504     return(0);
505 }
506
507 /********************************************************************************
508  * Bring the controller to a quiescent state, ready for system suspend.
509  */
510 static int
511 twe_suspend(device_t dev)
512 {
513     struct twe_softc    *sc = device_get_softc(dev);
514
515     debug_called(4);
516
517     TWE_IO_LOCK(sc);
518     sc->twe_state |= TWE_STATE_SUSPEND;
519     
520     twe_disable_interrupts(sc);
521     TWE_IO_UNLOCK(sc);
522
523     return(0);
524 }
525
526 /********************************************************************************
527  * Bring the controller back to a state ready for operation.
528  */
529 static int
530 twe_resume(device_t dev)
531 {
532     struct twe_softc    *sc = device_get_softc(dev);
533
534     debug_called(4);
535
536     TWE_IO_LOCK(sc);
537     sc->twe_state &= ~TWE_STATE_SUSPEND;
538     twe_enable_interrupts(sc);
539     TWE_IO_UNLOCK(sc);
540
541     return(0);
542 }
543
544 /*******************************************************************************
545  * Take an interrupt, or be poked by other code to look for interrupt-worthy
546  * status.
547  */
548 static void
549 twe_pci_intr(void *arg)
550 {
551     struct twe_softc *sc = arg;
552
553     TWE_IO_LOCK(sc);
554     twe_intr(sc);
555     TWE_IO_UNLOCK(sc);
556 }
557
558 /********************************************************************************
559  * Delayed-startup hook
560  */
561 static void
562 twe_intrhook(void *arg)
563 {
564     struct twe_softc            *sc = (struct twe_softc *)arg;
565
566     /* pull ourselves off the intrhook chain */
567     config_intrhook_disestablish(&sc->twe_ich);
568
569     /* call core startup routine */
570     twe_init(sc);
571 }
572
573 /********************************************************************************
574  * Given a detected drive, attach it to the bio interface.
575  *
576  * This is called from twe_add_unit.
577  */
578 int
579 twe_attach_drive(struct twe_softc *sc, struct twe_drive *dr)
580 {
581     char        buf[80];
582     int         error;
583
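    /* new-bus child creation and attach below are performed under Giant */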
584     mtx_lock(&Giant);
585     dr->td_disk =  device_add_child(sc->twe_dev, NULL, -1);
586     if (dr->td_disk == NULL) {
587         mtx_unlock(&Giant);
588         twe_printf(sc, "Cannot add unit\n");
589         return (EIO);
590     }
591     device_set_ivars(dr->td_disk, dr);
592
593     /* 
594      * XXX It would make sense to test the online/initialising bits, but they seem to be
595      * always set...
596      */
597     snprintf(buf, sizeof(buf), "Unit %d, %s, %s",
598             dr->td_twe_unit,
599             twe_describe_code(twe_table_unittype, dr->td_type),
600             twe_describe_code(twe_table_unitstate, dr->td_state & TWE_PARAM_UNITSTATUS_MASK));
601     device_set_desc_copy(dr->td_disk, buf);
602
603     error = device_probe_and_attach(dr->td_disk);
604     mtx_unlock(&Giant);
605     if (error != 0) {
606         twe_printf(sc, "Cannot attach unit to controller. error = %d\n", error);
607         return (EIO);
608     }
609     return (0);
610 }
611
612 /********************************************************************************
613  * Detach the specified unit if it exists
614  *
615  * This is called from twe_del_unit.
616  */
617 int
618 twe_detach_drive(struct twe_softc *sc, int unit)
619 {
620     int error = 0;
621
622     TWE_CONFIG_ASSERT_LOCKED(sc);
623     mtx_lock(&Giant);
624     error = device_delete_child(sc->twe_dev, sc->twe_drive[unit].td_disk);
625     mtx_unlock(&Giant);
626     if (error != 0) {
627         twe_printf(sc, "failed to delete unit %d\n", unit);
628         return(error);
629     }
630     bzero(&sc->twe_drive[unit], sizeof(sc->twe_drive[unit]));
631     return(error);
632 }
633
634 /********************************************************************************
635  * Clear a PCI parity error.
636  */
637 void
638 twe_clear_pci_parity_error(struct twe_softc *sc)
639 {
640     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PARITY_ERROR);
641     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
642 }
643
644 /********************************************************************************
645  * Clear a PCI abort.
646  */
647 void
648 twe_clear_pci_abort(struct twe_softc *sc)
649 {
650     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PCI_ABORT);
651     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
652 }
653
654 /********************************************************************************
655  ********************************************************************************
656                                                                       Disk device
657  ********************************************************************************
658  ********************************************************************************/
659
660 /*
661  * Disk device softc
662  */
663 struct twed_softc
664 {
665     device_t            twed_dev;
666     struct twe_softc    *twed_controller;       /* parent device softc */
667     struct twe_drive    *twed_drive;            /* drive data in parent softc */
668     struct disk         *twed_disk;             /* generic disk handle */
669 };
670
671 /*
672  * Disk device bus interface
673  */
674 static int twed_probe(device_t dev);
675 static int twed_attach(device_t dev);
676 static int twed_detach(device_t dev);
677
678 static device_method_t twed_methods[] = {
679     DEVMETHOD(device_probe,     twed_probe),
680     DEVMETHOD(device_attach,    twed_attach),
681     DEVMETHOD(device_detach,    twed_detach),
682     { 0, 0 }
683 };
684
685 static driver_t twed_driver = {
686     "twed",
687     twed_methods,
688     sizeof(struct twed_softc)
689 };
690
691 static devclass_t       twed_devclass;
692 DRIVER_MODULE(twed, twe, twed_driver, twed_devclass, 0, 0);
693
694 /*
695  * Disk device control interface.
696  */
697
698 /********************************************************************************
699  * Handle open from generic layer.
700  *
701  * Note that this is typically only called by the diskslice code, and not
702  * for opens on subdevices (e.g. slices, partitions).
703  */
704 static int
705 twed_open(struct disk *dp)
706 {
707     struct twed_softc   *sc = (struct twed_softc *)dp->d_drv1;
708
709     debug_called(4);
710         
711     if (sc == NULL)
712         return (ENXIO);
713
714     /* check that the controller is up and running */
715     if (sc->twed_controller->twe_state & TWE_STATE_SHUTDOWN)
716         return(ENXIO);
717
718     return (0);
719 }
720
721 /********************************************************************************
722  * Handle an I/O request.
723  */
724 static void
725 twed_strategy(struct bio *bp)
726 {
727     struct twed_softc   *sc = bp->bio_disk->d_drv1;
728
729     debug_called(4);
730
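    /* record the target array unit where the controller request path expects to find it */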
731     bp->bio_driver1 = &sc->twed_drive->td_twe_unit;
732     TWED_BIO_IN;
733
734     /* bogus disk? */
735     if (sc == NULL || sc->twed_drive->td_disk == NULL) {
736         bp->bio_error = EINVAL;
737         bp->bio_flags |= BIO_ERROR;
738         printf("twe: bio for invalid disk!\n");
739         biodone(bp);
740         TWED_BIO_OUT;
741         return;
742     }
743
744     /* queue the bio on the controller */
745     TWE_IO_LOCK(sc->twed_controller);
746     twe_enqueue_bio(sc->twed_controller, bp);
747
748     /* poke the controller to start I/O */
749     twe_startio(sc->twed_controller);
750     TWE_IO_UNLOCK(sc->twed_controller);
751     return;
752 }
753
754 /********************************************************************************
755  * System crashdump support
756  */
757 static int
758 twed_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
759 {
760     struct twed_softc   *twed_sc;
761     struct twe_softc    *twe_sc;
762     int                 error;
763     struct disk         *dp;
764
765     dp = arg;
766     twed_sc = (struct twed_softc *)dp->d_drv1;
767     if (twed_sc == NULL)
768         return(ENXIO);
769     twe_sc  = (struct twe_softc *)twed_sc->twed_controller;
770
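    /* the controller addresses dump data in TWE_BLOCK_SIZE units */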
771     if (length > 0) {
772         if ((error = twe_dump_blocks(twe_sc, twed_sc->twed_drive->td_twe_unit, offset / TWE_BLOCK_SIZE, virtual, length / TWE_BLOCK_SIZE)) != 0)
773             return(error);
774     }
775     return(0);
776 }
777
778 /********************************************************************************
779  * Handle completion of an I/O request.
780  */
781 void
782 twed_intr(struct bio *bp)
783 {
784     debug_called(4);
785
786     /* if no error, transfer completed */
787     if (!(bp->bio_flags & BIO_ERROR))
788         bp->bio_resid = 0;
789
790     biodone(bp);
791     TWED_BIO_OUT;
792 }
793
794 /********************************************************************************
795  * Default probe stub.
796  */
797 static int
798 twed_probe(device_t dev)
799 {
800     return (0);
801 }
802
803 /********************************************************************************
804  * Attach a unit to the controller.
805  */
806 static int
807 twed_attach(device_t dev)
808 {
809     struct twed_softc   *sc;
810     device_t            parent;
811     
812     debug_called(4);
813
814     /* initialise our softc */
815     sc = device_get_softc(dev);
816     parent = device_get_parent(dev);
817     sc->twed_controller = (struct twe_softc *)device_get_softc(parent);
818     sc->twed_drive = device_get_ivars(dev);
819     sc->twed_dev = dev;
820
821     /* report the drive */
822     twed_printf(sc, "%uMB (%u sectors)\n",
823                 sc->twed_drive->td_size / ((1024 * 1024) / TWE_BLOCK_SIZE),
824                 sc->twed_drive->td_size);
825     
826     /* attach a generic disk device to ourselves */
827
828     sc->twed_drive->td_sys_unit = device_get_unit(dev);
829
830     sc->twed_disk = disk_alloc();
831     sc->twed_disk->d_open = twed_open;
832     sc->twed_disk->d_strategy = twed_strategy;
833     sc->twed_disk->d_dump = (dumper_t *)twed_dump;
834     sc->twed_disk->d_name = "twed";
835     sc->twed_disk->d_drv1 = sc;
836     sc->twed_disk->d_maxsize = (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE;
837     sc->twed_disk->d_sectorsize = TWE_BLOCK_SIZE;
838     sc->twed_disk->d_mediasize = TWE_BLOCK_SIZE * (off_t)sc->twed_drive->td_size;
839     if (sc->twed_drive->td_type == TWE_UD_CONFIG_RAID0 ||
840         sc->twed_drive->td_type == TWE_UD_CONFIG_RAID5 ||
841         sc->twed_drive->td_type == TWE_UD_CONFIG_RAID10) {
842             sc->twed_disk->d_stripesize =
843                 TWE_BLOCK_SIZE << sc->twed_drive->td_stripe;
844             sc->twed_disk->d_stripeoffset = 0;
845     }
846     sc->twed_disk->d_fwsectors = sc->twed_drive->td_sectors;
847     sc->twed_disk->d_fwheads = sc->twed_drive->td_heads;
848     sc->twed_disk->d_unit = sc->twed_drive->td_sys_unit;
849
850     disk_create(sc->twed_disk, DISK_VERSION);
851
852     /* set the maximum I/O size to the theoretical maximum allowed by the S/G list size */
853
854     return (0);
855 }
856
857 /********************************************************************************
858  * Disconnect ourselves from the system.
859  */
860 static int
861 twed_detach(device_t dev)
862 {
863     struct twed_softc *sc = (struct twed_softc *)device_get_softc(dev);
864
865     debug_called(4);
866
867     if (sc->twed_disk->d_flags & DISKFLAG_OPEN)
868         return(EBUSY);
869
870     disk_destroy(sc->twed_disk);
871
872     return(0);
873 }
874
875 /********************************************************************************
876  ********************************************************************************
877                                                                              Misc
878  ********************************************************************************
879  ********************************************************************************/
880
881 /********************************************************************************
882  * Allocate a command buffer
883  */
884 static MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands");
885
886 struct twe_request *
887 twe_allocate_request(struct twe_softc *sc, int tag)
888 {
889     struct twe_request  *tr;
890
891     tr = malloc(sizeof(struct twe_request), TWE_MALLOC_CLASS, M_WAITOK | M_ZERO);
892     tr->tr_sc = sc;
893     tr->tr_tag = tag;
894     if (bus_dmamap_create(sc->twe_buffer_dmat, 0, &tr->tr_dmamap)) {
895         twe_free_request(tr);
896         twe_printf(sc, "unable to allocate dmamap for tag %d\n", tag);
897         return(NULL);
898     }    
899     return(tr);
900 }
901
902 /********************************************************************************
903  * Permanently discard a command buffer.
904  */
905 void
906 twe_free_request(struct twe_request *tr) 
907 {
908     struct twe_softc    *sc = tr->tr_sc;
909     
910     debug_called(4);
911
912     bus_dmamap_destroy(sc->twe_buffer_dmat, tr->tr_dmamap);
913     free(tr, TWE_MALLOC_CLASS);
914 }
915
916 /********************************************************************************
917  * Map/unmap (tr)'s command and data in the controller's addressable space.
918  *
919  * These routines ensure that the data which the controller is going to try to
920  * access is actually visible to the controller, in a machine-independent
921  * fashion.  Due to a hardware limitation, I/O buffers must be 512-byte aligned
922  * and we take care of that here as well.
923  */
924 static void
925 twe_fillin_sgl(TWE_SG_Entry *sgl, bus_dma_segment_t *segs, int nsegments, int max_sgl)
926 {
927     int i;
928
929     for (i = 0; i < nsegments; i++) {
930         sgl[i].address = segs[i].ds_addr;
931         sgl[i].length = segs[i].ds_len;
932     }
933     for (; i < max_sgl; i++) {                          /* XXX necessary? */
934         sgl[i].address = 0;
935         sgl[i].length = 0;
936     }
937 }
938                 
939 static void
940 twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
941 {
942     struct twe_request  *tr = (struct twe_request *)arg;
943     struct twe_softc    *sc = tr->tr_sc;
944     TWE_Command         *cmd = TWE_FIND_COMMAND(tr);
945
946     debug_called(4);
947
948     if (tr->tr_flags & TWE_CMD_MAPPED)
949         panic("already mapped command");
950
951     tr->tr_flags |= TWE_CMD_MAPPED;
952
953     if (tr->tr_flags & TWE_CMD_IN_PROGRESS)
954         sc->twe_state &= ~TWE_STATE_FRZN;
955     /* save base of first segment in command (applicable if there is only one segment) */
956     tr->tr_dataphys = segs[0].ds_addr;
957
958     /* correct command size for s/g list size */
959     cmd->generic.size += 2 * nsegments;
960
961     /*
962      * Due to the fact that parameter and I/O commands have the scatter/gather list in
963      * different places, we need to determine which sort of command this actually is
964      * before we can populate it correctly.
965      */
966     switch(cmd->generic.opcode) {
967     case TWE_OP_GET_PARAM:
968     case TWE_OP_SET_PARAM:
969         cmd->generic.sgl_offset = 2;
970         twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
971         break;
972     case TWE_OP_READ:
973     case TWE_OP_WRITE:
974         cmd->generic.sgl_offset = 3;
975         twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
976         break;
977     case TWE_OP_ATA_PASSTHROUGH:
978         cmd->generic.sgl_offset = 5;
979         twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
980         break;
981     default:
982         /*
983          * Fall back to what the linux driver does.
984          * Do this because the API may send an opcode
985          * the driver knows nothing about and this will
986          * at least stop PCIABRT's from hosing us.
987          */
988         switch (cmd->generic.sgl_offset) {
989         case 2:
990             twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
991             break;
992         case 3:
993             twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
994             break;
995         case 5:
996             twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
997             break;
998         }
999     }
1000
1001     if (tr->tr_flags & TWE_CMD_DATAIN) {
1002         if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1003             bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1004                             BUS_DMASYNC_PREREAD);
1005         } else {
1006             bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1007                             BUS_DMASYNC_PREREAD);
1008         }
1009     }
1010
1011     if (tr->tr_flags & TWE_CMD_DATAOUT) {
1012         /*
1013          * if we're using an alignment buffer and we're writing data,
1014          * copy the real data out
1015          */
1016         if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1017             bcopy(tr->tr_realdata, tr->tr_data, tr->tr_length);
1018
1019         if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1020             bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1021                             BUS_DMASYNC_PREWRITE);
1022         } else {
1023             bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1024                             BUS_DMASYNC_PREWRITE);
1025         }
1026     }
1027
1028     if (twe_start(tr) == EBUSY) {
1029         tr->tr_sc->twe_state |= TWE_STATE_CTLR_BUSY;
1030         twe_requeue_ready(tr);
1031     }
1032 }
1033
1034 static void
1035 twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1036 {
1037     struct twe_softc    *sc = (struct twe_softc *)arg;
1038
1039     debug_called(4);
1040
1041     /* command can't cross a page boundary */
1042     sc->twe_cmdphys = segs[0].ds_addr;
1043 }
1044
1045 int
1046 twe_map_request(struct twe_request *tr)
1047 {
1048     struct twe_softc    *sc = tr->tr_sc;
1049     int                 error = 0;
1050
1051     debug_called(4);
1052
1053     if (!dumping)
1054         TWE_IO_ASSERT_LOCKED(sc);
1055     if (sc->twe_state & (TWE_STATE_CTLR_BUSY | TWE_STATE_FRZN)) {
1056         twe_requeue_ready(tr);
1057         return (EBUSY);
1058     }
1059
1060     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_PREWRITE);
1061
1062     /*
1063      * If the command involves data, map that too.
1064      */
1065     if (tr->tr_data != NULL && ((tr->tr_flags & TWE_CMD_MAPPED) == 0)) {
1066
1067         /* 
1068          * Data must be aligned to TWE_ALIGNMENT; allocate a fixup buffer if it's not.
1069          */
1070         if (((vm_offset_t)tr->tr_data % TWE_ALIGNMENT) != 0) {
1071             tr->tr_realdata = tr->tr_data;                              /* save pointer to 'real' data */
1072             tr->tr_flags |= TWE_CMD_ALIGNBUF;
1073             tr->tr_data = malloc(tr->tr_length, TWE_MALLOC_CLASS, M_NOWAIT);
1074             if (tr->tr_data == NULL) {
1075                 twe_printf(sc, "%s: malloc failed\n", __func__);
1076                 tr->tr_data = tr->tr_realdata; /* restore original data pointer */
1077                 return(ENOMEM);
1078             }
1079         }
1080         
1081         /*
1082          * Map the data buffer into bus space and build the s/g list.
1083          */
1084         if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1085             error = bus_dmamap_load(sc->twe_immediate_dmat, sc->twe_immediate_map, sc->twe_immediate,
1086                             tr->tr_length, twe_setup_data_dmamap, tr, BUS_DMA_NOWAIT);
1087         } else {
1088             error = bus_dmamap_load(sc->twe_buffer_dmat, tr->tr_dmamap, tr->tr_data, tr->tr_length, 
1089                                     twe_setup_data_dmamap, tr, 0);
1090         }
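        /* a deferred mapping: the callback runs later, so freeze the command stream until it completes */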
1091         if (error == EINPROGRESS) {
1092             tr->tr_flags |= TWE_CMD_IN_PROGRESS;
1093             sc->twe_state |= TWE_STATE_FRZN;
1094             error = 0;
1095         }
1096     } else
1097         if ((error = twe_start(tr)) == EBUSY) {
1098             sc->twe_state |= TWE_STATE_CTLR_BUSY;
1099             twe_requeue_ready(tr);
1100         }
1101
1102     return(error);
1103 }
1104
1105 void
1106 twe_unmap_request(struct twe_request *tr)
1107 {
1108     struct twe_softc    *sc = tr->tr_sc;
1109
1110     debug_called(4);
1111
1112     if (!dumping)
1113         TWE_IO_ASSERT_LOCKED(sc);
1114     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_POSTWRITE);
1115
1116     /*
1117      * If the command involved data, unmap that too.
1118      */
1119     if (tr->tr_data != NULL) {
1120         if (tr->tr_flags & TWE_CMD_DATAIN) {
1121             if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1122                 bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1123                                 BUS_DMASYNC_POSTREAD);
1124             } else {
1125                 bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1126                                 BUS_DMASYNC_POSTREAD);
1127             }
1128
1129             /* if we're using an alignment buffer, and we're reading data, copy the real data in */
1130             if (tr->tr_flags & TWE_CMD_ALIGNBUF)
1131                 bcopy(tr->tr_data, tr->tr_realdata, tr->tr_length);
1132         }
1133         if (tr->tr_flags & TWE_CMD_DATAOUT) {
1134             if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1135                 bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
1136                                 BUS_DMASYNC_POSTWRITE);
1137             } else {
1138                 bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
1139                                 BUS_DMASYNC_POSTWRITE);
1140             }
1141         }
1142
1143         if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
1144             bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
1145         } else {
1146             bus_dmamap_unload(sc->twe_buffer_dmat, tr->tr_dmamap); 
1147         }
1148     }
1149
1150     /* free alignment buffer if it was used */
1151     if (tr->tr_flags & TWE_CMD_ALIGNBUF) {
1152         free(tr->tr_data, TWE_MALLOC_CLASS);
1153         tr->tr_data = tr->tr_realdata;          /* restore 'real' data pointer */
1154     }
1155 }
1156
1157 #ifdef TWE_DEBUG
1158 void twe_report(void);
1159 /********************************************************************************
1160  * Print current controller status, call from DDB.
1161  */
1162 void
1163 twe_report(void)
1164 {
1165     struct twe_softc    *sc;
1166     int                 i;
1167
1168     for (i = 0; (sc = devclass_get_softc(twe_devclass, i)) != NULL; i++)
1169         twe_print_controller(sc);
1170     printf("twed: total bio count in %u  out %u\n", twed_bio_in, twed_bio_out);
1171 }
1172 #endif