2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
37 #include <sys/devicestat.h>
40 #include <machine/bus_memio.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
45 #include <pci/pcireg.h>
46 #include <pci/pcivar.h>
48 #include <dev/mly/mlyreg.h>
49 #include <dev/mly/mlyvar.h>
/* Newbus device-interface entry points for the mly PCI front-end. */
51 static int	mly_pci_probe(device_t dev);
52 static int	mly_pci_attach(device_t dev);
53 static int	mly_pci_detach(device_t dev);
54 static int	mly_pci_shutdown(device_t dev);
55 static int	mly_pci_suspend(device_t dev);
56 static int	mly_pci_resume(device_t dev);
57 static void	mly_pci_intr(void *arg);
/*
 * bus_dma setup helpers: the *_map routines create/load DMA memory for the
 * scatter/gather tables and the memory mailbox; the *_helper callbacks
 * capture the resulting bus addresses.
 */
59 static int	mly_sg_map(struct mly_softc *sc);
60 static void	mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
61 static int	mly_mmbox_map(struct mly_softc *sc);
62 static void	mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
63 static void	mly_free_command_cluster(struct mly_command_cluster *mcc);
/*
 * Newbus method dispatch table wiring the generic device interface to the
 * mly_pci_* implementations above.
 * NOTE(review): this extract elides the table terminator and closing brace.
 */
65 static device_method_t mly_methods[] = {
66 /* Device interface */
67 DEVMETHOD(device_probe, mly_pci_probe),
68 DEVMETHOD(device_attach, mly_pci_attach),
69 DEVMETHOD(device_detach, mly_pci_detach),
70 DEVMETHOD(device_shutdown, mly_pci_shutdown),
71 DEVMETHOD(device_suspend, mly_pci_suspend),
72 DEVMETHOD(device_resume, mly_pci_resume),
/*
 * Driver description and registration on the PCI bus.
 * NOTE(review): the driver name and methods-table fields of the driver_t
 * initializer are elided in this extract.
 */
76 static driver_t mly_pci_driver = {
79 sizeof(struct mly_softc)
82 static devclass_t	mly_devclass;
83 DRIVER_MODULE(mly, pci, mly_pci_driver, mly_devclass, 0, 0);
/*
 * Table of supported controllers: PCI vendor/device plus subvendor/subdevice
 * identify the board, and select the hardware interface flavour
 * (StrongARM vs. i960RX register layout) and the probe description string.
 * The table is terminated by an entry with vendor == 0 (see the probe loop).
 * NOTE(review): the struct declaration that opens this initializer is elided
 * in this extract.
 */
93 } mly_identifiers[] = {
94 {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 2000"},
95 {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 3000"},
96 {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX, "Mylex AcceleRAID 352"},
97 {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX, "Mylex AcceleRAID 170"},
98 {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX, "Mylex AcceleRAID 160"},
102 /********************************************************************************
103 ********************************************************************************
105 ********************************************************************************
106 ********************************************************************************/
/*
 * Device probe: walk mly_identifiers looking for a vendor/device match;
 * subvendor/subdevice are only compared when the table entry's subvendor is
 * nonzero (wildcard otherwise).  On a hit, set the device description and
 * return a negative priority so a more specific driver can still claim the
 * device.
 * NOTE(review): the function header, declarations and the no-match return
 * are elided in this extract.
 */
109 mly_pci_probe(device_t dev)
115 for (m = mly_identifiers; m->vendor != 0; m++) {
116 if ((m->vendor == pci_get_vendor(dev)) &&
117 (m->device == pci_get_device(dev)) &&
118 ((m->subvendor == 0) || ((m->subvendor == pci_get_subvendor(dev)) &&
119 (m->subdevice == pci_get_subdevice(dev))))) {
121 device_set_desc(dev, m->desc);
122 return(-10); /* allow room to be overridden */
/*
 * Device attach: enable bus-mastering, map the register window, hook up the
 * interrupt, build the DMA tag hierarchy, detect the hardware interface
 * flavour from the identifier table, map the s/g tables and memory mailbox,
 * and finally call the bus-independent mly_attach().
 * NOTE(review): this extract elides a number of original lines — the softc
 * field assignments, most error-exit paths, closing braces and the final
 * return — so the control flow shown here is incomplete.
 */
129 mly_pci_attach(device_t dev)
131 struct mly_softc	*sc;
/* start from a zeroed softc */
140 sc = device_get_softc(dev);
141 bzero(sc, sizeof(*sc));
145 if (device_get_unit(sc->mly_dev) == 0)
149 /* assume failure is 'not configured' */
153 * Verify that the adapter is correctly set up in PCI space.
/* force bus-mastering on, then read back to confirm the write stuck */
155 command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
156 command |= PCIM_CMD_BUSMASTEREN;
157 pci_write_config(dev, PCIR_COMMAND, command, 2);
158 command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
159 if (!(command & PCIM_CMD_BUSMASTEREN)) {
160 mly_printf(sc, "can't enable busmaster feature\n");
/* the memory-space enable bit must already be set by the firmware/BIOS */
163 if ((command & PCIM_CMD_MEMEN) == 0) {
164 mly_printf(sc, "memory window not available\n");
169 * Allocate the PCI register window.
171 sc->mly_regs_rid = PCIR_MAPS;     /* first base address register */
172 if ((sc->mly_regs_resource = bus_alloc_resource(sc->mly_dev, SYS_RES_MEMORY, &sc->mly_regs_rid,
173 0, ~0, 1, RF_ACTIVE)) == NULL) {
174 mly_printf(sc, "can't allocate register window\n");
/* cache bus tag/handle for register access */
177 sc->mly_btag = rman_get_bustag(sc->mly_regs_resource);
178 sc->mly_bhandle = rman_get_bushandle(sc->mly_regs_resource);
181 * Allocate and connect our interrupt.
184 if ((sc->mly_irq = bus_alloc_resource(sc->mly_dev, SYS_RES_IRQ, &sc->mly_irq_rid,
185 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
186 mly_printf(sc, "can't allocate interrupt\n");
189 if (bus_setup_intr(sc->mly_dev, sc->mly_irq, INTR_TYPE_CAM, mly_pci_intr, sc, &sc->mly_intr)) {
190 mly_printf(sc, "can't set up interrupt\n");
194 /* assume failure is 'out of memory' */
198 * Allocate the parent bus DMA tag appropriate for our PCI interface.
200 * Note that all of these controllers are 64-bit capable.
202 if (bus_dma_tag_create(NULL, 			/* parent */
203 1, 0, 			/* alignment, boundary */
204 BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
205 BUS_SPACE_MAXADDR, 	/* highaddr */
206 NULL, NULL, 		/* filter, filterarg */
207 MAXBSIZE, MLY_MAXSGENTRIES,	/* maxsize, nsegments */
208 BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
209 BUS_DMA_ALLOCNOW,		/* flags */
210 &sc->mly_parent_dmat)) {
211 mly_printf(sc, "can't allocate parent DMA tag\n");
216 * Create DMA tag for mapping buffers into controller-addressable space.
218 if (bus_dma_tag_create(sc->mly_parent_dmat, 	/* parent */
219 1, 0, 			/* alignment, boundary */
220 BUS_SPACE_MAXADDR, 	/* lowaddr */
221 BUS_SPACE_MAXADDR, 	/* highaddr */
222 NULL, NULL, 		/* filter, filterarg */
223 MAXBSIZE, MLY_MAXSGENTRIES,	/* maxsize, nsegments */
224 BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
226 &sc->mly_buffer_dmat)) {
227 mly_printf(sc, "can't allocate buffer DMA tag\n");
232 * Initialise the DMA tag for command packets.
234 if (bus_dma_tag_create(sc->mly_parent_dmat,	/* parent */
235 1, 0, 			/* alignment, boundary */
236 BUS_SPACE_MAXADDR, 	/* lowaddr */
237 BUS_SPACE_MAXADDR, 	/* highaddr */
238 NULL, NULL, 		/* filter, filterarg */
239 sizeof(union mly_command_packet) * MLY_CMD_CLUSTERCOUNT, 1,	/* maxsize, nsegments */
240 BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
242 &sc->mly_packet_dmat)) {
243 mly_printf(sc, "can't allocate command packet DMA tag\n");
248 * Detect the hardware interface version
/* re-scan the identifier table (probe already matched) to pick the
 * register layout for this board */
250 for (i = 0; mly_identifiers[i].vendor != 0; i++) {
251 if ((mly_identifiers[i].vendor == pci_get_vendor(dev)) &&
252 (mly_identifiers[i].device == pci_get_device(dev))) {
253 sc->mly_hwif = mly_identifiers[i].hwif;
254 switch(sc->mly_hwif) {
255 case MLY_HWIF_I960RX:
256 debug(2, "set hardware up for i960RX");
257 sc->mly_doorbell_true = 0x00;
258 sc->mly_command_mailbox =  MLY_I960RX_COMMAND_MAILBOX;
259 sc->mly_status_mailbox =   MLY_I960RX_STATUS_MAILBOX;
260 sc->mly_idbr =             MLY_I960RX_IDBR;
261 sc->mly_odbr =             MLY_I960RX_ODBR;
262 sc->mly_error_status =     MLY_I960RX_ERROR_STATUS;
263 sc->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
264 sc->mly_interrupt_mask =   MLY_I960RX_INTERRUPT_MASK;
266 case MLY_HWIF_STRONGARM:
267 debug(2, "set hardware up for StrongARM");
268 sc->mly_doorbell_true = 0xff;		/* doorbell 'true' is 0 */
269 sc->mly_command_mailbox =  MLY_STRONGARM_COMMAND_MAILBOX;
270 sc->mly_status_mailbox =   MLY_STRONGARM_STATUS_MAILBOX;
271 sc->mly_idbr =             MLY_STRONGARM_IDBR;
272 sc->mly_odbr =             MLY_STRONGARM_ODBR;
273 sc->mly_error_status =     MLY_STRONGARM_ERROR_STATUS;
274 sc->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
275 sc->mly_interrupt_mask =   MLY_STRONGARM_INTERRUPT_MASK;
283 * Create the scatter/gather mappings.
285 if ((error = mly_sg_map(sc)))
289 * Allocate and map the memory mailbox
291 if ((error = mly_mmbox_map(sc)))
295 * Do bus-independent initialisation.
297 if ((error = mly_attach(sc)))
307 /********************************************************************************
308 * Disconnect from the controller completely, in preparation for unload.
/*
 * Detach: refuse while the control device is open, otherwise shut the
 * controller down and (in elided code) release resources.
 * NOTE(review): the bodies of the two if-statements and the final cleanup/
 * return are elided in this extract.
 */
311 mly_pci_detach(device_t dev)
313 struct mly_softc	*sc = device_get_softc(dev);
318 if (sc->mly_state & MLY_STATE_OPEN)
321 if ((error = mly_pci_shutdown(dev)))
329 /********************************************************************************
330 * Bring the controller down to a dormant state and detach all child devices.
332 * This function is called before detach or system shutdown.
334 * Note that we can assume that the camq on the controller is empty, as we won't
335 * allow shutdown if any device is open.
/*
 * Shutdown: bring the controller to a dormant state (body elided in this
 * extract); called from detach and at system shutdown per the comment above.
 */
338 mly_pci_shutdown(device_t dev)
340 struct mly_softc	*sc = device_get_softc(dev);
348 /********************************************************************************
349 * Bring the controller to a quiescent state, ready for system suspend.
351 * We can't assume that the controller is not active at this point, so we need
352 * to mask interrupts.
/*
 * Suspend: quiesce the controller for system sleep (interrupt masking and
 * state flagging are in lines elided from this extract).
 */
355 mly_pci_suspend(device_t dev)
357 struct mly_softc	*sc = device_get_softc(dev);
367 /********************************************************************************
368 * Bring the controller back to a state ready for operation.
/*
 * Resume: clear the suspend flag and re-enable controller interrupts.
 * NOTE(review): the function header and return are elided in this extract.
 */
371 mly_pci_resume(device_t dev)
373 struct mly_softc	*sc = device_get_softc(dev);
376 sc->mly_state &= ~MLY_STATE_SUSPEND;
377 MLY_UNMASK_INTERRUPTS(sc);
381 /*******************************************************************************
382 * Take an interrupt, or be poked by other code to look for interrupt-worthy
/*
 * Interrupt handler (also pokeable from other code, per the comment above):
 * recovers the softc from the opaque argument; the command-completion /
 * queue-start work is in lines elided from this extract.
 */
386 mly_pci_intr(void *arg)
388 struct mly_softc	*sc = (struct mly_softc *)arg;
392 /* collect finished commands, queue anything waiting */
396 /********************************************************************************
397 ********************************************************************************
398 Bus-dependant Resource Management
399 ********************************************************************************
400 ********************************************************************************/
402 /********************************************************************************
403 * Allocate memory for the scatter/gather tables
/*
 * Allocate one contiguous region big enough for the s/g lists of every
 * possible command (MLY_MAXCOMMANDS * MLY_MAXSGENTRIES entries), and load it
 * so mly_sg_map_helper() can record its bus address.
 * NOTE(review): error returns and the tag-create flags/output lines between
 * the visible lines are elided in this extract.
 */
406 mly_sg_map(struct mly_softc *sc)
413 * Create a single tag describing a region large enough to hold all of
414 * the s/g lists we will need.
416 segsize = sizeof(struct mly_sg_entry) * MLY_MAXCOMMANDS * MLY_MAXSGENTRIES;
417 if (bus_dma_tag_create(sc->mly_parent_dmat,	/* parent */
418 1, 0, 			/* alignment, boundary */
419 BUS_SPACE_MAXADDR, 	/* lowaddr */
420 BUS_SPACE_MAXADDR, 	/* highaddr */
421 NULL, NULL, 		/* filter, filterarg */
422 segsize, 1,		/* maxsize, nsegments */
423 BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
426 mly_printf(sc, "can't allocate scatter/gather DMA tag\n");
431 * Allocate enough s/g maps for all commands and permanently map them into
432 * controller-visible space.
434 * XXX this assumes we can get enough space for all the s/g maps in one
437 if (bus_dmamem_alloc(sc->mly_sg_dmat, (void **)&sc->mly_sg_table, BUS_DMA_NOWAIT, &sc->mly_sg_dmamap)) {
438 mly_printf(sc, "can't allocate s/g table\n");
/* callback stores the bus address in sc->mly_sg_busaddr */
441 bus_dmamap_load(sc->mly_sg_dmat, sc->mly_sg_dmamap, sc->mly_sg_table, segsize, mly_sg_map_helper, sc, 0);
445 /********************************************************************************
446 * Save the physical address of the base of the s/g table.
/*
 * bus_dmamap_load() callback: stash the bus address of the (single-segment)
 * s/g table region in the softc for later use when building commands.
 */
449 mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
451 struct mly_softc	*sc = (struct mly_softc *)arg;
455 /* save base of s/g table's address in bus space */
456 sc->mly_sg_busaddr = segs->ds_addr;
459 /********************************************************************************
460 * Allocate memory for the memory-mailbox interface
/*
 * Allocate and map a single contiguous struct mly_mmbox for the
 * memory-mailbox interface; mly_mmbox_map_helper() records its bus address,
 * and the buffer is zeroed before use.
 * NOTE(review): error returns and some tag-create argument lines are elided
 * in this extract.
 */
463 mly_mmbox_map(struct mly_softc *sc)
467 * Create a DMA tag for a single contiguous region large enough for the
468 * memory mailbox structure.
470 if (bus_dma_tag_create(sc->mly_parent_dmat,	/* parent */
471 1, 0, 			/* alignment, boundary */
472 BUS_SPACE_MAXADDR, 	/* lowaddr */
473 BUS_SPACE_MAXADDR, 	/* highaddr */
474 NULL, NULL, 		/* filter, filterarg */
475 sizeof(struct mly_mmbox), 1,	/* maxsize, nsegments */
476 BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
478 &sc->mly_mmbox_dmat)) {
479 mly_printf(sc, "can't allocate memory mailbox DMA tag\n");
484 * Allocate the buffer
486 if (bus_dmamem_alloc(sc->mly_mmbox_dmat, (void **)&sc->mly_mmbox, BUS_DMA_NOWAIT, &sc->mly_mmbox_dmamap)) {
487 mly_printf(sc, "can't allocate memory mailbox\n");
/* callback stores the bus address in sc->mly_mmbox_busaddr */
490 bus_dmamap_load(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap, sc->mly_mmbox, sizeof(struct mly_mmbox),
491 mly_mmbox_map_helper, sc, 0);
492 bzero(sc->mly_mmbox, sizeof(*sc->mly_mmbox));
497 /********************************************************************************
498 * Save the physical address of the memory mailbox
/*
 * bus_dmamap_load() callback: save the bus address of the (single-segment)
 * memory mailbox region in the softc.
 */
501 mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
503 struct mly_softc	*sc = (struct mly_softc *)arg;
507 sc->mly_mmbox_busaddr = segs->ds_addr;
510 /********************************************************************************
511 * Free all of the resources associated with (sc)
513 * Should not be called if the controller is active.
/*
 * Tear down everything mly_pci_attach() (and the bus-independent attach)
 * built, in roughly reverse order of allocation: command clusters,
 * controller info/param buffers, DMA memory/tags for buffers, s/g table and
 * memory mailbox, the interrupt hookup, the parent DMA tag and finally the
 * register window.  Must not be called while the controller is active (per
 * the header comment above).
 * NOTE(review): some guard conditions and closing braces between the visible
 * lines are elided in this extract — e.g. the check that presumably guards
 * the mmbox unload/free pair, and the teardown_intr guard.
 */
516 mly_free(struct mly_softc *sc)
518 struct mly_command_cluster	*mcc;
522 /* detach from CAM */
525 /* throw away any command buffers */
526 while ((mcc = mly_dequeue_cluster(sc)) != NULL)
527 mly_free_command_cluster(mcc);
529 /* throw away the controllerinfo structure */
530 if (sc->mly_controllerinfo != NULL)
531 free(sc->mly_controllerinfo, M_DEVBUF);
533 /* throw away the controllerparam structure */
534 if (sc->mly_controllerparam != NULL)
535 free(sc->mly_controllerparam, M_DEVBUF);
537 /* destroy data-transfer DMA tag */
538 if (sc->mly_buffer_dmat)
539 bus_dma_tag_destroy(sc->mly_buffer_dmat);
541 /* free and destroy DMA memory and tag for s/g lists */
542 if (sc->mly_sg_table) {
543 bus_dmamap_unload(sc->mly_sg_dmat, sc->mly_sg_dmamap);
544 bus_dmamem_free(sc->mly_sg_dmat, sc->mly_sg_table, sc->mly_sg_dmamap);
547 bus_dma_tag_destroy(sc->mly_sg_dmat);
549 /* free and destroy DMA memory and tag for memory mailbox */
551 bus_dmamap_unload(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap);
552 bus_dmamem_free(sc->mly_mmbox_dmat, sc->mly_mmbox, sc->mly_mmbox_dmamap);
554 if (sc->mly_mmbox_dmat)
555 bus_dma_tag_destroy(sc->mly_mmbox_dmat);
557 /* disconnect the interrupt handler */
559 bus_teardown_intr(sc->mly_dev, sc->mly_irq, sc->mly_intr);
560 if (sc->mly_irq != NULL)
561 bus_release_resource(sc->mly_dev, SYS_RES_IRQ, sc->mly_irq_rid, sc->mly_irq);
563 /* destroy the parent DMA tag */
564 if (sc->mly_parent_dmat)
565 bus_dma_tag_destroy(sc->mly_parent_dmat);
567 /* release the register window mapping */
568 if (sc->mly_regs_resource != NULL)
569 bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource);
572 /********************************************************************************
573 * Free a command cluster.
576 mly_free_command_cluster(struct mly_command_cluster *mcc)
578 struct mly_softc *sc = mcc->mcc_command[0].mc_sc;
583 for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++)
584 bus_dmamap_destroy(sc->mly_buffer_dmat, mcc->mcc_command[i].mc_datamap);
586 bus_dmamap_unload(sc->mly_packet_dmat, mcc->mcc_packetmap);
587 bus_dmamem_free(sc->mly_packet_dmat, mcc->mcc_packet, mcc->mcc_packetmap);