2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2001 Scott Long
4 * Copyright (c) 2000 BSDi
5 * Copyright (c) 2001 Adaptec, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
36 #define AAC_DRIVERNAME "aac"
40 /* #include <stddef.h> */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/sysctl.h>
48 #include <sys/ioccom.h>
52 #include <sys/signalvar.h>
54 #include <sys/eventhandler.h>
57 #include <machine/bus.h>
58 #include <sys/bus_dma.h>
59 #include <machine/resource.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
64 #include <dev/aac/aacreg.h>
65 #include <sys/aac_ioctl.h>
66 #include <dev/aac/aacvar.h>
67 #include <dev/aac/aac_tables.h>
/*
 * Forward declarations and driver-global objects.
 * NOTE(review): this excerpt embeds original line numbers at the start of
 * every line and omits many intermediate lines, so several declarations and
 * initializers below are visibly incomplete (e.g. the interface vtables are
 * missing members and closing braces present in the original file).
 */
69 static void aac_startup(void *arg);
70 static void aac_add_container(struct aac_softc *sc,
71 struct aac_mntinforesp *mir, int f);
72 static void aac_get_bus_info(struct aac_softc *sc);
73 static void aac_daemon(void *arg);
75 /* Command Processing */
76 static void aac_timeout(struct aac_softc *sc);
77 static void aac_complete(void *context, int pending);
78 static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
79 static void aac_bio_complete(struct aac_command *cm);
80 static int aac_wait_command(struct aac_command *cm);
81 static void aac_command_thread(struct aac_softc *sc);
83 /* Command Buffer Management */
84 static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
86 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
88 static int aac_alloc_commands(struct aac_softc *sc);
89 static void aac_free_commands(struct aac_softc *sc);
90 static void aac_unmap_command(struct aac_command *cm);
92 /* Hardware Interface */
93 static int aac_alloc(struct aac_softc *sc);
94 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
96 static int aac_check_firmware(struct aac_softc *sc);
97 static int aac_init(struct aac_softc *sc);
98 static int aac_sync_command(struct aac_softc *sc, u_int32_t command,
99 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
100 u_int32_t arg3, u_int32_t *sp);
101 static int aac_setup_intr(struct aac_softc *sc);
102 static int aac_enqueue_fib(struct aac_softc *sc, int queue,
103 struct aac_command *cm);
104 static int aac_dequeue_fib(struct aac_softc *sc, int queue,
105 u_int32_t *fib_size, struct aac_fib **fib_addr);
106 static int aac_enqueue_response(struct aac_softc *sc, int queue,
107 struct aac_fib *fib);
/*
 * Per-hardware register access methods.  Three controller families are
 * supported (StrongARM, i960Rx, Rocket/MIPS); each fills a struct
 * aac_interface vtable with its accessors.
 */
109 /* StrongARM interface */
110 static int aac_sa_get_fwstatus(struct aac_softc *sc);
111 static void aac_sa_qnotify(struct aac_softc *sc, int qbit);
112 static int aac_sa_get_istatus(struct aac_softc *sc);
113 static void aac_sa_clear_istatus(struct aac_softc *sc, int mask);
114 static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
115 u_int32_t arg0, u_int32_t arg1,
116 u_int32_t arg2, u_int32_t arg3);
117 static int aac_sa_get_mailbox(struct aac_softc *sc, int mb);
118 static void aac_sa_set_interrupts(struct aac_softc *sc, int enable);
120 struct aac_interface aac_sa_interface = {
124 aac_sa_clear_istatus,
127 aac_sa_set_interrupts,
131 /* i960Rx interface */
132 static int aac_rx_get_fwstatus(struct aac_softc *sc);
133 static void aac_rx_qnotify(struct aac_softc *sc, int qbit);
134 static int aac_rx_get_istatus(struct aac_softc *sc);
135 static void aac_rx_clear_istatus(struct aac_softc *sc, int mask);
136 static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
137 u_int32_t arg0, u_int32_t arg1,
138 u_int32_t arg2, u_int32_t arg3);
139 static int aac_rx_get_mailbox(struct aac_softc *sc, int mb);
140 static void aac_rx_set_interrupts(struct aac_softc *sc, int enable);
141 static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
142 static int aac_rx_get_outb_queue(struct aac_softc *sc);
143 static void aac_rx_set_outb_queue(struct aac_softc *sc, int index);
145 struct aac_interface aac_rx_interface = {
149 aac_rx_clear_istatus,
152 aac_rx_set_interrupts,
154 aac_rx_get_outb_queue,
155 aac_rx_set_outb_queue
158 /* Rocket/MIPS interface */
159 static int aac_rkt_get_fwstatus(struct aac_softc *sc);
160 static void aac_rkt_qnotify(struct aac_softc *sc, int qbit);
161 static int aac_rkt_get_istatus(struct aac_softc *sc);
162 static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
163 static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
164 u_int32_t arg0, u_int32_t arg1,
165 u_int32_t arg2, u_int32_t arg3);
166 static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
167 static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
168 static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
169 static int aac_rkt_get_outb_queue(struct aac_softc *sc);
170 static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index);
172 struct aac_interface aac_rkt_interface = {
173 aac_rkt_get_fwstatus,
176 aac_rkt_clear_istatus,
179 aac_rkt_set_interrupts,
180 aac_rkt_send_command,
181 aac_rkt_get_outb_queue,
182 aac_rkt_set_outb_queue
185 /* Debugging and Diagnostics */
186 static void aac_describe_controller(struct aac_softc *sc);
187 static char *aac_describe_code(struct aac_code_lookup *table,
190 /* Management Interface */
191 static d_open_t aac_open;
192 static d_ioctl_t aac_ioctl;
193 static d_poll_t aac_poll;
194 static void aac_cdevpriv_dtor(void *arg);
195 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
196 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
197 static void aac_handle_aif(struct aac_softc *sc,
198 struct aac_fib *fib);
199 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
200 static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
201 static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
202 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
203 static int aac_return_aif(struct aac_softc *sc,
204 struct aac_fib_context *ctx, caddr_t uptr);
205 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
206 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
207 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
208 static void aac_ioctl_event(struct aac_softc *sc,
209 struct aac_event *event, void *arg);
210 static struct aac_mntinforesp *
211 aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);
/*
 * Character-device switch for the /dev/aac%d management node; runs under
 * Giant (D_NEEDGIANT).  Remaining members are on elided lines.
 */
213 static struct cdevsw aac_cdevsw = {
214 .d_version = D_VERSION,
215 .d_flags = D_NEEDGIANT,
217 .d_ioctl = aac_ioctl,
/* Driver-private malloc type and hw.aac sysctl tree. */
222 MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");
225 SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");
/*
 * aac_attach: one-time controller bring-up.
 * Checks firmware, initializes locks/queues/callout, allocates DMA
 * resources (aac_alloc) and initializes the adapter (aac_init), hooks the
 * interrupt, registers an intrhook to probe containers after interrupts
 * are enabled, creates the control device and the AIF kernel thread,
 * registers a shutdown_final handler, and arms the clock daemon callout.
 * NOTE(review): fragmentary excerpt — error-handling/return lines are
 * elided between the visible statements.
 */
232 * Initialize the controller and softc
235 aac_attach(struct aac_softc *sc)
239 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
242 * Initialize per-controller queues.
250 * Initialize command-completion task.
252 TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);
254 /* mark controller as suspended until we get ourselves organised */
255 sc->aac_state |= AAC_STATE_SUSPEND;
258 * Check that the firmware on the card is supported.
260 if ((error = aac_check_firmware(sc)) != 0)
266 mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF);
267 mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF);
268 mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF);
269 TAILQ_INIT(&sc->aac_container_tqh);
270 TAILQ_INIT(&sc->aac_ev_cmfree);
272 /* Initialize the clock daemon callout. */
273 callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
276 * Initialize the adapter.
278 if ((error = aac_alloc(sc)) != 0)
280 if ((error = aac_init(sc)) != 0)
284 * Allocate and connect our interrupt.
286 if ((error = aac_setup_intr(sc)) != 0)
290 * Print a little information about the controller.
292 aac_describe_controller(sc);
295 * Register to probe our containers later.
297 sc->aac_ich.ich_func = aac_startup;
298 sc->aac_ich.ich_arg = sc;
299 if (config_intrhook_establish(&sc->aac_ich) != 0) {
300 device_printf(sc->aac_dev,
301 "can't establish configuration hook\n");
306 * Make the control device.
308 unit = device_get_unit(sc->aac_dev);
309 sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR,
310 0640, "aac%d", unit);
/* Legacy alias names kept for compatibility with older management tools. */
311 (void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
312 (void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
313 sc->aac_dev_t->si_drv1 = sc;
315 /* Create the AIF thread */
316 if (kproc_create((void(*)(void *))aac_command_thread, sc,
317 &sc->aifthread, 0, 0, "aac%daif", unit))
318 panic("Could not create AIF thread");
320 /* Register the shutdown method to only be called post-dump */
321 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
322 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
323 device_printf(sc->aac_dev,
324 "shutdown event registration failed\n");
326 /* Register with CAM for the non-DASD devices */
327 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
328 TAILQ_INIT(&sc->aac_sim_tqh);
329 aac_get_bus_info(sc);
/* First daemon tick fires in 60s; aac_daemon reschedules itself. */
332 mtx_lock(&sc->aac_io_lock);
333 callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
334 mtx_unlock(&sc->aac_io_lock);
/*
 * aac_daemon: periodic callout (aac_io_lock held by callout_init_mtx)
 * that pushes the host's wall-clock time to the controller with a
 * SendHostTime synchronous FIB, then reschedules itself every 30 minutes.
 * NOTE(review): the declaration/initialization of `tv` (presumably via
 * getmicrotime) is on elided lines — confirm against the full source.
 */
340 aac_daemon(void *arg)
343 struct aac_softc *sc;
347 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* Bail if the callout was rescheduled or stopped while we were queued. */
349 if (callout_pending(&sc->aac_daemontime) ||
350 callout_active(&sc->aac_daemontime) == 0)
353 aac_alloc_sync_fib(sc, &fib);
354 *(uint32_t *)fib->data = tv.tv_sec;
355 aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
356 aac_release_sync_fib(sc);
357 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
/*
 * aac_add_event: register an event callback; currently only
 * AAC_EVENT_CMFREE (fire when a command becomes free) is queued, anything
 * else is logged as unknown.
 */
361 aac_add_event(struct aac_softc *sc, struct aac_event *event)
364 switch (event->ev_type & AAC_EVENT_MASK) {
365 case AAC_EVENT_CMFREE:
366 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
369 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
/*
 * aac_get_container_info: query the firmware for container #cid via a
 * ContainerCommand sync FIB (VM_NameServe, or VM_NameServe64 when 64-bit
 * LBA is enabled) and return a pointer to the response, which aliases the
 * FIB's data area.  NOTE(review): the error path after the device_printf
 * (elided lines) presumably returns NULL — callers such as aac_startup
 * check for NULL.
 */
378 * Request information of container #cid
380 static struct aac_mntinforesp *
381 aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
383 struct aac_mntinfo *mi;
385 mi = (struct aac_mntinfo *)&fib->data[0];
386 /* use 64-bit LBA if enabled */
387 mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
388 VM_NameServe64 : VM_NameServe;
389 mi->MntType = FT_FILESYS;
392 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
393 sizeof(struct aac_mntinfo))) {
394 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
398 return ((struct aac_mntinforesp *)&fib->data[0]);
/*
 * aac_startup: deferred (intrhook) container probe.  Disconnects from the
 * intrhook chain, iterates candidate container ids (bounded by the
 * firmware-reported count and AAC_MAX_CONTAINERS) adding a disk for each,
 * attaches the child devices, clears the SUSPEND state, and finally
 * unmasks controller interrupts.
 */
402 * Probe for containers, create disks.
405 aac_startup(void *arg)
407 struct aac_softc *sc;
409 struct aac_mntinforesp *mir;
410 int count = 0, i = 0;
412 sc = (struct aac_softc *)arg;
413 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
415 /* disconnect ourselves from the intrhook chain */
416 config_intrhook_disestablish(&sc->aac_ich);
418 mtx_lock(&sc->aac_io_lock);
419 aac_alloc_sync_fib(sc, &fib);
421 /* loop over possible containers */
423 if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
/* MntRespCount is refreshed from each response; loop below bounds it. */
426 count = mir->MntRespCount;
427 aac_add_container(sc, mir, 0);
429 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
431 aac_release_sync_fib(sc);
432 mtx_unlock(&sc->aac_io_lock);
434 /* poke the bus to actually attach the child devices */
435 if (bus_generic_attach(sc->aac_dev))
436 device_printf(sc->aac_dev, "bus_generic_attach failed\n");
438 /* mark the controller up */
439 sc->aac_state &= ~AAC_STATE_SUSPEND;
441 /* enable interrupts now */
442 AAC_UNMASK_INTERRUPTS(sc);
/*
 * aac_add_container: validate a container-probe response and, if it
 * describes a real volume, allocate a tracking structure, create an
 * "aacd" child device for it, and link it onto the softc's container list
 * under aac_container_lock.  NOTE(review): the malloc flags and the
 * NULL-check guarding the panic are on elided lines.
 */
446 * Create a device to represent a new container
449 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
451 struct aac_container *co;
455 * Check container volume type for validity. Note that many of
456 * the possible types may never show up.
458 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
459 co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
462 panic("Out of memory?!");
463 fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d",
464 mir->MntTable[0].ObjectId,
465 mir->MntTable[0].FileSystemName,
466 mir->MntTable[0].Capacity, mir->MntTable[0].VolType);
468 if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
469 device_printf(sc->aac_dev, "device_add_child failed\n");
471 device_set_ivars(child, co);
472 device_set_desc(child, aac_describe_code(aac_container_types,
473 mir->MntTable[0].VolType));
/* Snapshot the mount object; mir aliases a sync FIB that gets reused. */
476 bcopy(&mir->MntTable[0], &co->co_mntobj,
477 sizeof(struct aac_mntobj));
478 mtx_lock(&sc->aac_container_lock);
479 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
480 mtx_unlock(&sc->aac_container_lock);
/*
 * aac_alloc: allocate the controller's DMA resources — three tags (data
 * buffers, FIBs, and the common communication area), the common area
 * itself, and the initial pool of FIB-backed command structures.
 * NOTE(review): error-return statements after each device_printf are on
 * elided lines.
 */
485 * Allocate resources associated with (sc)
488 aac_alloc(struct aac_softc *sc)
491 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
494 * Create DMA tag for mapping buffers into controller-addressable space.
496 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
497 1, 0, /* algnmnt, boundary */
498 (sc->flags & AAC_FLAGS_SG_64BIT) ?
500 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
501 BUS_SPACE_MAXADDR, /* highaddr */
502 NULL, NULL, /* filter, filterarg */
503 MAXBSIZE, /* maxsize */
504 sc->aac_sg_tablesize, /* nsegments */
505 MAXBSIZE, /* maxsegsize */
506 BUS_DMA_ALLOCNOW, /* flags */
507 busdma_lock_mutex, /* lockfunc */
508 &sc->aac_io_lock, /* lockfuncarg */
509 &sc->aac_buffer_dmat)) {
510 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
515 * Create DMA tag for mapping FIBs into controller-addressable space..
517 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
518 1, 0, /* algnmnt, boundary */
519 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
520 BUS_SPACE_MAXADDR_32BIT :
521 0x7fffffff, /* lowaddr */
522 BUS_SPACE_MAXADDR, /* highaddr */
523 NULL, NULL, /* filter, filterarg */
524 sc->aac_max_fibs_alloc *
525 sc->aac_max_fib_size, /* maxsize */
527 sc->aac_max_fibs_alloc *
528 sc->aac_max_fib_size, /* maxsize */
530 NULL, NULL, /* No locking needed */
531 &sc->aac_fib_dmat)) {
532 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
537 * Create DMA tag for the common structure and allocate it.
539 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
540 1, 0, /* algnmnt, boundary */
541 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
542 BUS_SPACE_MAXADDR_32BIT :
543 0x7fffffff, /* lowaddr */
544 BUS_SPACE_MAXADDR, /* highaddr */
545 NULL, NULL, /* filter, filterarg */
546 8192 + sizeof(struct aac_common), /* maxsize */
548 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
550 NULL, NULL, /* No locking needed */
551 &sc->aac_common_dmat)) {
552 device_printf(sc->aac_dev,
553 "can't allocate common structure DMA tag\n");
556 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
557 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
558 device_printf(sc->aac_dev, "can't allocate common structure\n");
563 * Work around a bug in the 2120 and 2200 that cannot DMA commands
564 * below address 8192 in physical memory.
565 * XXX If the padding is not needed, can it be put to use instead
568 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
569 sc->aac_common, 8192 + sizeof(*sc->aac_common),
570 aac_common_map, sc, 0);
/* If the area landed below 8K, skip forward past the workaround pad. */
572 if (sc->aac_common_busaddr < 8192) {
573 sc->aac_common = (struct aac_common *)
574 ((uint8_t *)sc->aac_common + 8192);
575 sc->aac_common_busaddr += 8192;
577 bzero(sc->aac_common, sizeof(*sc->aac_common));
579 /* Allocate some FIBs and associated command structs */
580 TAILQ_INIT(&sc->aac_fibmap_tqh);
581 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
582 M_AACBUF, M_WAITOK|M_ZERO);
583 while (sc->total_fibs < sc->aac_max_fibs) {
584 if (aac_alloc_commands(sc) != 0)
/* At least one batch of FIBs must have been allocated to proceed. */
587 if (sc->total_fibs == 0)
/*
 * aac_free: release everything aac_alloc (and attach) set up — control
 * device, command/FIB pools and their DMA tags, the common area, the
 * interrupt, and the register-window resources.  Must not be called while
 * the controller is active.
 */
594 * Free all of the resources associated with (sc)
596 * Should not be called if the controller is active.
599 aac_free(struct aac_softc *sc)
602 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
604 /* remove the control device */
605 if (sc->aac_dev_t != NULL)
606 destroy_dev(sc->aac_dev_t);
608 /* throw away any FIB buffers, discard the FIB DMA tag */
609 aac_free_commands(sc);
610 if (sc->aac_fib_dmat)
611 bus_dma_tag_destroy(sc->aac_fib_dmat);
613 free(sc->aac_commands, M_AACBUF);
615 /* destroy the common area */
616 if (sc->aac_common) {
617 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
/* NOTE(review): aac_common may have been advanced past the 8K pad in
 * aac_alloc; the original presumably frees the adjusted pointer as-is. */
618 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
619 sc->aac_common_dmamap);
621 if (sc->aac_common_dmat)
622 bus_dma_tag_destroy(sc->aac_common_dmat);
624 /* disconnect the interrupt handler */
626 bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
627 if (sc->aac_irq != NULL)
628 bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid,
631 /* destroy data-transfer DMA tag */
632 if (sc->aac_buffer_dmat)
633 bus_dma_tag_destroy(sc->aac_buffer_dmat);
635 /* destroy the parent DMA tag */
636 if (sc->aac_parent_dmat)
637 bus_dma_tag_destroy(sc->aac_parent_dmat);
639 /* release the register window mapping */
640 if (sc->aac_regs_res0 != NULL)
641 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
642 sc->aac_regs_rid0, sc->aac_regs_res0);
/* NARK hardware maps a second register window; release it too. */
643 if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
644 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
645 sc->aac_regs_rid1, sc->aac_regs_res1);
/*
 * aac_detach: full teardown for driver unload.  Drains the clock daemon,
 * asks the AIF thread to exit and waits for it, deletes container and CAM
 * SIM children, shuts the controller down, deregisters the shutdown
 * handler, and destroys the softc mutexes.  NOTE(review): the aac_free
 * call and final return are on elided lines.
 */
649 * Disconnect from the controller completely, in preparation for unload.
652 aac_detach(device_t dev)
654 struct aac_softc *sc;
655 struct aac_container *co;
659 sc = device_get_softc(dev);
660 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
662 callout_drain(&sc->aac_daemontime);
/* Signal the AIF thread to exit; it wakes us on sc->aac_dev when done. */
664 mtx_lock(&sc->aac_io_lock);
665 while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
666 sc->aifflags |= AAC_AIFFLAGS_EXIT;
667 wakeup(sc->aifthread);
668 msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
670 mtx_unlock(&sc->aac_io_lock);
671 KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
672 ("%s: invalid detach state", __func__));
674 /* Remove the child containers */
675 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
676 error = device_delete_child(dev, co->co_disk);
679 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
683 /* Remove the CAM SIMs */
684 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
685 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
686 error = device_delete_child(dev, sim->sim_dev);
692 if ((error = aac_shutdown(dev)))
695 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
699 mtx_destroy(&sc->aac_aifq_lock);
700 mtx_destroy(&sc->aac_io_lock);
701 mtx_destroy(&sc->aac_container_lock);
/*
 * aac_shutdown: quiesce the controller for detach or system shutdown.
 * Sends VM_CloseAll (close every container) followed by FsaHostShutdown,
 * then masks interrupts.  Assumes all I/O has completed — shutdown is
 * refused while any device is open.
 */
707 * Bring the controller down to a dormant state and detach all child devices.
709 * This function is called before detach or system shutdown.
711 * Note that we can assume that the bioq on the controller is empty, as we won't
712 * allow shutdown if any device is open.
715 aac_shutdown(device_t dev)
717 struct aac_softc *sc;
719 struct aac_close_command *cc;
721 sc = device_get_softc(dev);
722 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
724 sc->aac_state |= AAC_STATE_SUSPEND;
727 * Send a Container shutdown followed by a HostShutdown FIB to the
728 * controller to convince it that we don't want to talk to it anymore.
729 * We've been closed and all I/O completed already
731 device_printf(sc->aac_dev, "shutting down controller...");
733 mtx_lock(&sc->aac_io_lock);
734 aac_alloc_sync_fib(sc, &fib);
735 cc = (struct aac_close_command *)&fib->data[0];
737 bzero(cc, sizeof(struct aac_close_command));
738 cc->Command = VM_CloseAll;
/* 0xffffffff = wildcard container id: close all containers. */
739 cc->ContainerId = 0xffffffff;
740 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
741 sizeof(struct aac_close_command)))
749 * XXX Issuing this command to the controller makes it shut down
750 * but also keeps it from coming back up without a reset of the
751 * PCI bus. This is not desirable if you are just unloading the
752 * driver module with the intent to reload it later.
754 if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
763 AAC_MASK_INTERRUPTS(sc);
764 aac_release_sync_fib(sc);
765 mtx_unlock(&sc->aac_io_lock);
/*
 * aac_suspend: mark the controller suspended and mask its interrupts in
 * preparation for system suspend.
 */
771 * Bring the controller to a quiescent state, ready for system suspend.
774 aac_suspend(device_t dev)
776 struct aac_softc *sc;
778 sc = device_get_softc(dev);
780 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
781 sc->aac_state |= AAC_STATE_SUSPEND;
783 AAC_MASK_INTERRUPTS(sc);
/*
 * aac_resume: clear the suspend flag and re-enable interrupts after a
 * system resume (inverse of aac_suspend).
 */
788 * Bring the controller back to a state ready for operation.
791 aac_resume(device_t dev)
793 struct aac_softc *sc;
795 sc = device_get_softc(dev);
797 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
798 sc->aac_state &= ~AAC_STATE_SUSPEND;
799 AAC_UNMASK_INTERRUPTS(sc);
/*
 * aac_new_intr: interrupt handler for the NEW_COMM message interface.
 * Drains the outbound queue: 0xffffffff means empty (re-read once to
 * settle), 0xfffffffe is a controller request currently ignored; an AIF
 * entry is copied out of adapter memory and handed to aac_handle_aif,
 * while a command completion index is mapped back to its aac_command and
 * completed.  Finishes by unfreezing and restarting I/O.
 * NOTE(review): the branch that distinguishes AIF entries from command
 * completions, and the "fast response" handling, span elided lines.
 */
804 * Interrupt handler for NEW_COMM interface.
807 aac_new_intr(void *arg)
809 struct aac_softc *sc;
810 u_int32_t index, fast;
811 struct aac_command *cm;
815 sc = (struct aac_softc *)arg;
817 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
818 mtx_lock(&sc->aac_io_lock);
820 index = AAC_GET_OUTB_QUEUE(sc);
/* 0xffffffff = queue empty; read once more in case it just filled. */
821 if (index == 0xffffffff)
822 index = AAC_GET_OUTB_QUEUE(sc);
823 if (index == 0xffffffff)
826 if (index == 0xfffffffe) {
827 /* XXX This means that the controller wants
828 * more work. Ignore it for now.
833 fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF,
836 /* If we're really this short on memory,
837 * hopefully breaking out of the handler will
838 * allow something to get freed. This
839 * actually sucks a whole lot.
/* Copy the AIF out of adapter memory one 32-bit word at a time. */
844 for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
845 ((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
846 aac_handle_aif(sc, fib);
850 * AIF memory is owned by the adapter, so let it
851 * know that we are done with it.
853 AAC_SET_OUTB_QUEUE(sc, index);
854 AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
/* Completion path: index encodes the command slot in its upper bits. */
857 cm = sc->aac_commands + (index >> 2);
860 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
861 *((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
864 aac_unmap_command(cm);
865 cm->cm_flags |= AAC_CMD_COMPLETED;
867 /* is there a completion handler? */
868 if (cm->cm_complete != NULL) {
871 /* assume that someone is sleeping on this
876 sc->flags &= ~AAC_QUEUE_FRZN;
879 /* see if we can start some more I/O */
880 if ((sc->flags & AAC_QUEUE_FRZN) == 0)
883 mtx_unlock(&sc->aac_io_lock);
/*
 * aac_filter: fast interrupt filter for the legacy (!NEW_COMM) interface.
 * Reads and acknowledges the interrupt status register without taking the
 * driver lock, defers response processing to the aac_task_complete
 * taskqueue, and wakes the AIF thread for adapter printf/command
 * notifications.
 */
887 * Interrupt filter for !NEW_COMM interface.
890 aac_filter(void *arg)
892 struct aac_softc *sc;
895 sc = (struct aac_softc *)arg;
897 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
899 * Read the status register directly. This is faster than taking the
900 * driver lock and reading the queues directly. It also saves having
901 * to turn parts of the driver lock into a spin mutex, which would be
904 reason = AAC_GET_ISTATUS(sc);
905 AAC_CLEAR_ISTATUS(sc, reason);
907 /* handle completion processing */
908 if (reason & AAC_DB_RESPONSE_READY)
909 taskqueue_enqueue_fast(taskqueue_fast, &sc->aac_task_complete);
911 /* controller wants to talk to us */
912 if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
914 * XXX Make sure that we don't get fooled by strange messages
915 * that start with a NULL.
917 if ((reason & AAC_DB_PRINTF) &&
918 (sc->aac_common->ac_printf[0] == 0))
/* 32 = ASCII space: force a non-NUL first byte so the AIF thread
 * notices the printf buffer. */
919 sc->aac_common->ac_printf[0] = 32;
922 * This might miss doing the actual wakeup. However, the
923 * msleep that this is waking up has a timeout, so it will
924 * wake up eventually. AIFs and printfs are low enough
925 * priority that they can handle hanging out for a few seconds
928 wakeup(sc->aifthread);
930 return (FILTER_HANDLED);
/*
 * aac_startio: issue as much queued I/O as the controller will take.
 * Pulls deferred commands off the ready queue (or builds new ones from
 * the bio queue), then DMA-maps each; EINPROGRESS from bus_dmamap_load
 * freezes the queue until resources free up, and the no-data case maps
 * directly with a NULL segment list.
 */
938 * Start as much queued I/O as possible on the controller
941 aac_startio(struct aac_softc *sc)
943 struct aac_command *cm;
946 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
950 * This flag might be set if the card is out of resources.
951 * Checking it here prevents an infinite loop of deferrals.
953 if (sc->flags & AAC_QUEUE_FRZN)
957 * Try to get a command that's been put off for lack of
960 cm = aac_dequeue_ready(sc);
963 * Try to build a command off the bio queue (ignore error
967 aac_bio_command(sc, &cm);
973 /* don't map more than once */
974 if (cm->cm_flags & AAC_CMD_MAPPED)
975 panic("aac: command %p already mapped", cm);
978 * Set up the command to go to the controller. If there are no
979 * data buffers associated with the command then it can bypass
982 if (cm->cm_datalen != 0) {
983 error = bus_dmamap_load(sc->aac_buffer_dmat,
984 cm->cm_datamap, cm->cm_data,
986 aac_map_command_sg, cm, 0);
987 if (error == EINPROGRESS) {
988 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
989 sc->flags |= AAC_QUEUE_FRZN;
991 } else if (error != 0)
992 panic("aac_startio: unexpected error %d from "
995 aac_map_command_sg(cm, NULL, 0, 0);
/*
 * aac_command_thread: per-controller kernel thread (created in
 * aac_attach) that services adapter-initiated traffic.  Sleeps with a
 * periodic timeout; on wakeup it allocates extra FIBs if requested
 * (dropping the lock, since allocation may sleep), checks for stuck
 * commands on timeout, drains the adapter printf buffer, and dequeues
 * host-normal-queue FIBs — handling AIFs and returning each FIB to the
 * adapter via the response queue.  Exits when AAC_AIFFLAGS_EXIT is set,
 * waking the detach path sleeping on sc->aac_dev.
 */
1000 * Handle notification of one or more FIBs coming from the controller.
1003 aac_command_thread(struct aac_softc *sc)
1005 struct aac_fib *fib;
1009 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1011 mtx_lock(&sc->aac_io_lock);
1012 sc->aifflags = AAC_AIFFLAGS_RUNNING;
1014 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1017 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1018 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1019 "aifthd", AAC_PERIODIC_INTERVAL * hz);
1022 * First see if any FIBs need to be allocated. This needs
1023 * to be called without the driver lock because contigmalloc
1026 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1027 mtx_unlock(&sc->aac_io_lock);
1028 aac_alloc_commands(sc);
1029 mtx_lock(&sc->aac_io_lock);
1030 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1035 * While we're here, check to see if any commands are stuck.
1036 * This is pretty low-priority, so it's ok if it doesn't
1039 if (retval == EWOULDBLOCK)
1042 /* Check the hardware printf message buffer */
1043 if (sc->aac_common->ac_printf[0] != 0)
1044 aac_print_printf(sc);
1046 /* Also check to see if the adapter has a command for us. */
1047 if (sc->flags & AAC_FLAGS_NEW_COMM)
1050 if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
1054 AAC_PRINT_FIB(sc, fib);
1056 switch (fib->Header.Command) {
1058 aac_handle_aif(sc, fib);
1061 device_printf(sc->aac_dev, "unknown command "
1062 "from controller\n");
/* Sanity-check the FIB header before handing it back. */
1066 if ((fib->Header.XferState == 0) ||
1067 (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
1071 /* Return the AIF to the controller. */
1072 if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
1073 fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
1074 *(AAC_FSAStatus*)fib->data = ST_OK;
1076 /* XXX Compute the Size field? */
1077 size = fib->Header.Size;
1078 if (size > sizeof(struct aac_fib)) {
1079 size = sizeof(struct aac_fib);
1080 fib->Header.Size = size;
1083 * Since we did not generate this command, it
1084 * cannot go through the normal
1085 * enqueue->startio chain.
1087 aac_enqueue_response(sc,
1088 AAC_ADAP_NORM_RESP_QUEUE,
1093 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1094 mtx_unlock(&sc->aac_io_lock);
/* Wake aac_detach, which sleeps on sc->aac_dev waiting for us. */
1095 wakeup(sc->aac_dev);
/*
 * aac_complete: taskqueue handler (queued from aac_filter) that drains
 * the host-normal response queue.  Each response FIB's SenderData indexes
 * back into sc->aac_commands; the command is unmapped, marked completed,
 * and either its completion callback runs or the sleeper on it is woken
 * (wakeup on elided lines).  Finally unfreezes the queue and restarts I/O.
 */
1101 * Process completed commands.
1104 aac_complete(void *context, int pending)
1106 struct aac_softc *sc;
1107 struct aac_command *cm;
1108 struct aac_fib *fib;
1111 sc = (struct aac_softc *)context;
1112 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1114 mtx_lock(&sc->aac_io_lock);
1116 /* pull completed commands off the queue */
1118 /* look for completed FIBs on our queue */
1119 if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
1121 break; /* nothing to do */
1123 /* get the command, unmap and hand off for processing */
1124 cm = sc->aac_commands + fib->Header.SenderData;
1126 AAC_PRINT_FIB(sc, fib);
/* Note commands that completed only after aac_timeout flagged them. */
1129 if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
1130 device_printf(sc->aac_dev,
1131 "COMMAND %p COMPLETED AFTER %d SECONDS\n",
1132 cm, (int)(time_uptime-cm->cm_timestamp));
1134 aac_remove_busy(cm);
1136 aac_unmap_command(cm);
1137 cm->cm_flags |= AAC_CMD_COMPLETED;
1139 /* is there a completion handler? */
1140 if (cm->cm_complete != NULL) {
1141 cm->cm_complete(cm);
1143 /* assume that someone is sleeping on this command */
1148 /* see if we can start some more I/O */
1149 sc->flags &= ~AAC_QUEUE_FRZN;
1152 mtx_unlock(&sc->aac_io_lock);
/*
 * aac_submit_bio: entry point from the aacd disk layer.  Queues the bio
 * on the controller's bio queue; the subsequent kick of aac_startio is on
 * elided lines.
 */
1156 * Handle a bio submitted from a disk device.
1159 aac_submit_bio(struct bio *bp)
1161 struct aac_disk *ad;
1162 struct aac_softc *sc;
1164 ad = (struct aac_disk *)bp->bio_disk->d_drv1;
1165 sc = ad->ad_controller;
1166 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1168 /* queue the BIO and try to get some work done */
1169 aac_enqueue_bio(sc, bp);
/*
 * aac_bio_command: dequeue a bio and build an aac_command/FIB for it,
 * returned via *cmp.  Three FIB formats are used depending on controller
 * capabilities: RawIo (AAC_FLAGS_RAW_IO), 32-bit VM_CtBlockRead/Write,
 * or 64-bit VM_CtHostRead64/Write64 (AAC_FLAGS_SG_64BIT).  On failure the
 * bio is requeued and the command released (error labels on elided
 * lines).  NOTE(review): the return statements and success path are also
 * elided — callers (aac_startio) ignore the return value.
 */
1174 * Get a bio and build a command to go with it.
1177 aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
1179 struct aac_command *cm;
1180 struct aac_fib *fib;
1181 struct aac_disk *ad;
1184 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1186 /* get the resources we will need */
1189 if (aac_alloc_command(sc, &cm)) /* get a command */
1191 if ((bp = aac_dequeue_bio(sc)) == NULL)
1194 /* fill out the command */
1195 cm->cm_data = (void *)bp->bio_data;
1196 cm->cm_datalen = bp->bio_bcount;
1197 cm->cm_complete = aac_bio_complete;
1198 cm->cm_private = bp;
/* Timestamp for aac_timeout's stuck-command detection. */
1199 cm->cm_timestamp = time_uptime;
1203 fib->Header.Size = sizeof(struct aac_fib_header);
1204 fib->Header.XferState =
1205 AAC_FIBSTATE_HOSTOWNED |
1206 AAC_FIBSTATE_INITIALISED |
1207 AAC_FIBSTATE_EMPTY |
1208 AAC_FIBSTATE_FROMHOST |
1209 AAC_FIBSTATE_REXPECTED |
1211 AAC_FIBSTATE_ASYNC |
1212 AAC_FIBSTATE_FAST_RESPONSE;
1214 /* build the read/write request */
1215 ad = (struct aac_disk *)bp->bio_disk->d_drv1;
1217 if (sc->flags & AAC_FLAGS_RAW_IO) {
1218 struct aac_raw_io *raw;
1219 raw = (struct aac_raw_io *)&fib->data[0];
1220 fib->Header.Command = RawIo;
1221 raw->BlockNumber = (u_int64_t)bp->bio_pblkno;
1222 raw->ByteCount = bp->bio_bcount;
1223 raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1225 raw->BpComplete = 0;
1226 fib->Header.Size += sizeof(struct aac_raw_io);
1227 cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
1228 if (bp->bio_cmd == BIO_READ) {
1230 cm->cm_flags |= AAC_CMD_DATAIN;
1233 cm->cm_flags |= AAC_CMD_DATAOUT;
1235 } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
/* 32-bit scatter/gather path. */
1236 fib->Header.Command = ContainerCommand;
1237 if (bp->bio_cmd == BIO_READ) {
1238 struct aac_blockread *br;
1239 br = (struct aac_blockread *)&fib->data[0];
1240 br->Command = VM_CtBlockRead;
1241 br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1242 br->BlockNumber = bp->bio_pblkno;
1243 br->ByteCount = bp->bio_bcount;
1244 fib->Header.Size += sizeof(struct aac_blockread);
1245 cm->cm_sgtable = &br->SgMap;
1246 cm->cm_flags |= AAC_CMD_DATAIN;
1248 struct aac_blockwrite *bw;
1249 bw = (struct aac_blockwrite *)&fib->data[0];
1250 bw->Command = VM_CtBlockWrite;
1251 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1252 bw->BlockNumber = bp->bio_pblkno;
1253 bw->ByteCount = bp->bio_bcount;
1254 bw->Stable = CUNSTABLE;
1255 fib->Header.Size += sizeof(struct aac_blockwrite);
1256 cm->cm_flags |= AAC_CMD_DATAOUT;
1257 cm->cm_sgtable = &bw->SgMap;
/* 64-bit scatter/gather path. */
1260 fib->Header.Command = ContainerCommand64;
1261 if (bp->bio_cmd == BIO_READ) {
1262 struct aac_blockread64 *br;
1263 br = (struct aac_blockread64 *)&fib->data[0];
1264 br->Command = VM_CtHostRead64;
1265 br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1266 br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
1267 br->BlockNumber = bp->bio_pblkno;
1270 fib->Header.Size += sizeof(struct aac_blockread64);
1271 cm->cm_flags |= AAC_CMD_DATAIN;
1272 cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
1274 struct aac_blockwrite64 *bw;
1275 bw = (struct aac_blockwrite64 *)&fib->data[0];
1276 bw->Command = VM_CtHostWrite64;
1277 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
1278 bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
1279 bw->BlockNumber = bp->bio_pblkno;
1282 fib->Header.Size += sizeof(struct aac_blockwrite64);
1283 cm->cm_flags |= AAC_CMD_DATAOUT;
1284 cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
/* Error path: put the bio back and release the command. */
1293 aac_enqueue_bio(sc, bp);
1295 aac_release_command(cm);
1300 * Handle a bio-instigated command that has been completed.
/*
 * NOTE(review): this excerpt is missing intermediate source lines (the
 * internal numbering jumps), so braces/else-arms are not all visible.
 * Visible behavior: extract the FSA status from the completed FIB,
 * release the command, then propagate success/failure to the bio.
 */
1303 aac_bio_complete(struct aac_command *cm)
1305 struct aac_blockread_response *brr;
1306 struct aac_blockwrite_response *bwr;
1308 AAC_FSAStatus status;
1310 /* fetch relevant status and then release the command */
1311 bp = (struct bio *)cm->cm_private;
/* read and write responses both carry Status at the head of the FIB data */
1312 if (bp->bio_cmd == BIO_READ) {
1313 brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
1314 status = brr->Status;
1316 bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
1317 status = bwr->Status;
/* status copied out: the command/FIB may be recycled immediately */
1319 aac_release_command(cm);
1321 /* fix up the bio based on status */
1322 if (status == ST_OK) {
1325 bp->bio_error = EIO;
1326 bp->bio_flags |= BIO_ERROR;
1327 /* pass an error string out to the disk layer */
1328 bp->bio_driver1 = aac_describe_code(aac_command_status_table,
1335 * Submit a command to the controller, return when it completes.
1336 * XXX This is very dangerous! If the card has gone out to lunch, we could
1337 * be stuck here forever. At the same time, signals are not caught
1338 * because there is a risk that a signal could wakeup the sleep before
1339 * the card has a chance to complete the command. Since there is no way
1340 * to cancel a command that is in progress, we can't protect against the
1341 * card completing a command late and spamming the command and data
1342 * memory. So, we are held hostage until the command completes.
1345 aac_wait_command(struct aac_command *cm)
1347 struct aac_softc *sc;
1351 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1353 /* Put the command on the ready queue and get things going */
1354 aac_enqueue_ready(cm);
/*
 * Sleep on the command itself; timeout 0 == wait forever, PRIBIO and no
 * PCATCH == not interruptible by signals (see XXX block above).
 */
1356 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
1361 *Command Buffer Management
1365 * Allocate a command.
/*
 * Pop a command off the adapter's freelist.  If the freelist is empty but
 * we have not yet reached the FIB limit, prod the AIF kthread to allocate
 * another batch (allocation must not happen in this context).
 */
1368 aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1370 struct aac_command *cm;
1372 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1374 if ((cm = aac_dequeue_free(sc)) == NULL) {
1375 if (sc->total_fibs < sc->aac_max_fibs) {
/* flag the request under the lock, then wake the AIF thread to do it */
1376 mtx_lock(&sc->aac_io_lock);
1377 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1378 mtx_unlock(&sc->aac_io_lock);
1379 wakeup(sc->aifthread);
1389 * Release a command back to the freelist.
/*
 * Scrub the per-command state and FIB header back to a pristine state,
 * return the command to the freelist, and fire one pending "command
 * available" event callback if anyone is waiting for a free command.
 */
1392 aac_release_command(struct aac_command *cm)
1394 struct aac_event *event;
1395 struct aac_softc *sc;
1398 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1400 /* (re)initialize the command/FIB */
1401 cm->cm_sgtable = NULL;
1403 cm->cm_complete = NULL;
1404 cm->cm_private = NULL;
1405 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
1406 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1407 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1408 cm->cm_fib->Header.Flags = 0;
1409 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1412 * These are duplicated in aac_start to cover the case where an
1413 * intermediate stage may have destroyed them. They're left
1414 * initialized here for debugging purposes only.
1416 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1417 cm->cm_fib->Header.SenderData = 0;
1419 aac_enqueue_free(cm);
/* notify the first waiter (if any) that a command is free again */
1421 if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1422 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1423 event->ev_callback(sc, event, event->ev_arg);
1428 * Map helper for command/FIB allocation.
/*
 * bus_dmamap_load() callback: stash the single segment's bus address into
 * the caller-supplied uint64_t (the FIB block is physically contiguous).
 */
1431 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1435 fibphys = (uint64_t *)arg;
1437 *fibphys = segs[0].ds_addr;
1441 * Allocate and initialize commands/FIBs for this adapter.
/*
 * Allocate one fibmap's worth (aac_max_fibs_alloc) of FIBs in DMA-able
 * memory, wire each FIB to a command slot in sc->aac_commands, create a
 * data DMA map per command, and put every command on the freelist.
 * NOTE(review): error-unwind lines are partially elided in this excerpt;
 * the trailing unload/free pair is the failure path.
 */
1444 aac_alloc_commands(struct aac_softc *sc)
1446 struct aac_command *cm;
1447 struct aac_fibmap *fm;
1451 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* respect the controller's overall FIB limit */
1453 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1456 fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
1460 /* allocate the FIBs in DMAable memory and load them */
1461 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1462 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1463 device_printf(sc->aac_dev,
1464 "Not enough contiguous memory available.\n");
1469 /* Ignore errors since this doesn't bounce */
1470 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1471 sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
1472 aac_map_command_helper, &fibphys, 0);
1474 /* initialize constant fields in the command structure */
1475 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
1476 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1477 cm = sc->aac_commands + sc->total_fibs;
1478 fm->aac_commands = cm;
/* each command gets one max-FIB-size slice of the contiguous block */
1480 cm->cm_fib = (struct aac_fib *)
1481 ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
1482 cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
1483 cm->cm_index = sc->total_fibs;
1485 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1486 &cm->cm_datamap)) != 0)
1488 mtx_lock(&sc->aac_io_lock);
1489 aac_release_command(cm);
1491 mtx_unlock(&sc->aac_io_lock);
1495 mtx_lock(&sc->aac_io_lock);
1496 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1497 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1498 mtx_unlock(&sc->aac_io_lock);
/* failure path: tear down the DMA mapping and memory */
1502 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1503 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1509 * Free FIBs owned by this adapter.
/*
 * Walk every fibmap, destroying each command's data DMA map, then unload
 * and free the fibmap's DMA memory.
 */
1512 aac_free_commands(struct aac_softc *sc)
1514 struct aac_fibmap *fm;
1515 struct aac_command *cm;
1518 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1520 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1522 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1524 * We check against total_fibs to handle partially
/* total_fibs-- in the condition handles a partially-filled last fibmap */
1527 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1528 cm = fm->aac_commands + i;
1529 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1531 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1532 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1538 * Command-mapping helper function - populate this command's s/g table.
/*
 * bus_dmamap_load() callback: write the DMA segments into whichever s/g
 * table layout the FIB requires (raw / 32-bit / 64-bit), finish the FIB
 * header addressing fields, sync the data map, and hand the command to
 * the controller (new-comm register path or legacy FIB queue path).
 * NOTE(review): some lines are elided in this excerpt, including parts of
 * the retry/requeue logic in the send loops.
 */
1541 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1543 struct aac_softc *sc;
1544 struct aac_command *cm;
1545 struct aac_fib *fib;
1548 cm = (struct aac_command *)arg;
1551 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1553 /* copy into the FIB */
1554 if (cm->cm_sgtable != NULL) {
1555 if (fib->Header.Command == RawIo) {
1556 struct aac_sg_tableraw *sg;
1557 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1559 for (i = 0; i < nseg; i++) {
1560 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1561 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1562 sg->SgEntryRaw[i].Next = 0;
1563 sg->SgEntryRaw[i].Prev = 0;
1564 sg->SgEntryRaw[i].Flags = 0;
1566 /* update the FIB size for the s/g count */
1567 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1568 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1569 struct aac_sg_table *sg;
1570 sg = cm->cm_sgtable;
1572 for (i = 0; i < nseg; i++) {
1573 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1574 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1576 /* update the FIB size for the s/g count */
1577 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1579 struct aac_sg_table64 *sg;
1580 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1582 for (i = 0; i < nseg; i++) {
1583 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1584 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1586 /* update the FIB size for the s/g count */
1587 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1591 /* Fix up the address values in the FIB. Use the command array index
1592 * instead of a pointer since these fields are only 32 bits. Shift
1593 * the SenderFibAddress over to make room for the fast response bit
1594 * and for the AIF bit
1596 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1597 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1599 /* save a pointer to the command for speedy reverse-lookup */
1600 cm->cm_fib->Header.SenderData = cm->cm_index;
/* pre-I/O sync: direction depends on the DATAIN/DATAOUT flags */
1602 if (cm->cm_flags & AAC_CMD_DATAIN)
1603 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1604 BUS_DMASYNC_PREREAD);
1605 if (cm->cm_flags & AAC_CMD_DATAOUT)
1606 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1607 BUS_DMASYNC_PREWRITE);
1608 cm->cm_flags |= AAC_CMD_MAPPED;
1610 if (sc->flags & AAC_FLAGS_NEW_COMM) {
/* new-comm path: poll the send register, backing off 5us per retry */
1611 int count = 10000000L;
1612 while (AAC_SEND_COMMAND(sc, cm) != 0) {
1614 aac_unmap_command(cm);
1615 sc->flags |= AAC_QUEUE_FRZN;
1616 aac_requeue_ready(cm);
1618 DELAY(5); /* wait 5 usec. */
1621 /* Put the FIB on the outbound queue */
1622 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
/* queue full: unmap, freeze the queue, and retry later */
1623 aac_unmap_command(cm);
1624 sc->flags |= AAC_QUEUE_FRZN;
1625 aac_requeue_ready(cm);
1633 * Unmap a command from controller-visible space.
/*
 * Post-I/O: sync the data buffer in the completed direction(s), unload
 * the DMA map, and clear AAC_CMD_MAPPED.  No-op if not mapped.
 */
1636 aac_unmap_command(struct aac_command *cm)
1638 struct aac_softc *sc;
1641 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1643 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1646 if (cm->cm_datalen != 0) {
1647 if (cm->cm_flags & AAC_CMD_DATAIN)
1648 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1649 BUS_DMASYNC_POSTREAD);
1650 if (cm->cm_flags & AAC_CMD_DATAOUT)
1651 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1652 BUS_DMASYNC_POSTWRITE);
1654 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1656 cm->cm_flags &= ~AAC_CMD_MAPPED;
1660 * Hardware Interface
1664 * Initialize the adapter.
/*
 * bus_dmamap_load() callback for the shared "common" area: record its
 * bus address for later use (init structure, queues, printf buffer).
 */
1667 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1669 struct aac_softc *sc;
1671 sc = (struct aac_softc *)arg;
1672 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1674 sc->aac_common_busaddr = segs[0].ds_addr;
/*
 * Wait for the controller firmware to come up, then interrogate it:
 * firmware version (reject PERC2/QC 1.x), supported-options word
 * (64-bit s/g, new comm interface, 64-bit arrays, 4GB window), ATU size
 * (possibly remapping the register BAR), and preferred FIB/sector/sg
 * sizing.  NOTE(review): several lines (returns, some braces) are elided
 * in this excerpt.
 */
1678 aac_check_firmware(struct aac_softc *sc)
1680 u_int32_t code, major, minor, options = 0, atu_size = 0;
1684 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1686 * Wait for the adapter to come ready.
1690 code = AAC_GET_FWSTATUS(sc);
1691 if (code & AAC_SELF_TEST_FAILED) {
1692 device_printf(sc->aac_dev, "FATAL: selftest failed\n");
1695 if (code & AAC_KERNEL_PANIC) {
1696 device_printf(sc->aac_dev,
1697 "FATAL: controller kernel panic");
/* give up if the adapter hasn't come ready within AAC_BOOT_TIMEOUT */
1700 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1701 device_printf(sc->aac_dev,
1702 "FATAL: controller not coming ready, "
1703 "status %x\n", code);
1706 } while (!(code & AAC_UP_AND_RUNNING));
1709 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1710 * firmware version 1.x are not compatible with this driver.
1712 if (sc->flags & AAC_FLAGS_PERC2QC) {
1713 if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1715 device_printf(sc->aac_dev,
1716 "Error reading firmware version\n");
1720 /* These numbers are stored as ASCII! */
1721 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1722 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1724 device_printf(sc->aac_dev,
1725 "Firmware version %d.%d is not supported.\n",
1732 * Retrieve the capabilities/supported options word so we know what
1733 * work-arounds to enable. Some firmware revs don't support this
1736 if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
1737 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1738 device_printf(sc->aac_dev,
1739 "RequestAdapterInfo failed\n");
1743 options = AAC_GET_MAILBOX(sc, 1);
1744 atu_size = AAC_GET_MAILBOX(sc, 2);
1745 sc->supported_options = options;
/* translate firmware option bits into driver flags */
1747 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1748 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1749 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1750 if (options & AAC_SUPPORTED_NONDASD)
1751 sc->flags |= AAC_FLAGS_ENABLE_CAM;
1752 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1753 && (sizeof(bus_addr_t) > 4)) {
1754 device_printf(sc->aac_dev,
1755 "Enabling 64-bit address support\n");
1756 sc->flags |= AAC_FLAGS_SG_64BIT;
1758 if ((options & AAC_SUPPORTED_NEW_COMM)
1759 && sc->aac_if.aif_send_command)
1760 sc->flags |= AAC_FLAGS_NEW_COMM;
1761 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1762 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1765 /* Check for broken hardware that does a lower number of commands */
1766 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1768 /* Remap mem. resource, if required */
1769 if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
1770 atu_size > rman_get_size(sc->aac_regs_res1)) {
1771 bus_release_resource(
1772 sc->aac_dev, SYS_RES_MEMORY,
1773 sc->aac_regs_rid1, sc->aac_regs_res1);
1774 sc->aac_regs_res1 = bus_alloc_resource(
1775 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid1,
1776 0ul, ~0ul, atu_size, RF_ACTIVE);
1777 if (sc->aac_regs_res1 == NULL) {
/* fall back to the original BAR and disable new-comm */
1778 sc->aac_regs_res1 = bus_alloc_resource_any(
1779 sc->aac_dev, SYS_RES_MEMORY,
1780 &sc->aac_regs_rid1, RF_ACTIVE);
1781 if (sc->aac_regs_res1 == NULL) {
1782 device_printf(sc->aac_dev,
1783 "couldn't allocate register window\n");
1786 sc->flags &= ~AAC_FLAGS_NEW_COMM;
1788 sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
1789 sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);
/* NARK hardware aliases register window 0 onto window 1 */
1791 if (sc->aac_hwif == AAC_HWIF_NARK) {
1792 sc->aac_regs_res0 = sc->aac_regs_res1;
1793 sc->aac_regs_rid0 = sc->aac_regs_rid1;
1794 sc->aac_btag0 = sc->aac_btag1;
1795 sc->aac_bhandle0 = sc->aac_bhandle1;
1799 /* Read preferred settings */
1800 sc->aac_max_fib_size = sizeof(struct aac_fib);
1801 sc->aac_max_sectors = 128; /* 64KB */
1802 if (sc->flags & AAC_FLAGS_SG_64BIT)
1803 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1804 - sizeof(struct aac_blockwrite64))
1805 / sizeof(struct aac_sg_entry64);
1807 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1808 - sizeof(struct aac_blockwrite))
1809 / sizeof(struct aac_sg_entry);
/* firmware-preferred values override the defaults when available */
1811 if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
1812 options = AAC_GET_MAILBOX(sc, 1);
1813 sc->aac_max_fib_size = (options & 0xFFFF);
1814 sc->aac_max_sectors = (options >> 16) << 1;
1815 options = AAC_GET_MAILBOX(sc, 2);
1816 sc->aac_sg_tablesize = (options >> 16);
1817 options = AAC_GET_MAILBOX(sc, 3);
1818 sc->aac_max_fibs = (options & 0xFFFF);
/* cap FIB size at a page so a page holds a whole number of FIBs */
1820 if (sc->aac_max_fib_size > PAGE_SIZE)
1821 sc->aac_max_fib_size = PAGE_SIZE;
1822 sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;
1824 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1825 sc->flags |= AAC_FLAGS_RAW_IO;
1826 device_printf(sc->aac_dev, "Enable Raw I/O\n");
1828 if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1829 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1830 sc->flags |= AAC_FLAGS_LBA_64BIT;
1831 device_printf(sc->aac_dev, "Enable 64-bit array\n");
/*
 * Build the adapter-init structure in the shared common area, lay out
 * the eight host/adapter FIB queues, perform controller-type-specific
 * doorbell initialisation, and hand the init structure to the firmware
 * via a sync command.  NOTE(review): some lines are elided in this
 * excerpt (e.g. return statements and a few case labels).
 */
1838 aac_init(struct aac_softc *sc)
1840 struct aac_adapter_init *ip;
1844 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1847 * Fill in the init structure. This tells the adapter about the
1848 * physical location of various important shared data structures.
1850 ip = &sc->aac_common->ac_init;
1851 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1852 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1853 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1854 sc->flags |= AAC_FLAGS_RAW_IO;
1856 ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;
1858 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1859 offsetof(struct aac_common, ac_fibs);
1860 ip->AdapterFibsVirtualAddress = 0;
1861 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1862 ip->AdapterFibAlign = sizeof(struct aac_fib);
1864 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1865 offsetof(struct aac_common, ac_printf);
1866 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1869 * The adapter assumes that pages are 4K in size, except on some
1870 * broken firmware versions that do the page->byte conversion twice,
1871 * therefore 'assuming' that this value is in 16MB units (2^24).
1872 * Round up since the granularity is so high.
1874 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1875 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1876 ip->HostPhysMemPages =
1877 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1879 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */
1882 if (sc->flags & AAC_FLAGS_NEW_COMM) {
1883 ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1884 device_printf(sc->aac_dev, "New comm. interface enabled\n");
1887 ip->MaxIoCommands = sc->aac_max_fibs;
1888 ip->MaxIoSize = sc->aac_max_sectors << 9;
1889 ip->MaxFibSize = sc->aac_max_fib_size;
1892 * Initialize FIB queues. Note that it appears that the layout of the
1893 * indexes and the segmentation of the entries may be mandated by the
1894 * adapter, which is only told about the base of the queue index fields.
1896 * The initial values of the indices are assumed to inform the adapter
1897 * of the sizes of the respective queues, and theoretically it could
1898 * work out the entire layout of the queue structures from this. We
1899 * take the easy route and just lay this area out like everyone else
1902 * The Linux driver uses a much more complex scheme whereby several
1903 * header records are kept for each queue. We use a couple of generic
1904 * list manipulation functions which 'know' the size of each list by
1905 * virtue of a table.
/* align the queue area within the common buffer */
1907 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
1908 qoffset &= ~(AAC_QUEUE_ALIGN - 1);
1910 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset);
1911 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset;
/* PI == CI == queue size tells the adapter each queue is empty */
1913 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1914 AAC_HOST_NORM_CMD_ENTRIES;
1915 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1916 AAC_HOST_NORM_CMD_ENTRIES;
1917 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1918 AAC_HOST_HIGH_CMD_ENTRIES;
1919 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1920 AAC_HOST_HIGH_CMD_ENTRIES;
1921 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1922 AAC_ADAP_NORM_CMD_ENTRIES;
1923 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1924 AAC_ADAP_NORM_CMD_ENTRIES;
1925 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1926 AAC_ADAP_HIGH_CMD_ENTRIES;
1927 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1928 AAC_ADAP_HIGH_CMD_ENTRIES;
1929 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1930 AAC_HOST_NORM_RESP_ENTRIES;
1931 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1932 AAC_HOST_NORM_RESP_ENTRIES;
1933 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1934 AAC_HOST_HIGH_RESP_ENTRIES;
1935 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1936 AAC_HOST_HIGH_RESP_ENTRIES;
1937 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1938 AAC_ADAP_NORM_RESP_ENTRIES;
1939 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1940 AAC_ADAP_NORM_RESP_ENTRIES;
1941 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1942 AAC_ADAP_HIGH_RESP_ENTRIES;
1943 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1944 AAC_ADAP_HIGH_RESP_ENTRIES;
/* cache per-queue entry-array base pointers for the queue helpers */
1945 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] =
1946 &sc->aac_queues->qt_HostNormCmdQueue[0];
1947 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] =
1948 &sc->aac_queues->qt_HostHighCmdQueue[0];
1949 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] =
1950 &sc->aac_queues->qt_AdapNormCmdQueue[0];
1951 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] =
1952 &sc->aac_queues->qt_AdapHighCmdQueue[0];
1953 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] =
1954 &sc->aac_queues->qt_HostNormRespQueue[0];
1955 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] =
1956 &sc->aac_queues->qt_HostHighRespQueue[0];
1957 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] =
1958 &sc->aac_queues->qt_AdapNormRespQueue[0];
1959 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] =
1960 &sc->aac_queues->qt_AdapHighRespQueue[0];
1963 * Do controller-type-specific initialisation
1965 switch (sc->aac_hwif) {
1966 case AAC_HWIF_I960RX:
1967 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0);
1970 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0);
1977 * Give the init structure to the controller.
1979 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT,
1980 sc->aac_common_busaddr +
1981 offsetof(struct aac_common, ac_init), 0, 0, 0,
1983 device_printf(sc->aac_dev,
1984 "error establishing init structure\n");
/*
 * Allocate the IRQ resource and hook the interrupt: an ithread handler
 * for the new-comm interface, otherwise a filter (aac_filter) for the
 * legacy queue-based interface.
 */
1995 aac_setup_intr(struct aac_softc *sc)
1997 sc->aac_irq_rid = 0;
1998 if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ,
2001 RF_ACTIVE)) == NULL) {
2002 device_printf(sc->aac_dev, "can't allocate interrupt\n");
2005 if (sc->flags & AAC_FLAGS_NEW_COMM) {
2006 if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
2007 INTR_MPSAFE|INTR_TYPE_BIO, NULL,
2008 aac_new_intr, sc, &sc->aac_intr)) {
2009 device_printf(sc->aac_dev, "can't set up interrupt\n");
2013 if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
2014 INTR_TYPE_BIO, aac_filter, NULL,
2015 sc, &sc->aac_intr)) {
2016 device_printf(sc->aac_dev,
2017 "can't set up interrupt filter\n");
2025 * Send a synchronous command to the controller and wait for a result.
2026 * Indicate if the controller completed the command with an error status.
/*
 * Mailbox-based synchronous command: write args, ring the SYNC_COMMAND
 * doorbell, spin (with a timeout) until the controller acks, then read
 * the status back from mailbox 0.
 */
2029 aac_sync_command(struct aac_softc *sc, u_int32_t command,
2030 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2036 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2038 /* populate the mailbox */
2039 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2041 /* ensure the sync command doorbell flag is cleared */
2042 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2044 /* then set it to signal the adapter */
2045 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2047 /* spin waiting for the command to complete */
2050 if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) {
2051 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2054 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2056 /* clear the completion flag */
2057 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2059 /* get the command status */
2060 status = AAC_GET_MAILBOX(sc, 0);
2064 if (status != AAC_SRB_STS_SUCCESS)
/*
 * Send a FIB synchronously via the AAC_MONKER_SYNCFIB mailbox command.
 * Caller must hold aac_io_lock; datasize must fit the FIB data area.
 */
2070 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2071 struct aac_fib *fib, u_int16_t datasize)
2073 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2074 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2076 if (datasize > AAC_FIB_DATASIZE)
2080 * Set up the sync FIB
2082 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2083 AAC_FIBSTATE_INITIALISED |
2085 fib->Header.XferState |= xferstate;
2086 fib->Header.Command = command;
2087 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2088 fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2089 fib->Header.SenderSize = sizeof(struct aac_fib);
2090 fib->Header.SenderFibAddress = 0; /* Not needed */
/* the controller reads the FIB from the shared common area */
2091 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
2092 offsetof(struct aac_common,
2096 * Give the FIB to the controller, wait for a response.
2098 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
2099 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
2100 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2108 * Adapter-space FIB queue manipulation
2110 * Note that the queue implementation here is a little funky; neither the PI or
2111 * CI will ever be zero. This behaviour is a controller feature.
/*
 * Per-queue {size, doorbell-notify bit} table, indexed by the
 * AAC_*_QUEUE constants; a zero notify bit means "no notification".
 */
2117 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
2118 {AAC_HOST_HIGH_CMD_ENTRIES, 0},
2119 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
2120 {AAC_ADAP_HIGH_CMD_ENTRIES, 0},
2121 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
2122 {AAC_HOST_HIGH_RESP_ENTRIES, 0},
2123 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
2124 {AAC_ADAP_HIGH_RESP_ENTRIES, 0}
2128 * Atomically insert an entry into the nominated queue, returns 0 on success or
2129 * EBUSY if the queue is full.
2131 * Note: it would be more efficient to defer notifying the controller in
2132 * the case where we may be inserting several entries in rapid succession,
2133 * but implementing this usefully may be difficult (it would involve a
2134 * separate queue/notify interface).
2137 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
2144 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2146 fib_size = cm->cm_fib->Header.Size;
2147 fib_addr = cm->cm_fib->Header.ReceiverFibAddress;
2149 /* get the producer/consumer indices */
2150 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2151 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2153 /* wrap the queue? */
2154 if (pi >= aac_qinfo[queue].size)
2157 /* check for queue full */
2158 if ((pi + 1) == ci) {
2164 * To avoid a race with its completion interrupt, place this command on
2165 * the busy queue prior to advertising it to the controller.
2167 aac_enqueue_busy(cm);
2169 /* populate queue entry */
2170 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2171 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2173 /* update producer index */
2174 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2176 /* notify the adapter if we know how */
2177 if (aac_qinfo[queue].notify != 0)
2178 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2187 * Atomically remove one entry from the nominated queue, returns 0 on
2188 * success or ENOENT if the queue is empty.
/*
 * NOTE(review): some lines (wrap assignments, empty-check body) are
 * elided in this excerpt; the visible logic decodes aq_fib_addr as an
 * index — into the adapter AIF FIB array for host command queues, or
 * into the driver's command array for host response queues.
 */
2191 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
2192 struct aac_fib **fib_addr)
2195 u_int32_t fib_index;
2199 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2201 /* get the producer/consumer indices */
2202 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2203 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2205 /* check for queue empty */
2211 /* wrap the pi so the following test works */
2212 if (pi >= aac_qinfo[queue].size)
2219 /* wrap the queue? */
2220 if (ci >= aac_qinfo[queue].size)
2223 /* fetch the entry */
2224 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;
2227 case AAC_HOST_NORM_CMD_QUEUE:
2228 case AAC_HOST_HIGH_CMD_QUEUE:
2230 * The aq_fib_addr is only 32 bits wide so it can't be counted
2231 * on to hold an address. For AIF's, the adapter assumes
2232 * that it's giving us an address into the array of AIF fibs.
2233 * Therefore, we have to convert it to an index.
2235 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
2236 sizeof(struct aac_fib);
2237 *fib_addr = &sc->aac_common->ac_fibs[fib_index];
2240 case AAC_HOST_NORM_RESP_QUEUE:
2241 case AAC_HOST_HIGH_RESP_QUEUE:
2243 struct aac_command *cm;
2246 * As above, an index is used instead of an actual address.
2247 * Gotta shift the index to account for the fast response
2248 * bit. No other correction is needed since this value was
2249 * originally provided by the driver via the SenderFibAddress
2252 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
2253 cm = sc->aac_commands + (fib_index >> 2);
2254 *fib_addr = cm->cm_fib;
2257 * Is this a fast response? If it is, update the fib fields in
2258 * local memory since the whole fib isn't DMA'd back up.
2260 if (fib_index & 0x01) {
2261 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
2262 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
2267 panic("Invalid queue in aac_dequeue_fib()");
2271 /* update consumer index */
2272 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;
2274 /* if we have made the queue un-full, notify the adapter */
2275 if (notify && (aac_qinfo[queue].notify != 0))
2276 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2284 * Put our response to an Adapter Initialed Fib on the response queue
/*
 * Mirror of aac_enqueue_fib for AIF responses: echoes the adapter's
 * SenderFibAddress back as ReceiverFibAddress so the adapter can match
 * the response to its request.
 */
2287 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
2294 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2296 /* Tell the adapter where the FIB is */
2297 fib_size = fib->Header.Size;
2298 fib_addr = fib->Header.SenderFibAddress;
2299 fib->Header.ReceiverFibAddress = fib_addr;
2301 /* get the producer/consumer indices */
2302 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2303 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2305 /* wrap the queue? */
2306 if (pi >= aac_qinfo[queue].size)
2309 /* check for queue full */
2310 if ((pi + 1) == ci) {
2315 /* populate queue entry */
2316 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2317 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2319 /* update producer index */
2320 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2322 /* notify the adapter if we know how */
2323 if (aac_qinfo[queue].notify != 0)
2324 AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2333 * Check for commands that have been outstanding for a suspiciously long time,
2334 * and complain about them.
/*
 * Periodic watchdog: flag and log commands older than AAC_CMD_TIMEOUT
 * (once each, via AAC_CMD_TIMEDOUT), and warn if the firmware status
 * register says the controller is no longer up and running.
 */
2337 aac_timeout(struct aac_softc *sc)
2339 struct aac_command *cm;
2344 * Traverse the busy command list, bitch about late commands once
2348 deadline = time_uptime - AAC_CMD_TIMEOUT;
2349 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2350 if ((cm->cm_timestamp < deadline)
2351 && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) {
2352 cm->cm_flags |= AAC_CMD_TIMEDOUT;
2353 device_printf(sc->aac_dev,
2354 "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n",
2355 cm, cm->cm_fib->Header.Command,
2356 (int)(time_uptime-cm->cm_timestamp));
2357 AAC_PRINT_FIB(sc, cm->cm_fib);
2363 code = AAC_GET_FWSTATUS(sc);
2364 if (code != AAC_UP_AND_RUNNING) {
2365 device_printf(sc->aac_dev, "WARNING! Controller is no "
2366 "longer running! code= 0x%x\n", code);
2373 * Interface Function Vectors
2377 * Read the current firmware status word.
/* SA variant: firmware status lives at a fixed MEM0 register */
2380 aac_sa_get_fwstatus(struct aac_softc *sc)
2382 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2384 return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS));
/* Rx variant: register differs between new-comm and legacy interfaces */
2388 aac_rx_get_fwstatus(struct aac_softc *sc)
2390 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2392 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
2393 AAC_RX_OMR0 : AAC_RX_FWSTATUS));
/* Rkt variant: same new-comm/legacy split as Rx */
2397 aac_rkt_get_fwstatus(struct aac_softc *sc)
2399 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2401 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
2402 AAC_RKT_OMR0 : AAC_RKT_FWSTATUS));
2406 * Notify the controller of a change in a given queue
/* SA: 16-bit doorbell-1 set register */
2410 aac_sa_qnotify(struct aac_softc *sc, int qbit)
2412 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2414 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit);
/* Rx: 32-bit inbound doorbell register */
2418 aac_rx_qnotify(struct aac_softc *sc, int qbit)
2420 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2422 AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit);
/* Rkt: 32-bit inbound doorbell register */
2426 aac_rkt_qnotify(struct aac_softc *sc, int qbit)
2428 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2430 AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit);
2434 * Get the interrupt reason bits
/* SA: 16-bit doorbell-0 register */
2437 aac_sa_get_istatus(struct aac_softc *sc)
2439 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2441 return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0));
/* Rx: 32-bit outbound doorbell register */
2445 aac_rx_get_istatus(struct aac_softc *sc)
2447 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2449 return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR));
/* Rkt: 32-bit outbound doorbell register */
2453 aac_rkt_get_istatus(struct aac_softc *sc)
2455 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2457 return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR));
2461 * Clear some interrupt reason bits
/* SA: write mask to the doorbell-0 clear register */
2464 aac_sa_clear_istatus(struct aac_softc *sc, int mask)
2466 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2468 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
/* Rx: writing the mask back to ODBR clears those bits */
2472 aac_rx_clear_istatus(struct aac_softc *sc, int mask)
2474 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2476 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask);
/* Rkt: same write-to-clear semantics as Rx */
2480 aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
2482 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2484 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask);
2488 * Populate the mailbox and set the command word
2491 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
2492 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2494 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2496 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command);
2497 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
2498 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
2499 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
2500 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
2504 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
2505 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2507 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2509 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command);
2510 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
2511 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
2512 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
2513 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
2517 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2518 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2520 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2522 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command);
2523 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
2524 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
2525 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
2526 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
2530 * Fetch the immediate command status word
2533 aac_sa_get_mailbox(struct aac_softc *sc, int mb)
2535 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2537 return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
2541 aac_rx_get_mailbox(struct aac_softc *sc, int mb)
2543 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2545 return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
2549 aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
2551 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2553 return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
2557 * Set/clear interrupt masks
2560 aac_sa_set_interrupts(struct aac_softc *sc, int enable)
2562 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2565 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
2567 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
2572 aac_rx_set_interrupts(struct aac_softc *sc, int enable)
2574 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2577 if (sc->flags & AAC_FLAGS_NEW_COMM)
2578 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
2580 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
2582 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0);
2587 aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
2589 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2592 if (sc->flags & AAC_FLAGS_NEW_COMM)
2593 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
2595 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
2597 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0);
2602 * New comm. interface: Send command functions
2605 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
2607 u_int32_t index, device;
2609 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");
2611 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
2612 if (index == 0xffffffffL)
2613 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
2614 if (index == 0xffffffffL)
2616 aac_enqueue_busy(cm);
2618 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2620 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2622 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
2623 AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index);
2628 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
2630 u_int32_t index, device;
2632 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");
2634 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
2635 if (index == 0xffffffffL)
2636 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
2637 if (index == 0xffffffffL)
2639 aac_enqueue_busy(cm);
2641 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2643 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2645 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
2646 AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index);
2651 * New comm. interface: get, set outbound queue index
2654 aac_rx_get_outb_queue(struct aac_softc *sc)
2656 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2658 return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE));
2662 aac_rkt_get_outb_queue(struct aac_softc *sc)
2664 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2666 return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE));
2670 aac_rx_set_outb_queue(struct aac_softc *sc, int index)
2672 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2674 AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index);
2678 aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
2680 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2682 AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index);
2686 * Debugging and Diagnostics
/*
 * NOTE(review): this chunk is a line-numbered listing with lines elided
 * (e.g. original lines 2736-2757, the %b option-bits format string, are
 * missing between the two lines numbered 2735 and 2758 below).
 */
2690 * Print some information about the controller.
2693 aac_describe_controller(struct aac_softc *sc)
2695 struct aac_fib *fib;
2696 struct aac_adapter_info *info;
2697 char *adapter_type = "Adaptec RAID controller";
2699 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* Issue a synchronous RequestAdapterInfo FIB under the I/O lock. */
2701 mtx_lock(&sc->aac_io_lock);
2702 aac_alloc_sync_fib(sc, &fib);
2705 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2706 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
/* On failure: release the sync FIB and drop the lock before returning. */
2707 aac_release_sync_fib(sc);
2708 mtx_unlock(&sc->aac_io_lock);
2712 /* save the kernel revision structure for later use */
2713 info = (struct aac_adapter_info *)&fib->data[0];
2714 sc->aac_revision = info->KernelRevision;
/* Report CPU, clock and memory layout in MB. */
2717 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2718 "(%dMB cache, %dMB execution), %s\n",
2719 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2720 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2721 info->BufferMem / (1024 * 1024),
2722 info->ExecutionMem / (1024 * 1024),
2723 aac_describe_code(aac_battery_platform,
2724 info->batteryPlatform));
2726 device_printf(sc->aac_dev,
2727 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2728 info->KernelRevision.external.comp.major,
2729 info->KernelRevision.external.comp.minor,
2730 info->KernelRevision.external.comp.dash,
2731 info->KernelRevision.buildNumber,
2732 (u_int32_t)(info->SerialNumber & 0xffffff));
2734 device_printf(sc->aac_dev, "Supported Options=%b\n",
2735 sc->supported_options,
/* If supported, fetch the supplemental info for the adapter type string. */
2758 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2760 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2761 device_printf(sc->aac_dev,
2762 "RequestSupplementAdapterInfo failed\n");
2764 adapter_type = ((struct aac_supplement_adapter_info *)
2765 &fib->data[0])->AdapterTypeText;
2767 device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n",
2769 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2770 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2772 aac_release_sync_fib(sc);
2773 mtx_unlock(&sc->aac_io_lock);
2777 * Look up a text description of a numeric error code and return a pointer to
2781 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2785 for (i = 0; table[i].string != NULL; i++)
2786 if (table[i].code == code)
2787 return(table[i].string);
2788 return(table[i + 1].string);
2792 * Management Interface
2796 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2798 struct aac_softc *sc;
2801 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2802 device_busy(sc->aac_dev);
2803 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
/*
 * aac_ioctl: management-device ioctl dispatcher.  Handles the driver's
 * queue-statistics request plus the Adaptec FSACTL_* management commands,
 * each forwarded to a dedicated helper.
 * NOTE(review): this is a line-numbered listing; the switch scaffolding,
 * break statements and error returns are elided from view.
 */
2809 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2811 union aac_statrequest *as;
2812 struct aac_softc *sc;
2815 as = (union aac_statrequest *)arg;
2817 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2821 switch (as->as_item) {
/* Copy the requested per-queue statistics back to the caller's buffer. */
2826 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2827 sizeof(struct aac_qstat));
/*
 * For each native FSACTL_* variant, arg holds a pointer to the real
 * argument, so it is dereferenced before falling into the shared
 * FSACTL_LNX_* handler below it.
 */
2835 case FSACTL_SENDFIB:
2836 case FSACTL_SEND_LARGE_FIB:
2837 arg = *(caddr_t*)arg;
2838 case FSACTL_LNX_SENDFIB:
2839 case FSACTL_LNX_SEND_LARGE_FIB:
2840 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2841 error = aac_ioctl_sendfib(sc, arg);
2843 case FSACTL_SEND_RAW_SRB:
2844 arg = *(caddr_t*)arg;
2845 case FSACTL_LNX_SEND_RAW_SRB:
2846 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2847 error = aac_ioctl_send_raw_srb(sc, arg);
2849 case FSACTL_AIF_THREAD:
2850 case FSACTL_LNX_AIF_THREAD:
2851 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2854 case FSACTL_OPEN_GET_ADAPTER_FIB:
2855 arg = *(caddr_t*)arg;
2856 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2857 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2858 error = aac_open_aif(sc, arg);
2860 case FSACTL_GET_NEXT_ADAPTER_FIB:
2861 arg = *(caddr_t*)arg;
2862 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2863 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2864 error = aac_getnext_aif(sc, arg);
2866 case FSACTL_CLOSE_GET_ADAPTER_FIB:
2867 arg = *(caddr_t*)arg;
2868 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2869 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2870 error = aac_close_aif(sc, arg);
2872 case FSACTL_MINIPORT_REV_CHECK:
2873 arg = *(caddr_t*)arg;
2874 case FSACTL_LNX_MINIPORT_REV_CHECK:
2875 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2876 error = aac_rev_check(sc, arg);
2878 case FSACTL_QUERY_DISK:
2879 arg = *(caddr_t*)arg;
2880 case FSACTL_LNX_QUERY_DISK:
2881 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2882 error = aac_query_disk(sc, arg);
/* Container deletion is driven by AIFs from the firmware, not userland. */
2884 case FSACTL_DELETE_DISK:
2885 case FSACTL_LNX_DELETE_DISK:
2887 * We don't trust the underland to tell us when to delete a
2888 * container, rather we rely on an AIF coming from the
2893 case FSACTL_GET_PCI_INFO:
2894 arg = *(caddr_t*)arg;
2895 case FSACTL_LNX_GET_PCI_INFO:
2896 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2897 error = aac_get_pci_info(sc, arg);
2899 case FSACTL_GET_FEATURES:
2900 arg = *(caddr_t*)arg;
2901 case FSACTL_LNX_GET_FEATURES:
2902 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2903 error = aac_supported_features(sc, arg);
/* Unknown command: log it; the (elided) default path reports the error. */
2906 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2914 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2916 struct aac_softc *sc;
2917 struct aac_fib_context *ctx;
2923 mtx_lock(&sc->aac_aifq_lock);
2924 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2925 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2926 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2927 revents |= poll_events & (POLLIN | POLLRDNORM);
2932 mtx_unlock(&sc->aac_aifq_lock);
2935 if (poll_events & (POLLIN | POLLRDNORM))
2936 selrecord(td, &sc->rcv_select);
2943 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2946 switch (event->ev_type) {
2947 case AAC_EVENT_CMFREE:
2948 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2949 if (aac_alloc_command(sc, (struct aac_command **)arg)) {
2950 aac_add_event(sc, event);
2953 free(event, M_AACBUF);
2962 * Send a FIB supplied from userspace
/*
 * NOTE(review): line-numbered listing; braces, gotos/error exits and the
 * command-allocation retry loop scaffolding are elided from view.
 */
2965 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2967 struct aac_command *cm;
2970 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/*
 * Allocate a command; if none is free, register an AAC_EVENT_CMFREE
 * event (handled by aac_ioctl_event) and sleep until one appears.
 */
2977 mtx_lock(&sc->aac_io_lock);
2978 if (aac_alloc_command(sc, &cm)) {
2979 struct aac_event *event;
2981 event = malloc(sizeof(struct aac_event), M_AACBUF,
2983 if (event == NULL) {
2985 mtx_unlock(&sc->aac_io_lock);
2988 event->ev_type = AAC_EVENT_CMFREE;
2989 event->ev_callback = aac_ioctl_event;
2990 event->ev_arg = &cm;
2991 aac_add_event(sc, event);
2992 msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0);
2994 mtx_unlock(&sc->aac_io_lock);
2997 * Fetch the FIB header, then re-copy to get data as well.
2999 if ((error = copyin(ufib, cm->cm_fib,
3000 sizeof(struct aac_fib_header))) != 0)
/* Clamp the user-supplied size so the copy cannot overrun cm_fib. */
3002 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
3003 if (size > sc->aac_max_fib_size) {
3004 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
3005 size, sc->aac_max_fib_size);
3006 size = sc->aac_max_fib_size;
3008 if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
3010 cm->cm_fib->Header.Size = size;
3011 cm->cm_timestamp = time_uptime;
3014 * Pass the FIB to the controller, wait for it to complete.
3016 mtx_lock(&sc->aac_io_lock);
3017 error = aac_wait_command(cm);
3018 mtx_unlock(&sc->aac_io_lock);
3020 device_printf(sc->aac_dev,
3021 "aac_wait_command return %d\n", error);
3026 * Copy the FIB and data back out to the caller.
/* Same clamp on the way out: never copy more than the max FIB size. */
3028 size = cm->cm_fib->Header.Size;
3029 if (size > sc->aac_max_fib_size) {
3030 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
3031 size, sc->aac_max_fib_size);
3032 size = sc->aac_max_fib_size;
3034 error = copyout(cm->cm_fib, ufib, size);
/* Common exit: return the command to the free pool under the I/O lock. */
3038 mtx_lock(&sc->aac_io_lock);
3039 aac_release_command(cm);
3040 mtx_unlock(&sc->aac_io_lock);
3046 * Send a passthrough FIB supplied from userspace
/*
 * NOTE(review): line-numbered listing; many branch bodies and error exits
 * are elided from view (e.g. the EINVAL paths and the else branches).
 */
3049 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
3051 struct aac_command *cm;
3052 struct aac_event *event;
3053 struct aac_fib *fib;
3054 struct aac_srb *srbcmd, *user_srb;
3055 struct aac_sg_entry *sge;
3056 struct aac_sg_entry64 *sge64;
3057 void *srb_sg_address, *ureply;
3058 uint32_t fibsize, srb_sg_bytecount;
3059 int error, transfer_data;
3061 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3066 user_srb = (struct aac_srb *)arg;
/* Allocate a command, sleeping on an AAC_EVENT_CMFREE event if needed. */
3068 mtx_lock(&sc->aac_io_lock);
3069 if (aac_alloc_command(sc, &cm)) {
3070 event = malloc(sizeof(struct aac_event), M_AACBUF,
3072 if (event == NULL) {
3074 mtx_unlock(&sc->aac_io_lock);
3077 event->ev_type = AAC_EVENT_CMFREE;
3078 event->ev_callback = aac_ioctl_event;
3079 event->ev_arg = &cm;
3080 aac_add_event(sc, event);
3081 msleep(cm, &sc->aac_io_lock, 0, "aacraw", 0);
3083 mtx_unlock(&sc->aac_io_lock);
/* Copy the user SRB in, bounding its size against the FIB payload. */
3087 srbcmd = (struct aac_srb *)fib->data;
3088 error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t));
3091 if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) {
3095 error = copyin(user_srb, srbcmd, fibsize);
3098 srbcmd->function = 0;
3099 srbcmd->retry_limit = 0;
/* Only single-entry scatter/gather maps are supported on this path. */
3100 if (srbcmd->sg_map.SgCount > 1) {
3105 /* Retrieve correct SG entries. */
3106 if (fibsize == (sizeof(struct aac_srb) +
3107 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
3108 sge = srbcmd->sg_map.SgEntry;
3110 srb_sg_bytecount = sge->SgByteCount;
3111 srb_sg_address = (void *)(uintptr_t)sge->SgAddress;
3114 else if (fibsize == (sizeof(struct aac_srb) +
3115 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
3117 sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
3118 srb_sg_bytecount = sge64->SgByteCount;
3119 srb_sg_address = (void *)sge64->SgAddress;
/* Reject 64-bit addresses when the adapter lacks 64-bit SG support. */
3120 if (sge64->SgAddress > 0xffffffffull &&
3121 (sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
/* The user reply buffer follows the SRB in the caller's buffer. */
3131 ureply = (char *)arg + fibsize;
3132 srbcmd->data_len = srb_sg_bytecount;
3133 if (srbcmd->sg_map.SgCount == 1)
3136 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
/* Bounce the user data through a kernel buffer in cm_data. */
3137 if (transfer_data) {
3138 cm->cm_datalen = srb_sg_bytecount;
3139 cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT);
3140 if (cm->cm_data == NULL) {
3144 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
3145 cm->cm_flags |= AAC_CMD_DATAIN;
3146 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
3147 cm->cm_flags |= AAC_CMD_DATAOUT;
3148 error = copyin(srb_sg_address, cm->cm_data,
/* Build the FIB header and pick the 32/64-bit passthrough command. */
3155 fib->Header.Size = sizeof(struct aac_fib_header) +
3156 sizeof(struct aac_srb);
3157 fib->Header.XferState =
3158 AAC_FIBSTATE_HOSTOWNED |
3159 AAC_FIBSTATE_INITIALISED |
3160 AAC_FIBSTATE_EMPTY |
3161 AAC_FIBSTATE_FROMHOST |
3162 AAC_FIBSTATE_REXPECTED |
3164 AAC_FIBSTATE_ASYNC |
3165 AAC_FIBSTATE_FAST_RESPONSE;
3166 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ?
3167 ScsiPortCommandU64 : ScsiPortCommand;
3169 mtx_lock(&sc->aac_io_lock);
3170 aac_wait_command(cm);
3171 mtx_unlock(&sc->aac_io_lock);
/* Copy data (for DATA_IN) and the SRB response back to userspace. */
3173 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) {
3174 error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen);
3178 error = copyout(fib->data, ureply, sizeof(struct aac_srb_response));
/* Common exit: free the bounce buffer and release the command. */
3181 if (cm->cm_data != NULL)
3182 free(cm->cm_data, M_AACBUF);
3183 mtx_lock(&sc->aac_io_lock);
3184 aac_release_command(cm);
3185 mtx_unlock(&sc->aac_io_lock);
3191 * cdevpriv interface private destructor.
3194 aac_cdevpriv_dtor(void *arg)
3196 struct aac_softc *sc;
3199 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3201 device_unbusy(sc->aac_dev);
3206 * Handle an AIF sent to us by the controller; queue it for later reference.
3207 * If the queue fills up, then drop the older entries.
/*
 * NOTE(review): line-numbered listing; loop/case scaffolding, breaks and
 * several statements are elided from view (e.g. the do-loop opening, the
 * AifEnAddJBOD case, and TAILQ_FOREACH headers).
 */
3210 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3212 struct aac_aif_command *aif;
3213 struct aac_container *co, *co_next;
3214 struct aac_fib_context *ctx;
3215 struct aac_mntinforesp *mir;
3216 int next, current, found;
3217 int count = 0, added = 0, i = 0;
3220 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3222 aif = (struct aac_aif_command*)&fib->data[0];
3223 aac_print_aif(sc, aif);
3225 /* Is it an event that we should care about? */
3226 switch (aif->command) {
3227 case AifCmdEventNotify:
3228 switch (aif->data.EN.type) {
3229 case AifEnAddContainer:
3230 case AifEnDeleteContainer:
3232 * A container was added or deleted, but the message
3233 * doesn't tell us anything else! Re-enumerate the
3234 * containers and sort things out.
3236 aac_alloc_sync_fib(sc, &fib);
3239 * Ask the controller for its containers one at
3241 * XXX What if the controller's list changes
3242 * midway through this enumaration?
3243 * XXX This should be done async.
3245 if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
3248 count = mir->MntRespCount;
3250 * Check the container against our list.
3251 * co->co_found was already set to 0 in a
3254 if ((mir->Status == ST_OK) &&
3255 (mir->MntTable[0].VolType != CT_NONE)) {
3258 &sc->aac_container_tqh,
3260 if (co->co_mntobj.ObjectId ==
3261 mir->MntTable[0].ObjectId) {
3268 * If the container matched, continue
3277 * This is a new container. Do all the
3278 * appropriate things to set it up.
3280 aac_add_container(sc, mir, 1);
/* Enumeration loop runs until the reported count or the table limit. */
3284 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3285 aac_release_sync_fib(sc);
3288 * Go through our list of containers and see which ones
3289 * were not marked 'found'. Since the controller didn't
3290 * list them they must have been deleted. Do the
3291 * appropriate steps to destroy the device. Also reset
3292 * the co->co_found field.
3294 co = TAILQ_FIRST(&sc->aac_container_tqh);
3295 while (co != NULL) {
3296 if (co->co_found == 0) {
/* Drop the I/O lock around newbus teardown of the child device. */
3297 mtx_unlock(&sc->aac_io_lock);
3299 device_delete_child(sc->aac_dev,
3302 mtx_lock(&sc->aac_io_lock);
3303 co_next = TAILQ_NEXT(co, co_link);
3304 mtx_lock(&sc->aac_container_lock);
3305 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3307 mtx_unlock(&sc->aac_container_lock);
3312 co = TAILQ_NEXT(co, co_link);
3316 /* Attach the newly created containers */
3318 mtx_unlock(&sc->aac_io_lock);
3320 bus_generic_attach(sc->aac_dev);
3322 mtx_lock(&sc->aac_io_lock);
3327 case AifEnEnclosureManagement:
3328 switch (aif->data.EN.data.EEE.eventType) {
3329 case AIF_EM_DRIVE_INSERTION:
3330 case AIF_EM_DRIVE_REMOVAL:
/* Ask CAM to rescan the bus/target encoded in the unit ID. */
3331 channel = aif->data.EN.data.EEE.unitID;
3332 if (sc->cam_rescan_cb != NULL)
3333 sc->cam_rescan_cb(sc,
3334 (channel >> 24) & 0xF,
3335 (channel & 0xFFFF));
3341 case AifEnDeleteJBOD:
3342 channel = aif->data.EN.data.ECE.container;
3343 if (sc->cam_rescan_cb != NULL)
3344 sc->cam_rescan_cb(sc, (channel >> 24) & 0xF,
3345 AAC_CAM_TARGET_WILDCARD);
3356 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3357 mtx_lock(&sc->aac_aifq_lock);
3358 current = sc->aifq_idx;
3359 next = (current + 1) % AAC_AIFQ_LENGTH;
3361 sc->aifq_filled = 1;
3362 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3363 /* modify AIF contexts */
3364 if (sc->aifq_filled) {
/* Advance any reader context the ring is about to overwrite/lap. */
3365 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3366 if (next == ctx->ctx_idx)
3368 else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3369 ctx->ctx_idx = next;
3372 sc->aifq_idx = next;
3373 /* On the off chance that someone is sleeping for an aif... */
3374 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3375 wakeup(sc->aac_aifq);
3376 /* Wakeup any poll()ers */
3377 selwakeuppri(&sc->rcv_select, PRIBIO);
3378 mtx_unlock(&sc->aac_aifq_lock);
3384 * Return the Revision of the driver to userspace and check to see if the
3385 * userspace app is possibly compatible. This is extremely bogus since
3386 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3387 * returning what the card reported.
3390 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3392 struct aac_rev_check rev_check;
3393 struct aac_rev_check_resp rev_check_resp;
3396 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3399 * Copyin the revision struct from userspace
3401 if ((error = copyin(udata, (caddr_t)&rev_check,
3402 sizeof(struct aac_rev_check))) != 0) {
3406 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3407 rev_check.callingRevision.buildNumber);
3410 * Doctor up the response struct.
3412 rev_check_resp.possiblyCompatible = 1;
3413 rev_check_resp.adapterSWRevision.external.comp.major =
3414 AAC_DRIVER_MAJOR_VERSION;
3415 rev_check_resp.adapterSWRevision.external.comp.minor =
3416 AAC_DRIVER_MINOR_VERSION;
3417 rev_check_resp.adapterSWRevision.external.comp.type =
3419 rev_check_resp.adapterSWRevision.external.comp.dash =
3420 AAC_DRIVER_BUGFIX_LEVEL;
3421 rev_check_resp.adapterSWRevision.buildNumber =
3424 return(copyout((caddr_t)&rev_check_resp, udata,
3425 sizeof(struct aac_rev_check_resp)));
3429 * Pass the fib context to the caller
3432 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3434 struct aac_fib_context *fibctx, *ctx;
3437 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3439 fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO);
3443 mtx_lock(&sc->aac_aifq_lock);
3444 /* all elements are already 0, add to queue */
3445 if (sc->fibctx == NULL)
3446 sc->fibctx = fibctx;
3448 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3454 /* evaluate unique value */
3455 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3457 while (ctx != fibctx) {
3458 if (ctx->unique == fibctx->unique) {
3465 mtx_unlock(&sc->aac_aifq_lock);
3467 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3469 aac_close_aif(sc, (caddr_t)ctx);
3474 * Close the caller's fib context
3477 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3479 struct aac_fib_context *ctx;
3481 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3483 mtx_lock(&sc->aac_aifq_lock);
3484 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3485 if (ctx->unique == *(uint32_t *)&arg) {
3486 if (ctx == sc->fibctx)
3489 ctx->prev->next = ctx->next;
3491 ctx->next->prev = ctx->prev;
3496 mtx_unlock(&sc->aac_aifq_lock);
3498 free(ctx, M_AACBUF);
3504 * Pass the caller the next AIF in their queue
3507 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3509 struct get_adapter_fib_ioctl agf;
3510 struct aac_fib_context *ctx;
3513 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3515 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3516 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3517 if (agf.AdapterFibContext == ctx->unique)
3523 error = aac_return_aif(sc, ctx, agf.AifFib);
3524 if (error == EAGAIN && agf.Wait) {
3525 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3526 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3527 while (error == EAGAIN) {
3528 error = tsleep(sc->aac_aifq, PRIBIO |
3529 PCATCH, "aacaif", 0);
3531 error = aac_return_aif(sc, ctx, agf.AifFib);
3533 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3540 * Hand the next AIF off the top of the queue out to userspace.
3543 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3547 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3549 mtx_lock(&sc->aac_aifq_lock);
3550 current = ctx->ctx_idx;
3551 if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3553 mtx_unlock(&sc->aac_aifq_lock);
3557 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3559 device_printf(sc->aac_dev,
3560 "aac_return_aif: copyout returned %d\n", error);
3563 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3565 mtx_unlock(&sc->aac_aifq_lock);
3570 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3572 struct aac_pci_info {
3578 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3580 pciinf.bus = pci_get_bus(sc->aac_dev);
3581 pciinf.slot = pci_get_slot(sc->aac_dev);
3583 error = copyout((caddr_t)&pciinf, uptr,
3584 sizeof(struct aac_pci_info));
3590 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3592 struct aac_features f;
3595 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3597 if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3601 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3602 * ALL zero in the featuresState, the driver will return the current
3603 * state of all the supported features, the data field will not be
3605 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3606 * a specific bit set in the featuresState, the driver will return the
3607 * current state of this specific feature and whatever data that are
3608 * associated with the feature in the data field or perform whatever
3609 * action needed indicates in the data field.
3611 if (f.feat.fValue == 0) {
3612 f.feat.fBits.largeLBA =
3613 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3614 /* TODO: In the future, add other features state here as well */
3616 if (f.feat.fBits.largeLBA)
3617 f.feat.fBits.largeLBA =
3618 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3619 /* TODO: Add other features state and data in the future */
3622 error = copyout(&f, uptr, sizeof (f));
3627 * Give the userland some information about the container. The AAC arch
3628 * expects the driver to be a SCSI passthrough type driver, so it expects
3629 * the containers to have b:t:l numbers. Fake it.
3632 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3634 struct aac_query_disk query_disk;
3635 struct aac_container *co;
3636 struct aac_disk *disk;
3639 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3643 error = copyin(uptr, (caddr_t)&query_disk,
3644 sizeof(struct aac_query_disk));
3648 id = query_disk.ContainerNumber;
3652 mtx_lock(&sc->aac_container_lock);
3653 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3654 if (co->co_mntobj.ObjectId == id)
3659 query_disk.Valid = 0;
3660 query_disk.Locked = 0;
3661 query_disk.Deleted = 1; /* XXX is this right? */
3663 disk = device_get_softc(co->co_disk);
3664 query_disk.Valid = 1;
3666 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0;
3667 query_disk.Deleted = 0;
3668 query_disk.Bus = device_get_unit(sc->aac_dev);
3669 query_disk.Target = disk->unit;
3671 query_disk.UnMapped = 0;
3672 sprintf(&query_disk.diskDeviceName[0], "%s%d",
3673 disk->ad_disk->d_name, disk->ad_disk->d_unit);
3675 mtx_unlock(&sc->aac_container_lock);
3677 error = copyout((caddr_t)&query_disk, uptr,
3678 sizeof(struct aac_query_disk));
3684 aac_get_bus_info(struct aac_softc *sc)
3686 struct aac_fib *fib;
3687 struct aac_ctcfg *c_cmd;
3688 struct aac_ctcfg_resp *c_resp;
3689 struct aac_vmioctl *vmi;
3690 struct aac_vmi_businf_resp *vmi_resp;
3691 struct aac_getbusinf businfo;
3692 struct aac_sim *caminf;
3694 int i, found, error;
3696 mtx_lock(&sc->aac_io_lock);
3697 aac_alloc_sync_fib(sc, &fib);
3698 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3699 bzero(c_cmd, sizeof(struct aac_ctcfg));
3701 c_cmd->Command = VM_ContainerConfig;
3702 c_cmd->cmd = CT_GET_SCSI_METHOD;
3705 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3706 sizeof(struct aac_ctcfg));
3708 device_printf(sc->aac_dev, "Error %d sending "
3709 "VM_ContainerConfig command\n", error);
3710 aac_release_sync_fib(sc);
3711 mtx_unlock(&sc->aac_io_lock);
3715 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3716 if (c_resp->Status != ST_OK) {
3717 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3719 aac_release_sync_fib(sc);
3720 mtx_unlock(&sc->aac_io_lock);
3724 sc->scsi_method_id = c_resp->param;
3726 vmi = (struct aac_vmioctl *)&fib->data[0];
3727 bzero(vmi, sizeof(struct aac_vmioctl));
3729 vmi->Command = VM_Ioctl;
3730 vmi->ObjType = FT_DRIVE;
3731 vmi->MethId = sc->scsi_method_id;
3733 vmi->IoctlCmd = GetBusInfo;
3735 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3736 sizeof(struct aac_vmi_businf_resp));
3738 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3740 aac_release_sync_fib(sc);
3741 mtx_unlock(&sc->aac_io_lock);
3745 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3746 if (vmi_resp->Status != ST_OK) {
3747 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3749 aac_release_sync_fib(sc);
3750 mtx_unlock(&sc->aac_io_lock);
3754 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3755 aac_release_sync_fib(sc);
3756 mtx_unlock(&sc->aac_io_lock);
3759 for (i = 0; i < businfo.BusCount; i++) {
3760 if (businfo.BusValid[i] != AAC_BUS_VALID)
3763 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3764 M_AACBUF, M_NOWAIT | M_ZERO);
3765 if (caminf == NULL) {
3766 device_printf(sc->aac_dev,
3767 "No memory to add passthrough bus %d\n", i);
3771 child = device_add_child(sc->aac_dev, "aacp", -1);
3772 if (child == NULL) {
3773 device_printf(sc->aac_dev,
3774 "device_add_child failed for passthrough bus %d\n",
3776 free(caminf, M_AACBUF);
3780 caminf->TargetsPerBus = businfo.TargetsPerBus;
3781 caminf->BusNumber = i;
3782 caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3783 caminf->aac_sc = sc;
3784 caminf->sim_dev = child;
3786 device_set_ivars(child, caminf);
3787 device_set_desc(child, "SCSI Passthrough Bus");
3788 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3794 bus_generic_attach(sc->aac_dev);