2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2001 Scott Long
6 * Copyright (c) 2000 BSDi
7 * Copyright (c) 2001-2010 Adaptec, Inc.
8 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
39 #define AAC_DRIVERNAME "aacraid"
41 #include "opt_aacraid.h"
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
53 #include <sys/ioccom.h>
57 #include <sys/signalvar.h>
59 #include <sys/eventhandler.h>
62 #include <machine/bus.h>
63 #include <machine/resource.h>
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 #include <dev/aacraid/aacraid_endian.h>
/*
 * Compatibility shim: provide FILTER_HANDLED for kernels that predate
 * filter interrupt handlers.
 * NOTE(review): the matching #endif is not visible in this view; several
 * prototypes below are also missing their trailing parameter lines —
 * this chunk appears to be an incomplete extraction; verify against the
 * full source before editing.
 */
74 #ifndef FILTER_HANDLED
75 #define FILTER_HANDLED 0x02
/* Container management */
78 static void aac_add_container(struct aac_softc *sc,
79 struct aac_mntinforesp *mir, int f,
81 static void aac_get_bus_info(struct aac_softc *sc);
82 static void aac_container_bus(struct aac_softc *sc);
83 static void aac_daemon(void *arg);
84 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
85 int pages, int nseg, int nseg_new);
87 /* Command Processing */
88 static void aac_timeout(struct aac_softc *sc);
89 static void aac_command_thread(struct aac_softc *sc);
90 static int aac_sync_fib(struct aac_softc *sc, u_int32_t command,
91 u_int32_t xferstate, struct aac_fib *fib,
93 /* Command Buffer Management */
94 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
96 static int aac_alloc_commands(struct aac_softc *sc);
97 static void aac_free_commands(struct aac_softc *sc);
98 static void aac_unmap_command(struct aac_command *cm);
100 /* Hardware Interface */
101 static int aac_alloc(struct aac_softc *sc);
102 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
104 static int aac_check_firmware(struct aac_softc *sc);
105 static void aac_define_int_mode(struct aac_softc *sc);
106 static int aac_init(struct aac_softc *sc);
107 static int aac_find_pci_capability(struct aac_softc *sc, int cap);
108 static int aac_setup_intr(struct aac_softc *sc);
109 static int aac_check_config(struct aac_softc *sc);
111 /* PMC SRC interface */
112 static int aac_src_get_fwstatus(struct aac_softc *sc);
113 static void aac_src_qnotify(struct aac_softc *sc, int qbit);
114 static int aac_src_get_istatus(struct aac_softc *sc);
115 static void aac_src_clear_istatus(struct aac_softc *sc, int mask);
116 static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
117 u_int32_t arg0, u_int32_t arg1,
118 u_int32_t arg2, u_int32_t arg3);
119 static int aac_src_get_mailbox(struct aac_softc *sc, int mb);
120 static void aac_src_access_devreg(struct aac_softc *sc, int mode);
121 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
122 static int aac_src_get_outb_queue(struct aac_softc *sc);
123 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
/*
 * Hardware-access dispatch table for PMC SRC controllers: maps the
 * generic aac_interface operations onto the aac_src_* register helpers.
 * NOTE(review): several member initializers (and the closing brace) are
 * not visible in this view — presumably the qnotify/istatus/mailbox
 * entries sit in the gaps; confirm against the full source.
 */
125 struct aac_interface aacraid_src_interface = {
126 aac_src_get_fwstatus,
129 aac_src_clear_istatus,
132 aac_src_access_devreg,
133 aac_src_send_command,
134 aac_src_get_outb_queue,
135 aac_src_set_outb_queue
138 /* PMC SRCv interface */
/* SRCv differs from SRC only in its mailbox register layout. */
139 static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
140 u_int32_t arg0, u_int32_t arg1,
141 u_int32_t arg2, u_int32_t arg3);
142 static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
/*
 * Dispatch table for SRCv controllers: reuses the SRC helpers except for
 * the two mailbox accessors, which use the SRCv-specific variants.
 */
144 struct aac_interface aacraid_srcv_interface = {
145 aac_src_get_fwstatus,
148 aac_src_clear_istatus,
149 aac_srcv_set_mailbox,
150 aac_srcv_get_mailbox,
151 aac_src_access_devreg,
152 aac_src_send_command,
153 aac_src_get_outb_queue,
154 aac_src_set_outb_queue
157 /* Debugging and Diagnostics */
/*
 * Human-readable names for the CPU variant codes reported by the adapter
 * firmware; the zero-keyed "Unknown processor" entry terminates lookup.
 */
158 static struct aac_code_lookup aac_cpu_variant[] = {
159 {"i960JX", CPUI960_JX},
160 {"i960CX", CPUI960_CX},
161 {"i960HX", CPUI960_HX},
162 {"i960RX", CPUI960_RX},
163 {"i960 80303", CPUI960_80303},
164 {"StrongARM SA110", CPUARM_SA110},
165 {"PPC603e", CPUPPC_603e},
166 {"XScale 80321", CPU_XSCALE_80321},
167 {"MIPS 4KC", CPU_MIPS_4KC},
168 {"MIPS 5KC", CPU_MIPS_5KC},
169 {"Unknown StrongARM", CPUARM_xxx},
170 {"Unknown PowerPC", CPUPPC_xxx},
172 {"Unknown processor", 0}
/*
 * Battery-platform descriptions matching the PLATFORM_BAT_* codes; the
 * zero-keyed entry is the catch-all.
 */
175 static struct aac_code_lookup aac_battery_platform[] = {
176 {"required battery present", PLATFORM_BAT_REQ_PRESENT},
177 {"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT},
178 {"optional battery present", PLATFORM_BAT_OPT_PRESENT},
179 {"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT},
180 {"no battery support", PLATFORM_BAT_NOT_SUPPORTED},
182 {"unknown battery platform", 0}
184 static void aac_describe_controller(struct aac_softc *sc);
185 static char *aac_describe_code(struct aac_code_lookup *table,
188 /* Management Interface */
/* cdev entry points and ioctl helpers for the /dev/aacraid%d node. */
189 static d_open_t aac_open;
190 static d_ioctl_t aac_ioctl;
191 static d_poll_t aac_poll;
192 static void aac_cdevpriv_dtor(void *arg);
193 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
194 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
195 static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
196 static void aac_request_aif(struct aac_softc *sc);
197 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
198 static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
199 static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
200 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
201 static int aac_return_aif(struct aac_softc *sc,
202 struct aac_fib_context *ctx, caddr_t uptr);
203 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
204 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
205 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
206 static void aac_ioctl_event(struct aac_softc *sc,
207 struct aac_event *event, void *arg);
208 static int aac_reset_adapter(struct aac_softc *sc);
209 static int aac_get_container_info(struct aac_softc *sc,
210 struct aac_fib *fib, int cid,
211 struct aac_mntinforesp *mir,
214 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
/*
 * Character-device switch for the management node.
 * NOTE(review): .d_open/.d_poll/.d_name initializers are not visible in
 * this view; confirm against the full source.
 */
216 static struct cdevsw aacraid_cdevsw = {
217 .d_version = D_VERSION,
220 .d_ioctl = aac_ioctl,
/* Malloc type used for all driver-private allocations. */
225 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
/* sysctl root: hw.aacraid */
228 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
229 "AACRAID driver parameters");
/*
236 * Initialize the controller and softc
 *
 * Attach-time sequence: verify firmware support, allocate DMA resources
 * and FIBs, set up interrupts, create the management cdev and AIF
 * kthread, enumerate containers, register with CAM, and finally enable
 * interrupts and start the periodic daemon callout.
 * NOTE(review): the return type line, several declarations, and the
 * error-unwind labels are not visible in this view.
 */
239 aacraid_attach(struct aac_softc *sc)
243 struct aac_mntinforesp mir;
244 int count = 0, i = 0;
247 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
248 sc->hint_flags = device_get_flags(sc->aac_dev);
250 * Initialize per-controller queues.
256 /* mark controller as suspended until we get ourselves organised */
257 sc->aac_state |= AAC_STATE_SUSPEND;
260 * Check that the firmware on the card is supported.
262 sc->msi_enabled = sc->msi_tupelo = FALSE;
263 if ((error = aac_check_firmware(sc)) != 0)
269 mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
270 TAILQ_INIT(&sc->aac_container_tqh);
271 TAILQ_INIT(&sc->aac_ev_cmfree);
273 /* Initialize the clock daemon callout. */
274 callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
277 * Initialize the adapter.
279 if ((error = aac_alloc(sc)) != 0)
281 aac_define_int_mode(sc);
/* In sync mode the adapter is not brought up via aac_init(). */
282 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
283 if ((error = aac_init(sc)) != 0)
288 * Allocate and connect our interrupt.
290 if ((error = aac_setup_intr(sc)) != 0)
294 * Print a little information about the controller.
296 aac_describe_controller(sc);
299 * Make the control device.
301 unit = device_get_unit(sc->aac_dev);
302 sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
303 0640, "aacraid%d", unit);
304 sc->aac_dev_t->si_drv1 = sc;
306 /* Create the AIF thread */
307 if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
308 &sc->aifthread, 0, 0, "aacraid%daif", unit))
309 panic("Could not create AIF thread");
311 /* Register the shutdown method to only be called post-dump */
312 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
313 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
314 device_printf(sc->aac_dev,
315 "shutdown event registration failed\n");
317 /* Find containers */
318 mtx_lock(&sc->aac_io_lock);
319 aac_alloc_sync_fib(sc, &fib);
320 /* loop over possible containers */
/* mir.MntRespCount bounds the scan; AAC_MAX_CONTAINERS is the hard cap. */
322 if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
325 count = mir.MntRespCount;
326 aac_add_container(sc, &mir, 0, uid);
328 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
329 aac_release_sync_fib(sc);
330 mtx_unlock(&sc->aac_io_lock);
332 /* Register with CAM for the containers */
333 TAILQ_INIT(&sc->aac_sim_tqh);
334 aac_container_bus(sc);
335 /* Register with CAM for the non-DASD devices */
336 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
337 aac_get_bus_info(sc);
339 /* poke the bus to actually attach the child devices */
340 bus_generic_attach(sc->aac_dev);
342 /* mark the controller up */
343 sc->aac_state &= ~AAC_STATE_SUSPEND;
345 /* enable interrupts now */
346 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
/* First daemon tick after 60s; it reschedules itself thereafter. */
348 mtx_lock(&sc->aac_io_lock);
349 callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
350 mtx_unlock(&sc->aac_io_lock);
/*
 * Periodic housekeeping callout: pushes the current host time to the
 * adapter via a SendHostTime FIB, then reschedules itself for 30 min.
 * Runs with aac_io_lock held (callout_init_mtx in attach).
 */
356 aac_daemon(void *arg)
358 struct aac_softc *sc;
360 struct aac_command *cm;
364 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
366 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* Bail if the callout was rescheduled or stopped while we waited. */
367 if (callout_pending(&sc->aac_daemontime) ||
368 callout_active(&sc->aac_daemontime) == 0)
/* Best effort: if no command is free, simply skip this interval. */
372 if (!aacraid_alloc_command(sc, &cm)) {
374 cm->cm_timestamp = time_uptime;
376 cm->cm_flags |= AAC_CMD_WAIT;
379 sizeof(struct aac_fib_header) + sizeof(u_int32_t);
380 fib->Header.XferState =
381 AAC_FIBSTATE_HOSTOWNED |
382 AAC_FIBSTATE_INITIALISED |
384 AAC_FIBSTATE_FROMHOST |
385 AAC_FIBSTATE_REXPECTED |
388 AAC_FIBSTATE_FAST_RESPONSE;
389 fib->Header.Command = SendHostTime;
/* Payload is the wall-clock seconds, little-endian on the wire. */
390 *(uint32_t *)fib->data = htole32(tv.tv_sec);
392 aacraid_map_command_sg(cm, NULL, 0, 0);
393 aacraid_release_command(cm);
396 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
/*
 * Queue an event callback; currently only AAC_EVENT_CMFREE is known
 * (fired from aacraid_release_command when a command is freed).
 */
400 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
403 switch (event->ev_type & AAC_EVENT_MASK) {
404 case AAC_EVENT_CMFREE:
405 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
408 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
/*
417 * Request information of container #cid
 *
 * Issues a VM_NameServe* ContainerCommand to fill *mir, then (for valid,
 * non-hidden containers) a CT_CID_TO_32BITS_UID query to fill *uid.
 * Uses the caller's sync FIB when sync_fib != NULL, otherwise allocates
 * an async command and waits for it — both queries follow the same
 * dual sync/async pattern below.
 * NOTE(review): body is gapped in this view (returns, else-branches and
 * some assignments missing); the command selection depends on
 * variable-block-size and 64-bit-LBA feature flags.
 */
420 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
421 struct aac_mntinforesp *mir, u_int32_t *uid)
423 struct aac_command *cm;
425 struct aac_mntinfo *mi;
426 struct aac_cnt_config *ccfg;
429 if (sync_fib == NULL) {
430 if (aacraid_alloc_command(sc, &cm)) {
431 device_printf(sc->aac_dev,
432 "Warning, no free command available\n");
440 mi = (struct aac_mntinfo *)&fib->data[0];
441 /* 4KB support?, 64-bit LBA? */
442 if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
443 mi->Command = VM_NameServeAllBlk;
444 else if (sc->flags & AAC_FLAGS_LBA_64BIT)
445 mi->Command = VM_NameServe64;
447 mi->Command = VM_NameServe;
448 mi->MntType = FT_FILESYS;
/* Convert request to little-endian before it reaches the adapter. */
450 aac_mntinfo_tole(mi);
453 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
454 sizeof(struct aac_mntinfo))) {
455 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
459 cm->cm_timestamp = time_uptime;
463 sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
464 fib->Header.XferState =
465 AAC_FIBSTATE_HOSTOWNED |
466 AAC_FIBSTATE_INITIALISED |
468 AAC_FIBSTATE_FROMHOST |
469 AAC_FIBSTATE_REXPECTED |
472 AAC_FIBSTATE_FAST_RESPONSE;
473 fib->Header.Command = ContainerCommand;
474 if (aacraid_wait_command(cm) != 0) {
475 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
476 aacraid_release_command(cm);
/* Copy out and byte-swap the response before inspecting it. */
480 bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
481 aac_mntinforesp_toh(mir);
485 if (mir->MntTable[0].VolType != CT_NONE &&
486 !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
/* Old firmware w/o variable block size: assume 512-byte sectors. */
487 if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
488 mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
489 mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
/* Second query: translate container id to a 32-bit UID. */
491 ccfg = (struct aac_cnt_config *)&fib->data[0];
492 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
493 ccfg->Command = VM_ContainerConfig;
494 ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
495 ccfg->CTCommand.param[0] = cid;
496 aac_cnt_config_tole(ccfg);
499 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
500 sizeof(struct aac_cnt_config));
501 aac_cnt_config_toh(ccfg);
502 if (rval == 0 && ccfg->Command == ST_OK &&
503 ccfg->CTCommand.param[0] == CT_OK &&
504 mir->MntTable[0].VolType != CT_PASSTHRU)
505 *uid = ccfg->CTCommand.param[1];
508 sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
509 fib->Header.XferState =
510 AAC_FIBSTATE_HOSTOWNED |
511 AAC_FIBSTATE_INITIALISED |
513 AAC_FIBSTATE_FROMHOST |
514 AAC_FIBSTATE_REXPECTED |
517 AAC_FIBSTATE_FAST_RESPONSE;
518 fib->Header.Command = ContainerCommand;
519 rval = aacraid_wait_command(cm);
520 aac_cnt_config_toh(ccfg);
521 if (rval == 0 && ccfg->Command == ST_OK &&
522 ccfg->CTCommand.param[0] == CT_OK &&
523 mir->MntTable[0].VolType != CT_PASSTHRU)
524 *uid = ccfg->CTCommand.param[1];
525 aacraid_release_command(cm);
/*
533 * Create a device to represent a new container
 *
 * Allocates an aac_container, copies the firmware's mount object into
 * it, and links it onto sc->aac_container_tqh for later CAM attach.
 */
536 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
539 struct aac_container *co;
541 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
544 * Check container volume type for validity. Note that many of
545 * the possible types may never show up.
547 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
548 co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
551 panic("Out of memory?!");
555 bcopy(&mir->MntTable[0], &co->co_mntobj,
556 sizeof(struct aac_mntobj));
558 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
/*
563 * Allocate resources associated with (sc)
 *
 * Creates the three DMA tags (data buffers, FIBs, common structure),
 * allocates and maps the common area, and pre-allocates command/FIB
 * structures up to aac_max_fibs.
 * NOTE(review): several bus_dma_tag_create argument lines are missing
 * in this view; freed/unwind paths are also not visible.
 */
566 aac_alloc(struct aac_softc *sc)
570 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
573 * Create DMA tag for mapping buffers into controller-addressable space.
575 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
576 1, 0, /* algnmnt, boundary */
577 (sc->flags & AAC_FLAGS_SG_64BIT) ?
579 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
580 BUS_SPACE_MAXADDR, /* highaddr */
581 NULL, NULL, /* filter, filterarg */
582 sc->aac_max_sectors << 9, /* maxsize */
583 sc->aac_sg_tablesize, /* nsegments */
584 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
585 BUS_DMA_ALLOCNOW, /* flags */
586 busdma_lock_mutex, /* lockfunc */
587 &sc->aac_io_lock, /* lockfuncarg */
588 &sc->aac_buffer_dmat)) {
589 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
594 * Create DMA tag for mapping FIBs into controller-addressable space..
596 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
/* +31 allows for 32-byte alignment of each FIB within the block. */
597 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
598 sizeof(struct aac_fib_xporthdr) + 31);
600 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
601 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
602 1, 0, /* algnmnt, boundary */
603 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
604 BUS_SPACE_MAXADDR_32BIT :
605 0x7fffffff, /* lowaddr */
606 BUS_SPACE_MAXADDR, /* highaddr */
607 NULL, NULL, /* filter, filterarg */
608 maxsize, /* maxsize */
610 maxsize, /* maxsize */
612 NULL, NULL, /* No locking needed */
613 &sc->aac_fib_dmat)) {
614 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
619 * Create DMA tag for the common structure and allocate it.
621 maxsize = sizeof(struct aac_common);
622 maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
623 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
624 1, 0, /* algnmnt, boundary */
625 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
626 BUS_SPACE_MAXADDR_32BIT :
627 0x7fffffff, /* lowaddr */
628 BUS_SPACE_MAXADDR, /* highaddr */
629 NULL, NULL, /* filter, filterarg */
630 maxsize, /* maxsize */
632 maxsize, /* maxsegsize */
634 NULL, NULL, /* No locking needed */
635 &sc->aac_common_dmat)) {
636 device_printf(sc->aac_dev,
637 "can't allocate common structure DMA tag\n");
640 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
641 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
642 device_printf(sc->aac_dev, "can't allocate common structure\n");
/* aac_common_map records the bus address in the softc. */
646 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
647 sc->aac_common, maxsize,
648 aac_common_map, sc, 0);
649 bzero(sc->aac_common, maxsize);
651 /* Allocate some FIBs and associated command structs */
652 TAILQ_INIT(&sc->aac_fibmap_tqh);
653 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
654 M_AACRAIDBUF, M_WAITOK|M_ZERO);
655 mtx_lock(&sc->aac_io_lock);
/* Keep allocating FIB batches until we hit the max or run out. */
656 while (sc->total_fibs < sc->aac_max_fibs) {
657 if (aac_alloc_commands(sc) != 0)
660 mtx_unlock(&sc->aac_io_lock);
661 if (sc->total_fibs == 0)
/*
668 * Free all of the resources associated with (sc)
 *
670 * Should not be called if the controller is active.
 *
 * Teardown mirrors aac_alloc()/aac_setup_intr(): cdev, FIBs and their
 * DMA tag, the common area, interrupts (including MSI release),
 * data-buffer and parent DMA tags, and the register BARs.
 */
673 aacraid_free(struct aac_softc *sc)
677 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
679 /* remove the control device */
680 if (sc->aac_dev_t != NULL)
681 destroy_dev(sc->aac_dev_t);
683 /* throw away any FIB buffers, discard the FIB DMA tag */
684 aac_free_commands(sc);
685 if (sc->aac_fib_dmat)
686 bus_dma_tag_destroy(sc->aac_fib_dmat);
688 free(sc->aac_commands, M_AACRAIDBUF);
690 /* destroy the common area */
691 if (sc->aac_common) {
692 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
693 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
694 sc->aac_common_dmamap);
696 if (sc->aac_common_dmat)
697 bus_dma_tag_destroy(sc->aac_common_dmat);
699 /* disconnect the interrupt handler */
700 for (i = 0; i < AAC_MAX_MSIX; ++i) {
702 bus_teardown_intr(sc->aac_dev,
703 sc->aac_irq[i], sc->aac_intr[i]);
705 bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
706 sc->aac_irq_rid[i], sc->aac_irq[i]);
710 if (sc->msi_enabled || sc->msi_tupelo)
711 pci_release_msi(sc->aac_dev);
713 /* destroy data-transfer DMA tag */
714 if (sc->aac_buffer_dmat)
715 bus_dma_tag_destroy(sc->aac_buffer_dmat);
717 /* destroy the parent DMA tag */
718 if (sc->aac_parent_dmat)
719 bus_dma_tag_destroy(sc->aac_parent_dmat);
721 /* release the register window mapping */
722 if (sc->aac_regs_res0 != NULL)
723 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
724 sc->aac_regs_rid0, sc->aac_regs_res0);
725 if (sc->aac_regs_res1 != NULL)
726 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
727 sc->aac_regs_rid1, sc->aac_regs_res1);
/*
731 * Disconnect from the controller completely, in preparation for unload.
 *
 * Drains the daemon callout, frees containers, deletes CAM SIM
 * children, stops the AIF kthread (waiting up to 30s), shuts the
 * adapter down, deregisters the shutdown hook, and destroys the lock.
 */
734 aacraid_detach(device_t dev)
736 struct aac_softc *sc;
737 struct aac_container *co;
741 sc = device_get_softc(dev);
742 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
744 callout_drain(&sc->aac_daemontime);
745 /* Remove the child containers */
746 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
747 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
748 free(co, M_AACRAIDBUF);
751 /* Remove the CAM SIMs */
752 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
753 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
754 error = device_delete_child(dev, sim->sim_dev);
757 free(sim, M_AACRAIDBUF);
/* Ask the AIF thread to exit; it wakes us on sc->aac_dev when done. */
760 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
761 sc->aifflags |= AAC_AIFFLAGS_EXIT;
762 wakeup(sc->aifthread);
763 tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
766 if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
767 panic("Cannot shutdown AIF thread");
769 if ((error = aacraid_shutdown(dev)))
772 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
776 mtx_destroy(&sc->aac_io_lock);
/*
782 * Bring the controller down to a dormant state and detach all child devices.
 *
784 * This function is called before detach or system shutdown.
 *
786 * Note that we can assume that the bioq on the controller is empty, as we won't
787 * allow shutdown if any device is open.
 */
790 aacraid_shutdown(device_t dev)
792 struct aac_softc *sc;
794 struct aac_close_command *cc;
796 sc = device_get_softc(dev);
797 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
799 sc->aac_state |= AAC_STATE_SUSPEND;
802 * Send a Container shutdown followed by a HostShutdown FIB to the
803 * controller to convince it that we don't want to talk to it anymore.
804 * We've been closed and all I/O completed already
806 device_printf(sc->aac_dev, "shutting down controller...");
808 mtx_lock(&sc->aac_io_lock);
809 aac_alloc_sync_fib(sc, &fib);
810 cc = (struct aac_close_command *)&fib->data[0];
/* VM_CloseAll with the wildcard container id closes every container. */
812 bzero(cc, sizeof(struct aac_close_command));
813 cc->Command = htole32(VM_CloseAll);
814 cc->ContainerId = htole32(0xfffffffe);
815 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
816 sizeof(struct aac_close_command)))
821 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
822 aac_release_sync_fib(sc);
823 mtx_unlock(&sc->aac_io_lock);
/*
829 * Bring the controller to a quiescent state, ready for system suspend.
 *
 * Marks the softc suspended and masks adapter interrupts.
 */
832 aacraid_suspend(device_t dev)
834 struct aac_softc *sc;
836 sc = device_get_softc(dev);
838 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
839 sc->aac_state |= AAC_STATE_SUSPEND;
841 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
/*
846 * Bring the controller back to a state ready for operation.
 *
 * Inverse of aacraid_suspend(): clears the suspend flag and re-enables
 * adapter interrupts.
 */
849 aacraid_resume(device_t dev)
851 struct aac_softc *sc;
853 sc = device_get_softc(dev);
855 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
856 sc->aac_state &= ~AAC_STATE_SUSPEND;
857 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
/*
862 * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
 *
 * Decodes the doorbell (MSI or INTx), completes any pending synchronous
 * command, drains this vector's host RRQ (response ring) completing
 * fast-response/AIF/normal commands, then requests further AIFs and
 * restarts queued I/O.
 * NOTE(review): body is gapped in this view; the RRQ drain loop head and
 * several branch bodies are missing, and the register read/write order
 * here (ODR readback workaround) is order-sensitive — do not reflow.
 */
865 aacraid_new_intr_type1(void *arg)
867 struct aac_msix_ctx *ctx;
868 struct aac_softc *sc;
870 struct aac_command *cm;
872 u_int32_t bellbits, bellbits_shifted, index, handle;
873 int isFastResponse, isAif, noMoreAif, mode;
875 ctx = (struct aac_msix_ctx *)arg;
877 vector_no = ctx->vector_no;
879 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
880 mtx_lock(&sc->aac_io_lock);
/* MSI path: only vector 0 carries AIF/sync doorbell bits. */
882 if (sc->msi_enabled) {
883 mode = AAC_INT_MODE_MSI;
884 if (vector_no == 0) {
885 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
886 if (bellbits & 0x40000)
887 mode |= AAC_INT_MODE_AIF;
888 else if (bellbits & 0x1000)
889 mode |= AAC_INT_MODE_SYNC;
/* Legacy INTx path: read, classify, then acknowledge the doorbell. */
892 mode = AAC_INT_MODE_INTX;
893 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
894 if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
895 bellbits = AAC_DB_RESPONSE_SENT_NS;
896 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
898 bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
899 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
900 if (bellbits_shifted & AAC_DB_AIF_PENDING)
901 mode |= AAC_INT_MODE_AIF;
902 if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
903 mode |= AAC_INT_MODE_SYNC;
905 /* ODR readback, Prep #238630 */
906 AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
/* Complete the single outstanding synchronous command, if any. */
909 if (mode & AAC_INT_MODE_SYNC) {
910 if (sc->aac_sync_cm) {
911 cm = sc->aac_sync_cm;
912 aac_unmap_command(cm);
913 cm->cm_flags |= AAC_CMD_COMPLETED;
914 aac_fib_header_toh(&cm->cm_fib->Header);
916 /* is there a completion handler? */
917 if (cm->cm_complete != NULL) {
920 /* assume that someone is sleeping on this command */
923 sc->flags &= ~AAC_QUEUE_FRZN;
924 sc->aac_sync_cm = NULL;
926 if (mode & AAC_INT_MODE_INTX)
927 mode &= ~AAC_INT_MODE_SYNC;
932 if (mode & AAC_INT_MODE_AIF) {
933 if (mode & AAC_INT_MODE_INTX) {
939 if (sc->flags & AAC_FLAGS_SYNC_MODE)
943 /* handle async. status */
944 index = sc->aac_host_rrq_idx[vector_no];
946 isFastResponse = isAif = noMoreAif = 0;
947 /* remove toggle bit (31) */
948 handle = (le32toh(sc->aac_common->ac_host_rrq[index]) &
950 /* check fast response bit (30) */
951 if (handle & 0x40000000)
953 /* check AIF bit (23) */
954 else if (handle & 0x00800000)
/* Low 16 bits are the 1-based command index. */
956 handle &= 0x0000ffff;
960 cm = sc->aac_commands + (handle - 1);
962 aac_fib_header_toh(&fib->Header);
963 sc->aac_rrq_outstanding[vector_no]--;
965 noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
967 aac_handle_aif(sc, fib);
969 aacraid_release_command(cm);
/* Fast response: firmware skipped the FIB writeback; fake ST_OK. */
971 if (isFastResponse) {
972 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
973 *((u_int32_t *)(fib->data)) = htole32(ST_OK);
974 cm->cm_flags |= AAC_CMD_FASTRESP;
977 aac_unmap_command(cm);
978 cm->cm_flags |= AAC_CMD_COMPLETED;
980 /* is there a completion handler? */
981 if (cm->cm_complete != NULL) {
984 /* assume that someone is sleeping on this command */
987 sc->flags &= ~AAC_QUEUE_FRZN;
/* Advance ring index, wrapping within this vector's RRQ window. */
990 sc->aac_common->ac_host_rrq[index++] = 0;
991 if (index == (vector_no + 1) * sc->aac_vector_cap)
992 index = vector_no * sc->aac_vector_cap;
993 sc->aac_host_rrq_idx[vector_no] = index;
995 if ((isAif && !noMoreAif) || sc->aif_pending)
1000 if (mode & AAC_INT_MODE_AIF) {
1001 aac_request_aif(sc);
1002 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1006 /* see if we can start some more I/O */
1007 if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1008 aacraid_startio(sc);
1009 mtx_unlock(&sc->aac_io_lock);
/*
1013 * Handle notification of one or more FIBs coming from the controller.
 *
 * AIF kthread main loop: sleeps up to AAC_PERIODIC_INTERVAL between
 * wakeups, allocates more FIBs on demand, runs periodic timeout checks,
 * and drains the firmware printf buffer.  Exits (and wakes the
 * detaching thread) when AAC_AIFFLAGS_EXIT is set.
 */
1016 aac_command_thread(struct aac_softc *sc)
1020 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1022 mtx_lock(&sc->aac_io_lock);
1023 sc->aifflags = AAC_AIFFLAGS_RUNNING;
1025 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1027 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1028 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1029 "aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1032 * First see if any FIBs need to be allocated.
1034 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1035 aac_alloc_commands(sc);
1036 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1037 aacraid_startio(sc);
1041 * While we're here, check to see if any commands are stuck.
1042 * This is pretty low-priority, so it's ok if it doesn't
1045 if (retval == EWOULDBLOCK)
1048 /* Check the hardware printf message buffer */
1049 if (sc->aac_common->ac_printf[0] != 0)
1050 aac_print_printf(sc);
1052 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1053 mtx_unlock(&sc->aac_io_lock);
/* Signal aacraid_detach(), which tsleeps on sc->aac_dev. */
1054 wakeup(sc->aac_dev);
1056 aac_kthread_exit(0);
/*
1060 * Submit a command to the controller, return when it completes.
1061 * XXX This is very dangerous! If the card has gone out to lunch, we could
1062 * be stuck here forever. At the same time, signals are not caught
1063 * because there is a risk that a signal could wakeup the sleep before
1064 * the card has a chance to complete the command. Since there is no way
1065 * to cancel a command that is in progress, we can't protect against the
1066 * card completing a command late and spamming the command and data
1067 * memory. So, we are held hostage until the command completes.
 *
 * Caller must hold aac_io_lock; the interrupt path wakes us via
 * wakeup(cm) once the command is marked completed.
 */
1070 aacraid_wait_command(struct aac_command *cm)
1072 struct aac_softc *sc;
1076 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1077 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1079 /* Put the command on the ready queue and get things going */
1080 aac_enqueue_ready(cm);
1081 aacraid_startio(sc);
1082 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
/*
1087 *Command Buffer Management
 */
/*
1091 * Allocate a command.
 *
 * Pops a command from the freelist into *cmp.  If the freelist is empty
 * but more FIBs may be allocated, nudges the AIF thread to allocate a
 * new batch (AAC_AIFFLAGS_ALLOCFIBS) for next time.
 */
1094 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1096 struct aac_command *cm;
1098 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1100 if ((cm = aac_dequeue_free(sc)) == NULL) {
1101 if (sc->total_fibs < sc->aac_max_fibs) {
1102 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1103 wakeup(sc->aifthread);
/*
1113 * Release a command back to the freelist.
 *
 * Re-initializes the command/FIB header to its empty state, returns it
 * to the freelist, and fires any queued command-free event callbacks.
 * Caller must hold aac_io_lock.
 */
1116 aacraid_release_command(struct aac_command *cm)
1118 struct aac_event *event;
1119 struct aac_softc *sc;
1122 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1123 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1125 /* (re)initialize the command/FIB */
1126 cm->cm_sgtable = NULL;
1128 cm->cm_complete = NULL;
1130 cm->cm_passthr_dmat = 0;
1131 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1132 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1133 cm->cm_fib->Header.Unused = 0;
1134 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1137 * These are duplicated in aac_start to cover the case where an
1138 * intermediate stage may have destroyed them. They're left
1139 * initialized here for debugging purposes only.
1141 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1142 cm->cm_fib->Header.Handle = 0;
1144 aac_enqueue_free(cm);
1147 * Dequeue all events so that there's no risk of events getting
 */
1150 while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1151 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1152 event->ev_callback(sc, event, event->ev_arg);
/*
1157 * Map helper for command/FIB allocation.
 *
 * bus_dmamap_load callback: stores the single segment's bus address
 * into the uint64_t pointed to by arg.
 */
1160 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1164 fibphys = (uint64_t *)arg;
1166 *fibphys = segs[0].ds_addr;
/*
1170 * Allocate and initialize commands/FIBs for this adapter.
 *
 * Allocates one batch (aac_max_fibs_alloc) of FIBs in DMA-able memory,
 * 32-byte-aligns each FIB (with room for an xport header on
 * NEW_COMM_TYPE1), creates per-command data maps, and releases the new
 * commands to the freelist.  Called with aac_io_lock held; the lock is
 * dropped around the (possibly sleeping) DMA allocation.
 * NOTE(review): several lines (map-failure unwind, loop tail) are not
 * visible in this view.
 */
1173 aac_alloc_commands(struct aac_softc *sc)
1175 struct aac_command *cm;
1176 struct aac_fibmap *fm;
1181 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1182 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* Don't exceed the adapter's FIB limit. */
1184 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1187 fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1191 mtx_unlock(&sc->aac_io_lock);
1192 /* allocate the FIBs in DMAable memory and load them */
1193 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1194 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1195 device_printf(sc->aac_dev,
1196 "Not enough contiguous memory available.\n");
1197 free(fm, M_AACRAIDBUF);
1198 mtx_lock(&sc->aac_io_lock);
/* +31 leaves slack so each FIB can be aligned to a 32-byte boundary. */
1202 maxsize = sc->aac_max_fib_size + 31;
1203 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1204 maxsize += sizeof(struct aac_fib_xporthdr);
1205 /* Ignore errors since this doesn't bounce */
1206 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1207 sc->aac_max_fibs_alloc * maxsize,
1208 aac_map_command_helper, &fibphys, 0);
1209 mtx_lock(&sc->aac_io_lock);
1211 /* initialize constant fields in the command structure */
1212 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1213 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1214 cm = sc->aac_commands + sc->total_fibs;
1215 fm->aac_commands = cm;
1217 cm->cm_fib = (struct aac_fib *)
1218 ((u_int8_t *)fm->aac_fibs + i * maxsize);
1219 cm->cm_fibphys = fibphys + i * maxsize;
/* Align both the virtual and bus address of the FIB identically. */
1220 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1221 u_int64_t fibphys_aligned;
1223 (cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1224 cm->cm_fib = (struct aac_fib *)
1225 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1226 cm->cm_fibphys = fibphys_aligned;
1228 u_int64_t fibphys_aligned;
1229 fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1230 cm->cm_fib = (struct aac_fib *)
1231 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1232 cm->cm_fibphys = fibphys_aligned;
1234 cm->cm_index = sc->total_fibs;
1236 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1237 &cm->cm_datamap)) != 0)
/* Hold back the last FIB for internal use when near the limit. */
1239 if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1240 aacraid_release_command(cm);
1245 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1246 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
/* Error unwind: unload/free the batch and its bookkeeping. */
1250 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1251 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1252 free(fm, M_AACRAIDBUF);
1257 * Free FIBs owned by this adapter.
/*
 * Tear down all fibmaps allocated by aac_alloc_commands(): destroy each
 * command's data DMA map, then unload/free the FIB memory block and the
 * tracking struct.  total_fibs is decremented as the loop bound so a
 * partially-populated final fibmap is handled correctly.
 */
1260 aac_free_commands(struct aac_softc *sc)
1262 struct aac_fibmap *fm;
1263 struct aac_command *cm;
1266 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1268 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1269 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1271 * We check against total_fibs to handle partially
1274 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1275 cm = fm->aac_commands + i;
1276 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1278 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1279 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1280 free(fm, M_AACRAIDBUF);
1285 * Command-mapping helper function - populate this command's s/g table.
/*
 * Bus-DMA load callback for a command's data buffer: populate the command's
 * scatter/gather table in the format the firmware expects (RawIo2 IEEE-1212,
 * RawIo, 32-bit, or 64-bit s/g entries), convert the FIB to little-endian,
 * sync the data map, and submit the command (sync or async path).
 * NOTE(review): many interior lines (fib assignment, early-return guards,
 * loop/brace closes) are elided in this excerpt; code left untouched.
 */
1288 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1290 struct aac_softc *sc;
1291 struct aac_command *cm;
1292 struct aac_fib *fib;
1295 cm = (struct aac_command *)arg;
1298 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1299 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1301 if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1304 /* copy into the FIB */
1305 if (cm->cm_sgtable != NULL) {
1306 if (fib->Header.Command == RawIo2) {
1307 struct aac_raw_io2 *raw;
1308 struct aac_sge_ieee1212 *sg;
1309 u_int32_t min_size = PAGE_SIZE, cur_size;
1310 int conformable = TRUE;
1312 raw = (struct aac_raw_io2 *)&fib->data[0];
1313 sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
/*
 * A "conformable" RawIo2 list has all middle elements the same size
 * (sgeNominalSize); track the minimum to drive the conversion below.
 */
1316 for (i = 0; i < nseg; i++) {
1317 cur_size = segs[i].ds_len;
1319 *(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1320 sg[i].length = cur_size;
1323 raw->sgeFirstSize = cur_size;
1324 } else if (i == 1) {
1325 raw->sgeNominalSize = cur_size;
1326 min_size = cur_size;
1327 } else if ((i+1) < nseg &&
1328 cur_size != raw->sgeNominalSize) {
1329 conformable = FALSE;
1330 if (cur_size < min_size)
1331 min_size = cur_size;
1335 /* not conformable: evaluate required sg elements */
1337 int j, err_found, nseg_new = nseg;
/*
 * Find the largest page multiple that evenly divides every middle
 * segment; count how many elements the split list would need.
 */
1338 for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1341 for (j = 1; j < nseg - 1; ++j) {
1342 if (sg[j].length % (i*PAGE_SIZE)) {
1346 nseg_new += (sg[j].length / (i*PAGE_SIZE));
/* Only convert if the split list still fits and hint bit 2 is clear. */
1351 if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1352 !(sc->hint_flags & 4))
1353 nseg = aac_convert_sgraw2(sc,
1354 raw, i, nseg, nseg_new);
1356 raw->flags |= RIO2_SGL_CONFORMANT;
/* Byte-swap the s/g entries and RawIo2 header for the firmware. */
1359 for (i = 0; i < nseg; i++)
1360 aac_sge_ieee1212_tole(sg + i);
1361 aac_raw_io2_tole(raw);
1363 /* update the FIB size for the s/g count */
1364 fib->Header.Size += nseg *
1365 sizeof(struct aac_sge_ieee1212);
1367 } else if (fib->Header.Command == RawIo) {
1368 struct aac_sg_tableraw *sg;
1369 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1370 sg->SgCount = htole32(nseg);
1371 for (i = 0; i < nseg; i++) {
1372 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1373 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1374 sg->SgEntryRaw[i].Next = 0;
1375 sg->SgEntryRaw[i].Prev = 0;
1376 sg->SgEntryRaw[i].Flags = 0;
1377 aac_sg_entryraw_tole(&sg->SgEntryRaw[i]);
1379 aac_raw_io_tole((struct aac_raw_io *)&fib->data[0]);
1380 /* update the FIB size for the s/g count */
1381 sg->SgEntryRaw[i].Flags = 0;
1381 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1382 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1383 struct aac_sg_table *sg;
1384 sg = cm->cm_sgtable;
1385 sg->SgCount = htole32(nseg);
1386 for (i = 0; i < nseg; i++) {
1387 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1388 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1389 aac_sg_entry_tole(&sg->SgEntry[i]);
1391 /* update the FIB size for the s/g count */
1392 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1394 struct aac_sg_table64 *sg;
1395 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1396 sg->SgCount = htole32(nseg);
1397 for (i = 0; i < nseg; i++) {
1398 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1399 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1400 aac_sg_entry64_tole(&sg->SgEntry64[i]);
1402 /* update the FIB size for the s/g count */
1403 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1407 /* Fix up the address values in the FIB. Use the command array index
1408 * instead of a pointer since these fields are only 32 bits. Shift
1409 * the SenderFibAddress over to make room for the fast response bit
1410 * and for the AIF bit
1412 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1413 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1415 /* save a pointer to the command for speedy reverse-lookup */
1416 cm->cm_fib->Header.Handle += cm->cm_index + 1;
/* Pre-sync the data buffer unless this is a pass-through mapping. */
1418 if (cm->cm_passthr_dmat == 0) {
1419 if (cm->cm_flags & AAC_CMD_DATAIN)
1420 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1421 BUS_DMASYNC_PREREAD);
1422 if (cm->cm_flags & AAC_CMD_DATAOUT)
1423 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1424 BUS_DMASYNC_PREWRITE);
1427 cm->cm_flags |= AAC_CMD_MAPPED;
/* Submit: waited sync FIB, polled sync mode, or the async hardware queue. */
1429 if (cm->cm_flags & AAC_CMD_WAIT) {
1430 aac_fib_header_tole(&fib->Header);
1431 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1432 cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1433 } else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1435 sc->aac_sync_cm = cm;
1436 aac_fib_header_tole(&fib->Header);
1437 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1438 cm->cm_fibphys, 0, 0, 0, &wait, NULL);
/* Async path: retry submission, freezing the queue if the adapter is full. */
1440 int count = 10000000L;
1441 while (AAC_SEND_COMMAND(sc, cm) != 0) {
1443 aac_unmap_command(cm);
1444 sc->flags |= AAC_QUEUE_FRZN;
1445 aac_requeue_ready(cm);
1447 DELAY(5); /* wait 5 usec. */
/*
 * Rebuild a non-conformable RawIo2 s/g list into a conformable one by
 * splitting every middle element into equal chunks of (pages * PAGE_SIZE)
 * bytes.  The first and last elements are kept as-is.  Returns the new
 * element count (nseg_new).  NOTE(review): the malloc-failure return and
 * the per-chunk pos increment are elided in this excerpt.
 */
1453 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1454 int pages, int nseg, int nseg_new)
1456 struct aac_sge_ieee1212 *sge;
1460 sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1461 M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1465 for (i = 1, pos = 1; i < nseg - 1; ++i) {
1466 for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1467 addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1468 sge[pos].addrLow = addr_low;
1469 sge[pos].addrHigh = raw->sge[i].addrHigh;
/* Carry into the high word if the 32-bit low address wrapped. */
1470 if (addr_low < raw->sge[i].addrLow)
1471 sge[pos].addrHigh++;
1472 sge[pos].length = pages * PAGE_SIZE;
1477 sge[pos] = raw->sge[nseg-1];
1478 for (i = 1; i < nseg_new; ++i)
1479 raw->sge[i] = sge[i];
1481 free(sge, M_AACRAIDBUF);
1482 raw->sgeCnt = nseg_new;
1483 raw->flags |= RIO2_SGL_CONFORMANT;
1484 raw->sgeNominalSize = pages * PAGE_SIZE;
1489 * Unmap a command from controller-visible space.
/*
 * Undo aacraid_map_command_sg(): post-sync the data buffer (unless it was a
 * pass-through mapping), unload the DMA map, and clear AAC_CMD_MAPPED.
 * No-op if the command was never mapped.
 */
1492 aac_unmap_command(struct aac_command *cm)
1494 struct aac_softc *sc;
1497 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1499 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1502 if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1503 if (cm->cm_flags & AAC_CMD_DATAIN)
1504 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1505 BUS_DMASYNC_POSTREAD);
1506 if (cm->cm_flags & AAC_CMD_DATAOUT)
1507 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1508 BUS_DMASYNC_POSTWRITE);
1510 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1512 cm->cm_flags &= ~AAC_CMD_MAPPED;
1516 * Hardware Interface
1520 * Initialize the adapter.
/*
 * Bus-DMA load callback for the shared "common" area: record its bus
 * address in the softc for later use when programming the adapter.
 */
1523 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1525 struct aac_softc *sc;
1527 sc = (struct aac_softc *)arg;
1528 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1530 sc->aac_common_busaddr = segs[0].ds_addr;
/*
 * Probe the controller firmware: wait out any in-progress flash update and
 * the boot sequence, reject unsupported PERC2/QC 1.x firmware, read the
 * supported-options word and size the BAR/ATU window accordingly, then fetch
 * the firmware's preferred FIB/sector/s-g/MSI-X limits and derive the
 * driver's operating flags (64-bit s/g, new-comm type, raw I/O, sync mode).
 * NOTE(review): error returns and several brace/else lines are elided in
 * this excerpt; code left untouched.
 */
1534 aac_check_firmware(struct aac_softc *sc)
1536 u_int32_t code, major, minor, maxsize;
1537 u_int32_t options = 0, atu_size = 0, status, waitCount;
1540 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1542 /* check if flash update is running */
1543 if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1546 code = AAC_GET_FWSTATUS(sc);
1547 if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1548 device_printf(sc->aac_dev,
1549 "FATAL: controller not coming ready, "
1550 "status %x\n", code);
1553 } while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1555 * Delay 10 seconds. Because right now FW is doing a soft reset,
1556 * do not read scratch pad register at this time
1558 waitCount = 10 * 10000;
1560 DELAY(100); /* delay 100 microseconds */
1566 * Wait for the adapter to come ready.
1570 code = AAC_GET_FWSTATUS(sc);
1571 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1572 device_printf(sc->aac_dev,
1573 "FATAL: controller not coming ready, "
1574 "status %x\n", code);
1577 } while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1580 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1581 * firmware version 1.x are not compatible with this driver.
1583 if (sc->flags & AAC_FLAGS_PERC2QC) {
1584 if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1586 device_printf(sc->aac_dev,
1587 "Error reading firmware version\n");
1591 /* These numbers are stored as ASCII! */
1592 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1593 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1595 device_printf(sc->aac_dev,
1596 "Firmware version %d.%d is not supported.\n",
1602 * Retrieve the capabilities/supported options word so we know what
1603 * work-arounds to enable. Some firmware revs don't support this
1606 if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1607 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1608 device_printf(sc->aac_dev,
1609 "RequestAdapterInfo failed\n");
1613 options = AAC_GET_MAILBOX(sc, 1);
1614 atu_size = AAC_GET_MAILBOX(sc, 2);
1615 sc->supported_options = options;
1616 sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1618 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1619 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1620 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1621 if (options & AAC_SUPPORTED_NONDASD)
1622 sc->flags |= AAC_FLAGS_ENABLE_CAM;
/* 64-bit s/g requires firmware support, a 64-bit host, and hint bit 0. */
1623 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1624 && (sizeof(bus_addr_t) > 4)
1625 && (sc->hint_flags & 0x1)) {
1626 device_printf(sc->aac_dev,
1627 "Enabling 64-bit address support\n");
1628 sc->flags |= AAC_FLAGS_SG_64BIT;
1630 if (sc->aac_if.aif_send_command) {
1631 if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1632 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1633 else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1634 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1635 else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1636 (options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1637 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1639 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1640 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1643 if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1644 device_printf(sc->aac_dev, "Communication interface not supported!\n");
/* hint bit 1 forces polled/sync mode; type-3/4 hardware only supports it. */
1648 if (sc->hint_flags & 2) {
1649 device_printf(sc->aac_dev,
1650 "Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1651 sc->flags |= AAC_FLAGS_SYNC_MODE;
1652 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1653 device_printf(sc->aac_dev,
1654 "Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1655 sc->flags |= AAC_FLAGS_SYNC_MODE;
1658 /* Check for broken hardware that does a lower number of commands */
1659 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1661 /* Remap mem. resource, if required */
1662 if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1663 bus_release_resource(
1664 sc->aac_dev, SYS_RES_MEMORY,
1665 sc->aac_regs_rid0, sc->aac_regs_res0);
1666 sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1667 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1668 atu_size, RF_ACTIVE);
/* Fall back to the original-size window if the larger one is unavailable. */
1669 if (sc->aac_regs_res0 == NULL) {
1670 sc->aac_regs_res0 = bus_alloc_resource_any(
1671 sc->aac_dev, SYS_RES_MEMORY,
1672 &sc->aac_regs_rid0, RF_ACTIVE);
1673 if (sc->aac_regs_res0 == NULL) {
1674 device_printf(sc->aac_dev,
1675 "couldn't allocate register window\n");
1679 sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1680 sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1683 /* Read preferred settings */
1684 sc->aac_max_fib_size = sizeof(struct aac_fib);
1685 sc->aac_max_sectors = 128; /* 64KB */
1686 sc->aac_max_aif = 1;
1687 if (sc->flags & AAC_FLAGS_SG_64BIT)
1688 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1689 - sizeof(struct aac_blockwrite64))
1690 / sizeof(struct aac_sg_entry64);
1692 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1693 - sizeof(struct aac_blockwrite))
1694 / sizeof(struct aac_sg_entry);
/* Let the firmware override the defaults with its preferred limits. */
1696 if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1697 options = AAC_GET_MAILBOX(sc, 1);
1698 sc->aac_max_fib_size = (options & 0xFFFF);
1699 sc->aac_max_sectors = (options >> 16) << 1;
1700 options = AAC_GET_MAILBOX(sc, 2);
1701 sc->aac_sg_tablesize = (options >> 16);
1702 options = AAC_GET_MAILBOX(sc, 3);
1703 sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1704 if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1705 sc->aac_max_fibs = (options & 0xFFFF);
1706 options = AAC_GET_MAILBOX(sc, 4);
1707 sc->aac_max_aif = (options & 0xFFFF);
1708 options = AAC_GET_MAILBOX(sc, 5);
1709 sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
/* Clamp the per-FIB slot size (FIB + alignment + xport hdr) to one page. */
1712 maxsize = sc->aac_max_fib_size + 31;
1713 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1714 maxsize += sizeof(struct aac_fib_xporthdr);
1715 if (maxsize > PAGE_SIZE) {
1716 sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1717 maxsize = PAGE_SIZE;
1719 sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1721 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1722 sc->flags |= AAC_FLAGS_RAW_IO;
1723 device_printf(sc->aac_dev, "Enable Raw I/O\n");
1725 if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1726 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1727 sc->flags |= AAC_FLAGS_LBA_64BIT;
1728 device_printf(sc->aac_dev, "Enable 64-bit array\n");
1731 #ifdef AACRAID_DEBUG
1732 aacraid_get_fw_debug_buffer(sc);
/*
 * Build the adapter init structure in the shared common area (addresses of
 * the adapter FIB pool, printf buffer, and host RRQ; size limits; comm-type
 * revision/flags), convert it to little-endian, and hand it to the firmware
 * via AAC_MONKER_INITSTRUCT.  Finishes with a configuration-status check.
 * NOTE(review): the error-return lines are elided in this excerpt.
 */
1738 aac_init(struct aac_softc *sc)
1740 struct aac_adapter_init *ip;
1743 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1745 /* reset rrq index */
1746 sc->aac_fibs_pushed_no = 0;
1747 for (i = 0; i < sc->aac_max_msix; i++)
1748 sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1751 * Fill in the init structure. This tells the adapter about the
1752 * physical location of various important shared data structures.
1754 ip = &sc->aac_common->ac_init;
1755 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1756 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1757 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1758 sc->flags |= AAC_FLAGS_RAW_IO;
1760 ip->NoOfMSIXVectors = sc->aac_max_msix;
1762 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1763 offsetof(struct aac_common, ac_fibs);
1764 ip->AdapterFibsVirtualAddress = 0;
1765 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1766 ip->AdapterFibAlign = sizeof(struct aac_fib);
1768 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1769 offsetof(struct aac_common, ac_printf);
1770 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1773 * The adapter assumes that pages are 4K in size, except on some
1774 * broken firmware versions that do the page->byte conversion twice,
1775 * therefore 'assuming' that this value is in 16MB units (2^24).
1776 * Round up since the granularity is so high.
1778 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1779 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1780 ip->HostPhysMemPages =
1781 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1783 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */
1785 ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1786 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1787 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1788 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1789 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1790 device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1791 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1792 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1793 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1794 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1795 device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1797 ip->MaxNumAif = sc->aac_max_aif;
1798 ip->HostRRQ_AddrLow =
1799 sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1800 /* always 32-bit address */
1801 ip->HostRRQ_AddrHigh = 0;
1803 if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1804 ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1805 ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1806 device_printf(sc->aac_dev, "Power Management enabled\n");
1809 ip->MaxIoCommands = sc->aac_max_fibs;
1810 ip->MaxIoSize = sc->aac_max_sectors << 9;
1811 ip->MaxFibSize = sc->aac_max_fib_size;
/* Convert the whole init structure to the firmware's little-endian layout. */
1813 aac_adapter_init_tole(ip);
1816 * Do controller-type-specific initialisation
1818 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1821 * Give the init structure to the controller.
1823 if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1824 sc->aac_common_busaddr +
1825 offsetof(struct aac_common, ac_init), 0, 0, 0,
1827 device_printf(sc->aac_dev,
1828 "error establishing init structure\n");
1834 * Check configuration issues
1836 if ((error = aac_check_config(sc)) != 0)
/*
 * Select the interrupt delivery mode: forced INTx in sync mode; MSI (or
 * INTx) when the firmware advertised no MSI-X vectors; otherwise try MSI-X,
 * then MSI, then legacy INTx.  Also verifies MSI enablement via PCI config
 * space and computes the per-vector FIB capacity (aac_vector_cap).
 * NOTE(review): early returns, else branches, and some assignments are
 * elided in this excerpt; code left untouched.
 */
1845 aac_define_int_mode(struct aac_softc *sc)
1848 int cap, msi_count, error = 0;
1853 if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1854 device_printf(dev, "using line interrupts\n");
1855 sc->aac_max_msix = 1;
1856 sc->aac_vector_cap = sc->aac_max_fibs;
1860 /* max. vectors from AAC_MONKER_GETCOMMPREF */
1861 if (sc->aac_max_msix == 0) {
/* Tupelo (SRC) boards still want a single MSI even without MSI-X. */
1862 if (sc->aac_hwif == AAC_HWIF_SRC) {
1864 if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1865 device_printf(dev, "alloc msi failed - err=%d; "
1866 "will use INTx\n", error);
1867 pci_release_msi(dev);
1869 sc->msi_tupelo = TRUE;
1873 device_printf(dev, "using MSI interrupts\n");
1875 device_printf(dev, "using line interrupts\n");
1877 sc->aac_max_msix = 1;
1878 sc->aac_vector_cap = sc->aac_max_fibs;
/* MSI-X path: clamp vector count to hardware, driver, and firmware limits. */
1883 msi_count = pci_msix_count(dev);
1884 if (msi_count > AAC_MAX_MSIX)
1885 msi_count = AAC_MAX_MSIX;
1886 if (msi_count > sc->aac_max_msix)
1887 msi_count = sc->aac_max_msix;
1888 if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1889 device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1890 "will try MSI\n", msi_count, error);
1891 pci_release_msi(dev);
1893 sc->msi_enabled = TRUE;
1894 device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1898 if (!sc->msi_enabled) {
1900 if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1901 device_printf(dev, "alloc msi failed - err=%d; "
1902 "will use INTx\n", error);
1903 pci_release_msi(dev);
1905 sc->msi_enabled = TRUE;
1906 device_printf(dev, "using MSI interrupts\n");
1910 if (sc->msi_enabled) {
1911 /* now read controller capability from PCI config. space */
1912 cap = aac_find_pci_capability(sc, PCIY_MSIX);
1913 val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1914 if (!(val & AAC_PCI_MSI_ENABLE)) {
1915 pci_release_msi(dev);
1916 sc->msi_enabled = FALSE;
1920 if (!sc->msi_enabled) {
1921 device_printf(dev, "using legacy interrupts\n");
1922 sc->aac_max_msix = 1;
1924 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1925 if (sc->aac_max_msix > msi_count)
1926 sc->aac_max_msix = msi_count;
1928 sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1930 fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1931 sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
/*
 * Walk the PCI capability list looking for capability id `cap`.
 * NOTE(review): the list-walking loop, matching test, and return statements
 * are elided in this excerpt; only the preamble is visible.  Presumably it
 * returns the config-space offset of the capability, or 0 if absent.
 */
1935 aac_find_pci_capability(struct aac_softc *sc, int cap)
1943 status = pci_read_config(dev, PCIR_STATUS, 2);
1944 if (!(status & PCIM_STATUS_CAPPRESENT))
1947 status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1948 switch (status & PCIM_HDRTYPE) {
1954 ptr = PCIR_CAP_PTR_2;
1960 ptr = pci_read_config(dev, ptr, 1);
1964 next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1965 val = pci_read_config(dev, ptr + PCICAP_ID, 1);
/*
 * Allocate one IRQ resource and install the interrupt handler
 * (aacraid_new_intr_type1) per configured vector.  MSI/MSI-X resource IDs
 * start at 1; legacy INTx uses rid 0.
 * NOTE(review): error returns after the printf calls are elided.
 */
1975 aac_setup_intr(struct aac_softc *sc)
1977 int i, msi_count, rid;
1978 struct resource *res;
1981 msi_count = sc->aac_max_msix;
1982 rid = ((sc->msi_enabled || sc->msi_tupelo)? 1:0);
1984 for (i = 0; i < msi_count; i++, rid++) {
1985 if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1986 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1987 device_printf(sc->aac_dev,"can't allocate interrupt\n");
1990 sc->aac_irq_rid[i] = rid;
1991 sc->aac_irq[i] = res;
1992 if (aac_bus_setup_intr(sc->aac_dev, res,
1993 INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1994 aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1995 device_printf(sc->aac_dev, "can't set up interrupt\n");
1998 sc->aac_msix[i].vector_no = i;
1999 sc->aac_msix[i].sc = sc;
2000 sc->aac_intr[i] = tag;
/*
 * Query the controller's configuration status (CT_GET_CONFIG_STATUS) via a
 * sync FIB and, if the reported action is safe (<= CFACT_PAUSE), auto-commit
 * the configuration with CT_COMMIT_CONFIG.  Endianness of the request is
 * converted to little-endian before and back to host order after each call.
 * NOTE(review): the return-value lines on the error branches are elided.
 */
2007 aac_check_config(struct aac_softc *sc)
2009 struct aac_fib *fib;
2010 struct aac_cnt_config *ccfg;
2011 struct aac_cf_status_hdr *cf_shdr;
2014 mtx_lock(&sc->aac_io_lock);
2015 aac_alloc_sync_fib(sc, &fib);
2017 ccfg = (struct aac_cnt_config *)&fib->data[0];
2018 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2019 ccfg->Command = VM_ContainerConfig;
2020 ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2021 ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2023 aac_cnt_config_tole(ccfg);
2024 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2025 sizeof (struct aac_cnt_config));
2026 aac_cnt_config_toh(ccfg);
2028 cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2029 if (rval == 0 && ccfg->Command == ST_OK &&
2030 ccfg->CTCommand.param[0] == CT_OK) {
2031 if (le32toh(cf_shdr->action) <= CFACT_PAUSE) {
2032 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2033 ccfg->Command = VM_ContainerConfig;
2034 ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2036 aac_cnt_config_tole(ccfg);
2037 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2038 sizeof (struct aac_cnt_config));
2039 aac_cnt_config_toh(ccfg);
2041 if (rval == 0 && ccfg->Command == ST_OK &&
2042 ccfg->CTCommand.param[0] == CT_OK) {
2043 /* successful completion */
2046 /* auto commit aborted due to error(s) */
2050 /* auto commit aborted due to adapter indicating
2051 config. issues too dangerous to auto commit */
2059 aac_release_sync_fib(sc);
2060 mtx_unlock(&sc->aac_io_lock);
2065 * Send a synchronous command to the controller and wait for a result.
2066 * Indicate if the controller completed the command with an error status.
/*
 * Issue a synchronous mailbox command: load the mailbox, ring the sync
 * doorbell, and (unless this is a fire-and-forget SYNCFIB with *sp == 0)
 * poll for completion up to AAC_SYNC_TIMEOUT, returning the status in *sp
 * and mailbox 1 in *r1 when requested.
 * NOTE(review): the timeout/error return lines are elided in this excerpt.
 */
2069 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2070 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2071 u_int32_t *sp, u_int32_t *r1)
2076 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2078 /* populate the mailbox */
2079 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2081 /* ensure the sync command doorbell flag is cleared */
2082 if (!sc->msi_enabled)
2083 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2085 /* then set it to signal the adapter */
2086 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2088 if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2089 /* spin waiting for the command to complete */
2092 if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2093 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2096 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2098 /* clear the completion flag */
2099 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2101 /* get the command status */
2102 status = AAC_GET_MAILBOX(sc, 0);
2106 /* return parameter */
2108 *r1 = AAC_GET_MAILBOX(sc, 1);
2110 if (status != AAC_SRB_STS_SUCCESS)
/*
 * Fill in the header of the pre-allocated sync FIB (host-owned, initialised,
 * plus caller-supplied xferstate bits), convert it to little-endian, and
 * submit it via AAC_MONKER_SYNCFIB; the header is converted back to host
 * order on both success and failure paths.  Caller holds aac_io_lock.
 * NOTE(review): return statements are elided in this excerpt.
 */
2117 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2118 struct aac_fib *fib, u_int16_t datasize)
2120 uint32_t ReceiverFibAddress;
2122 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2123 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2125 if (datasize > AAC_FIB_DATASIZE)
2129 * Set up the sync FIB
2131 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2132 AAC_FIBSTATE_INITIALISED |
2134 fib->Header.XferState |= xferstate;
2135 fib->Header.Command = command;
2136 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2137 fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2138 fib->Header.SenderSize = sizeof(struct aac_fib);
2139 fib->Header.SenderFibAddress = 0; /* Not needed */
2140 ReceiverFibAddress = sc->aac_common_busaddr +
2141 offsetof(struct aac_common, ac_sync_fib);
2142 fib->Header.u.ReceiverFibAddress = ReceiverFibAddress;
2143 aac_fib_header_tole(&fib->Header);
2146 * Give the FIB to the controller, wait for a response.
2148 if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2149 ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2150 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2151 aac_fib_header_toh(&fib->Header);
2155 aac_fib_header_toh(&fib->Header);
2160 * Check for commands that have been outstanding for a suspiciously long time,
2161 * and complain about them.
/*
 * Watchdog: scan the busy list for commands outstanding longer than
 * AAC_CMD_TIMEOUT, log them (with the FIB dump), and reset the adapter.
 * NOTE(review): the condition guarding aac_reset_adapter() is elided in
 * this excerpt — presumably only triggered when a timeout was found.
 */
2164 aac_timeout(struct aac_softc *sc)
2166 struct aac_command *cm;
2170 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2172 * Traverse the busy command list, bitch about late commands once
2176 deadline = time_uptime - AAC_CMD_TIMEOUT;
2177 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2178 if (cm->cm_timestamp < deadline) {
2179 device_printf(sc->aac_dev,
2180 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2181 cm, (int)(time_uptime-cm->cm_timestamp));
2182 AAC_PRINT_FIB(sc, cm->cm_fib);
2188 aac_reset_adapter(sc);
2189 aacraid_print_queues(sc);
2193 * Interface Function Vectors
2197 * Read the current firmware status word.
/* SRC hardware interface: read the firmware status word (OMR register). */
2200 aac_src_get_fwstatus(struct aac_softc *sc)
2202 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2204 return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2208 * Notify the controller of a change in a given queue
/* SRC: notify the controller of a queue change via the inbound doorbell. */
2211 aac_src_qnotify(struct aac_softc *sc, int qbit)
2213 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2215 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2219 * Get the interrupt reason bits
/*
 * SRC: read the interrupt-reason bits.  In MSI mode a sync-status bit is
 * translated to AAC_DB_SYNC_COMMAND; otherwise the outbound doorbell is
 * read and shifted into canonical position.
 * NOTE(review): the return statements are elided in this excerpt.
 */
2222 aac_src_get_istatus(struct aac_softc *sc)
2226 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2228 if (sc->msi_enabled) {
2229 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2230 if (val & AAC_MSI_SYNC_STATUS)
2231 val = AAC_DB_SYNC_COMMAND;
2235 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2241 * Clear some interrupt reason bits
/*
 * SRC: acknowledge interrupt-reason bits.  MSI sync completions go through
 * the device-register helper; everything else clears the outbound doorbell.
 */
2244 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2246 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2248 if (sc->msi_enabled) {
2249 if (mask == AAC_DB_SYNC_COMMAND)
2250 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2252 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2257 * Populate the mailbox and set the command word
/* SRC: write the command word and four arguments into the mailbox. */
2260 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2261 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2263 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2265 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2266 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2267 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2268 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2269 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
/* SRCv variant: same as aac_src_set_mailbox but at the SRCv mailbox offset. */
2273 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2274 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2276 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2278 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2279 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2280 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2281 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2282 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2286 * Fetch the immediate command status word
/* SRC: read 32-bit mailbox word `mb` (mb 0 is the command status). */
2289 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2291 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2293 return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
/* SRCv variant: read 32-bit mailbox word `mb` at the SRCv mailbox offset. */
2297 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2299 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2301 return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2305 * Set/clear interrupt masks
/*
 * SRC: manipulate interrupt-related device registers according to `mode`
 * (enable/disable interrupts or MSI-X, clear AIF/sync doorbell bits, enable
 * INTx).  Several cases read-modify-write the inbound doorbell (IDBR) and
 * read it back to flush the posted write.
 * NOTE(review): the bit constants ORed into `val` between the IDBR read and
 * write are elided in this excerpt; code left untouched.
 */
2308 aac_src_access_devreg(struct aac_softc *sc, int mode)
2312 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2315 case AAC_ENABLE_INTERRUPT:
2316 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2317 (sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2318 AAC_INT_ENABLE_TYPE1_INTX));
2321 case AAC_DISABLE_INTERRUPT:
2322 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2325 case AAC_ENABLE_MSIX:
2327 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2329 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2330 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2332 val = PMC_ALL_INTERRUPT_BITS;
2333 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2334 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2335 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2336 val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2339 case AAC_DISABLE_MSIX:
2341 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2343 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2344 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2347 case AAC_CLEAR_AIF_BIT:
2349 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2351 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2352 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2355 case AAC_CLEAR_SYNC_BIT:
2357 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2359 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2360 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2363 case AAC_ENABLE_INTX:
2365 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2367 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2368 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2370 val = PMC_ALL_INTERRUPT_BITS;
2371 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2372 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2373 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2374 val & (~(PMC_GLOBAL_INT_BIT2)));
2383 * New comm. interface: Send command functions
/*
 * SRC new-comm submission: pick the least-loaded MSI-X vector (round-robin
 * with a per-vector outstanding-count cap) and encode it in the FIB handle,
 * then build either a TFIB2/TFIB2_64 header (TYPE2) or an xport header
 * placed just before the FIB (TYPE1), convert to little-endian, queue the
 * command as busy, and ring the 64-bit or 32-bit inbound queue register.
 * The low bits of the written address carry the encoded fibsize.
 * NOTE(review): loop braces, the non-MSI-X vector default, and the return
 * are elided in this excerpt; code left untouched.
 */
2386 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2388 struct aac_fib_xporthdr *pFibX;
2389 u_int32_t fibsize, high_addr;
2392 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2394 if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2395 sc->aac_max_msix > 1) {
2396 u_int16_t vector_no, first_choice = 0xffff;
2398 vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2401 if (vector_no == sc->aac_max_msix)
2403 if (sc->aac_rrq_outstanding[vector_no] <
2406 if (0xffff == first_choice)
2407 first_choice = vector_no;
2408 else if (vector_no == first_choice)
2411 if (vector_no == first_choice)
2413 sc->aac_rrq_outstanding[vector_no]++;
/* Wrap the push counter before it overflows the 32-bit counter. */
2414 if (sc->aac_fibs_pushed_no == 0xffffffff)
2415 sc->aac_fibs_pushed_no = 0;
2417 sc->aac_fibs_pushed_no++;
2419 cm->cm_fib->Header.Handle += (vector_no << 16);
2422 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2423 /* Calculate the amount to the fibsize bits */
2424 fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2425 /* Fill new FIB header */
2426 address = cm->cm_fibphys;
2427 high_addr = (u_int32_t)(address >> 32);
2428 if (high_addr == 0L) {
2429 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2430 cm->cm_fib->Header.u.TimeStamp = 0L;
2432 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2433 cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2435 cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2437 /* Calculate the amount to the fibsize bits */
2438 fibsize = (sizeof(struct aac_fib_xporthdr) +
2439 cm->cm_fib->Header.Size + 127) / 128 - 1;
2440 /* Fill XPORT header */
2441 pFibX = (struct aac_fib_xporthdr *)
2442 ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2443 pFibX->Handle = cm->cm_fib->Header.Handle;
2444 pFibX->HostAddress = cm->cm_fibphys;
2445 pFibX->Size = cm->cm_fib->Header.Size;
2446 aac_fib_xporthdr_tole(pFibX);
2447 address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2448 high_addr = (u_int32_t)(address >> 32);
2451 aac_fib_header_tole(&cm->cm_fib->Header);
2455 aac_enqueue_busy(cm);
2457 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2458 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2460 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2466 * New comm. interface: get, set outbound queue index
/*
 * Report the outbound queue index for SRC adapters.  Only the debug entry
 * trace is visible here; the return statement is elided from this listing.
 */
2469 aac_src_get_outb_queue(struct aac_softc *sc)
2471 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/*
 * Set the outbound queue index for SRC adapters.  Only the debug entry
 * trace is visible here; the body's remainder is elided from this listing.
 */
2477 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2479 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2483 * Debugging and Diagnostics
2487 * Print some information about the controller.
/*
 * Query the adapter for its identity and print a dmesg banner: adapter
 * type, driver version, CPU/memory info and kernel firmware revision.
 * Also caches feature bits, option bits and the kernel revision in the
 * softc for later use.  Runs under aac_io_lock with a synchronous FIB.
 */
2490 aac_describe_controller(struct aac_softc *sc)
2492 struct aac_fib *fib;
2493 struct aac_adapter_info *info;
2494 char *adapter_type = "Adaptec RAID controller";
2496 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2498 mtx_lock(&sc->aac_io_lock);
2499 aac_alloc_sync_fib(sc, &fib);
/* Prefer the supplemental info (marketing name, feature bits) if offered. */
2501 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2503 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2504 device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2506 struct aac_supplement_adapter_info *supp_info;
2508 supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2509 adapter_type = (char *)supp_info->AdapterTypeText;
2510 sc->aac_feature_bits = le32toh(supp_info->FeatureBits);
2511 sc->aac_support_opt2 = le32toh(supp_info->SupportedOptions2);
2514 device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2516 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2517 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2520 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2521 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
/* Error path still releases the sync FIB and drops the lock. */
2522 aac_release_sync_fib(sc);
2523 mtx_unlock(&sc->aac_io_lock);
2527 /* save the kernel revision structure for later use */
2528 info = (struct aac_adapter_info *)&fib->data[0];
2529 aac_adapter_info_toh(info);
2530 sc->aac_revision = info->KernelRevision;
2533 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2534 "(%dMB cache, %dMB execution), %s\n",
2535 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2536 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2537 info->BufferMem / (1024 * 1024),
2538 info->ExecutionMem / (1024 * 1024),
2539 aac_describe_code(aac_battery_platform,
2540 info->batteryPlatform));
2542 device_printf(sc->aac_dev,
2543 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2544 info->KernelRevision.external.comp.major,
2545 info->KernelRevision.external.comp.minor,
2546 info->KernelRevision.external.comp.dash,
2547 info->KernelRevision.buildNumber,
2548 (u_int32_t)(info->SerialNumber & 0xffffff));
/* %b expands the option bit mask symbolically (format string elided here). */
2550 device_printf(sc->aac_dev, "Supported Options=%b\n",
2551 sc->supported_options,
2574 aac_release_sync_fib(sc);
2575 mtx_unlock(&sc->aac_io_lock);
2579 * Look up a text description of a numeric error code and return a pointer to
/*
 * Translate a numeric code into its text description via a NULL-terminated
 * lookup table.  On a miss, returns the table's fallback string, which by
 * convention is stored one past the NULL sentinel entry (table[i + 1]).
 */
2583 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2587 for (i = 0; table[i].string != NULL; i++)
2588 if (table[i].code == code)
2589 return(table[i].string);
2590 return(table[i + 1].string);
2594 * Management Interface
/*
 * Character-device open entry point for the management interface.  Marks
 * the device busy (preventing detach while an fd is open) and registers a
 * per-open destructor that undoes it on last close.
 */
2598 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2600 struct aac_softc *sc;
2603 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2604 device_busy(sc->aac_dev);
2605 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
/*
 * Management-interface ioctl dispatcher.  Each native FSACTL_* case first
 * dereferences the pointer argument (arg = *(caddr_t*)arg) and then
 * DELIBERATELY falls through into its FSACTL_LNX_* twin, which shares the
 * handler but takes arg directly — do not add breaks between the pairs.
 */
2610 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2612 union aac_statrequest *as;
2613 struct aac_softc *sc;
2616 as = (union aac_statrequest *)arg;
2618 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2622 switch (as->as_item) {
/* Queue statistics request: copy the selected qstat out to the caller. */
2626 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2627 sizeof(struct aac_qstat));
2635 case FSACTL_SENDFIB:
2636 case FSACTL_SEND_LARGE_FIB:
2637 arg = *(caddr_t*)arg;
2638 case FSACTL_LNX_SENDFIB:
2639 case FSACTL_LNX_SEND_LARGE_FIB:
2640 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2641 error = aac_ioctl_sendfib(sc, arg);
2643 case FSACTL_SEND_RAW_SRB:
2644 arg = *(caddr_t*)arg;
2645 case FSACTL_LNX_SEND_RAW_SRB:
2646 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2647 error = aac_ioctl_send_raw_srb(sc, arg);
2649 case FSACTL_AIF_THREAD:
2650 case FSACTL_LNX_AIF_THREAD:
2651 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2654 case FSACTL_OPEN_GET_ADAPTER_FIB:
2655 arg = *(caddr_t*)arg;
2656 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2657 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2658 error = aac_open_aif(sc, arg);
2660 case FSACTL_GET_NEXT_ADAPTER_FIB:
2661 arg = *(caddr_t*)arg;
2662 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2663 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2664 error = aac_getnext_aif(sc, arg);
2666 case FSACTL_CLOSE_GET_ADAPTER_FIB:
2667 arg = *(caddr_t*)arg;
2668 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2669 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2670 error = aac_close_aif(sc, arg);
2672 case FSACTL_MINIPORT_REV_CHECK:
2673 arg = *(caddr_t*)arg;
2674 case FSACTL_LNX_MINIPORT_REV_CHECK:
2675 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2676 error = aac_rev_check(sc, arg);
2678 case FSACTL_QUERY_DISK:
2679 arg = *(caddr_t*)arg;
2680 case FSACTL_LNX_QUERY_DISK:
2681 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2682 error = aac_query_disk(sc, arg);
2684 case FSACTL_DELETE_DISK:
2685 case FSACTL_LNX_DELETE_DISK:
2687 * We don't trust the underland to tell us when to delete a
2688 * container, rather we rely on an AIF coming from the
2693 case FSACTL_GET_PCI_INFO:
2694 arg = *(caddr_t*)arg;
2695 case FSACTL_LNX_GET_PCI_INFO:
2696 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2697 error = aac_get_pci_info(sc, arg);
2699 case FSACTL_GET_FEATURES:
2700 arg = *(caddr_t*)arg;
2701 case FSACTL_LNX_GET_FEATURES:
2702 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2703 error = aac_supported_features(sc, arg);
/* default: unknown command — just log it (error handling elided here). */
2706 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
/*
 * poll(2) handler for the management device.  Reports readable when any
 * registered AIF context has unread entries in the AIF ring (its index
 * differs from the producer index, or it wrapped); otherwise records the
 * thread in the receive selinfo so selwakeuppri() can wake it later.
 */
2714 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2716 struct aac_softc *sc;
2717 struct aac_fib_context *ctx;
2723 mtx_lock(&sc->aac_io_lock);
2724 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2725 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2726 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2727 revents |= poll_events & (POLLIN | POLLRDNORM);
2732 mtx_unlock(&sc->aac_io_lock);
/* Nothing pending: register for notification instead. */
2735 if (poll_events & (POLLIN | POLLRDNORM))
2736 selrecord(td, &sc->rcv_select);
/*
 * Event callback used by the ioctl paths while waiting for a free command:
 * on AAC_EVENT_CMFREE, retry the allocation; if it still fails, re-queue
 * the event and keep waiting, otherwise free the event (the sleeper is
 * woken elsewhere).  Caller must hold aac_io_lock.
 */
2743 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2746 switch (event->ev_type) {
2747 case AAC_EVENT_CMFREE:
2748 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2749 if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2750 aacraid_add_event(sc, event);
2753 free(event, M_AACRAIDBUF);
2762 * Send a FIB supplied from userspace
2764 * Currently, sending a FIB from userspace in BE hosts is not supported.
2765 * There are several things that need to be considered in order to
2766 * support this, such as:
2767 * - At least the FIB data part from userspace should already be in LE,
2768 * or else the kernel would need to know all FIB types to be able to
2769 * correctly convert it to BE.
2770 * - SG tables are converted to BE by aacraid_map_command_sg(). This
2771 * conversion should be suppressed if the FIB comes from userspace.
2772 * - aacraid_wait_command() calls functions that convert the FIB header
2773 * to LE. But if the header is already in LE, the conversion should not
/*
 * FSACTL_SENDFIB handler: copy a FIB from userspace, send it to the
 * adapter synchronously, and copy the result back.  Oversized FIBs are
 * clamped to aac_max_fib_size in both directions.  If no command slot is
 * free, sleeps on a CMFREE event until one is released.
 */
2777 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2779 struct aac_command *cm;
2782 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2789 mtx_lock(&sc->aac_io_lock);
2790 if (aacraid_alloc_command(sc, &cm)) {
2791 struct aac_event *event;
2793 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2795 if (event == NULL) {
2797 mtx_unlock(&sc->aac_io_lock);
2800 event->ev_type = AAC_EVENT_CMFREE;
2801 event->ev_callback = aac_ioctl_event;
2802 event->ev_arg = &cm;
2803 aacraid_add_event(sc, event);
/* Block until aac_ioctl_event() obtains a command for us. */
2804 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2806 mtx_unlock(&sc->aac_io_lock);
2809 * Fetch the FIB header, then re-copy to get data as well.
/* First copyin grabs just the header so we can learn the full size. */
2811 if ((error = copyin(ufib, cm->cm_fib,
2812 sizeof(struct aac_fib_header))) != 0)
2814 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2815 if (size > sc->aac_max_fib_size) {
2816 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2817 size, sc->aac_max_fib_size);
2818 size = sc->aac_max_fib_size;
2820 if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2822 cm->cm_fib->Header.Size = size;
2823 cm->cm_timestamp = time_uptime;
2827 * Pass the FIB to the controller, wait for it to complete.
2829 mtx_lock(&sc->aac_io_lock);
2830 error = aacraid_wait_command(cm);
2831 mtx_unlock(&sc->aac_io_lock);
2833 device_printf(sc->aac_dev,
2834 "aacraid_wait_command return %d\n", error);
2839 * Copy the FIB and data back out to the caller.
2841 size = cm->cm_fib->Header.Size;
2842 if (size > sc->aac_max_fib_size) {
2843 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2844 size, sc->aac_max_fib_size);
2845 size = sc->aac_max_fib_size;
2847 error = copyout(cm->cm_fib, ufib, size);
/* Common exit: return the command slot under the I/O lock. */
2851 mtx_lock(&sc->aac_io_lock);
2852 aacraid_release_command(cm);
2853 mtx_unlock(&sc->aac_io_lock);
2859 * Send a passthrough FIB supplied from userspace
/*
 * FSACTL_SEND_RAW_SRB handler: pass a raw SCSI Request Block from
 * userspace through to the controller.  Supports at most ONE user
 * scatter/gather element (32- or 64-bit form); the data buffer is staged
 * through a driver-allocated DMA buffer with copyin/copyout around the
 * command.  The command's original DMA map is saved and restored because
 * the passthrough temporarily substitutes its own map.
 */
2862 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2864 struct aac_command *cm;
2865 struct aac_fib *fib;
2866 struct aac_srb *srbcmd;
2867 struct aac_srb *user_srb = (struct aac_srb *)arg;
2869 int error, transfer_data = 0;
2870 bus_dmamap_t orig_map = 0;
2871 u_int32_t fibsize = 0;
2872 u_int64_t srb_sg_address;
2873 u_int32_t srb_sg_bytecount;
2875 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2879 mtx_lock(&sc->aac_io_lock);
/* Same wait-for-free-command dance as aac_ioctl_sendfib(). */
2880 if (aacraid_alloc_command(sc, &cm)) {
2881 struct aac_event *event;
2883 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2885 if (event == NULL) {
2887 mtx_unlock(&sc->aac_io_lock);
2890 event->ev_type = AAC_EVENT_CMFREE;
2891 event->ev_callback = aac_ioctl_event;
2892 event->ev_arg = &cm;
2893 aacraid_add_event(sc, event);
2894 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2896 mtx_unlock(&sc->aac_io_lock);
2899 /* save original dma map */
2900 orig_map = cm->cm_datamap;
2903 srbcmd = (struct aac_srb *)fib->data;
/* User-supplied total SRB size; validated against the max FIB payload. */
2904 if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2905 sizeof (u_int32_t))) != 0)
2907 if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2911 if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2914 srbcmd->function = 0; /* SRBF_ExecuteScsi */
2915 srbcmd->retry_limit = 0; /* obsolete */
2917 /* only one sg element from userspace supported */
2918 if (srbcmd->sg_map.SgCount > 1) {
/* Distinguish 32-bit vs 64-bit SG layout by the total structure size. */
2923 if (fibsize == (sizeof(struct aac_srb) +
2924 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2925 struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2926 struct aac_sg_entry sg;
2928 if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2931 srb_sg_bytecount = sg.SgByteCount;
2932 srb_sg_address = (u_int64_t)sg.SgAddress;
2933 } else if (fibsize == (sizeof(struct aac_srb) +
2934 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2936 struct aac_sg_entry64 *sgp =
2937 (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2938 struct aac_sg_entry64 sg;
2940 if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2943 srb_sg_bytecount = sg.SgByteCount;
2944 srb_sg_address = sg.SgAddress;
/* Reply area in userspace immediately follows the SRB structure. */
2953 user_reply = (char *)arg + fibsize;
2954 srbcmd->data_len = srb_sg_bytecount;
2955 if (srbcmd->sg_map.SgCount == 1)
2958 if (transfer_data) {
2960 * Create DMA tag for the passthr. data buffer and allocate it.
2962 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
2963 1, 0, /* algnmnt, boundary */
2964 (sc->flags & AAC_FLAGS_SG_64BIT) ?
2965 BUS_SPACE_MAXADDR_32BIT :
2966 0x7fffffff, /* lowaddr */
2967 BUS_SPACE_MAXADDR, /* highaddr */
2968 NULL, NULL, /* filter, filterarg */
2969 srb_sg_bytecount, /* size */
2970 sc->aac_sg_tablesize, /* nsegments */
2971 srb_sg_bytecount, /* maxsegsize */
2973 NULL, NULL, /* No locking needed */
2974 &cm->cm_passthr_dmat)) {
2978 if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2979 BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2983 /* fill some cm variables */
2984 cm->cm_datalen = srb_sg_bytecount;
2985 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2986 cm->cm_flags |= AAC_CMD_DATAIN;
2987 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2988 cm->cm_flags |= AAC_CMD_DATAOUT;
/* Host-to-device transfer: stage the user data into the DMA buffer. */
2990 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2991 if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2992 cm->cm_data, cm->cm_datalen)) != 0)
2994 /* sync required for bus_dmamem_alloc() alloc. mem.? */
2995 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2996 BUS_DMASYNC_PREWRITE);
3001 fib->Header.Size = sizeof(struct aac_fib_header) +
3002 sizeof(struct aac_srb);
3003 fib->Header.XferState =
3004 AAC_FIBSTATE_HOSTOWNED |
3005 AAC_FIBSTATE_INITIALISED |
3006 AAC_FIBSTATE_EMPTY |
3007 AAC_FIBSTATE_FROMHOST |
3008 AAC_FIBSTATE_REXPECTED |
3012 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
3013 ScsiPortCommandU64 : ScsiPortCommand;
3014 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3016 aac_srb_tole(srbcmd);
/* Loading the map triggers aacraid_map_command_sg(), which submits. */
3019 if (transfer_data) {
3020 bus_dmamap_load(cm->cm_passthr_dmat,
3021 cm->cm_datamap, cm->cm_data,
3023 aacraid_map_command_sg, cm, 0);
3025 aacraid_map_command_sg(cm, NULL, 0, 0);
3028 /* wait for completion */
3029 mtx_lock(&sc->aac_io_lock);
3030 while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3031 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3032 mtx_unlock(&sc->aac_io_lock);
/* Device-to-host transfer: copy results back to the user buffer. */
3035 if (transfer_data && (le32toh(srbcmd->flags) & AAC_SRB_FLAGS_DATA_IN)) {
3036 if ((error = copyout(cm->cm_data,
3037 (void *)(uintptr_t)srb_sg_address,
3038 cm->cm_datalen)) != 0)
3040 /* sync required for bus_dmamem_alloc() allocated mem.? */
3041 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3042 BUS_DMASYNC_POSTREAD);
3046 aac_srb_response_toh((struct aac_srb_response *)fib->data);
3047 error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
/* Teardown: unload/free passthrough DMA resources, restore saved map. */
3050 if (cm && cm->cm_data) {
3052 bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3053 bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3054 cm->cm_datamap = orig_map;
3056 if (cm && cm->cm_passthr_dmat)
3057 bus_dma_tag_destroy(cm->cm_passthr_dmat);
3059 mtx_lock(&sc->aac_io_lock);
3060 aacraid_release_command(cm);
3061 mtx_unlock(&sc->aac_io_lock);
3067 * Request an AIF from the controller (new comm. type1)
/*
 * Post an AIF (Adapter Initiated FIB) request so the controller has a FIB
 * to fill in when an asynchronous event occurs (new comm. type 1).  If no
 * command is available, sets aif_pending so the request is retried when
 * one is freed.
 */
3070 aac_request_aif(struct aac_softc *sc)
3072 struct aac_command *cm;
3073 struct aac_fib *fib;
3075 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3077 if (aacraid_alloc_command(sc, &cm)) {
3078 sc->aif_pending = 1;
3081 sc->aif_pending = 0;
3085 fib->Header.Size = sizeof(struct aac_fib);
3086 fib->Header.XferState =
3087 AAC_FIBSTATE_HOSTOWNED |
3088 AAC_FIBSTATE_INITIALISED |
3089 AAC_FIBSTATE_EMPTY |
3090 AAC_FIBSTATE_FROMHOST |
3091 AAC_FIBSTATE_REXPECTED |
3094 /* set AIF marker */
3095 fib->Header.Handle = 0x00800000;
3096 fib->Header.Command = AifRequest;
3097 ((struct aac_aif_command *)fib->data)->command = htole32(AifReqEvent);
/* No data buffer: submit directly through the SG mapping callback. */
3099 aacraid_map_command_sg(cm, NULL, 0, 0);
3103 * cdevpriv interface private destructor.
/*
 * cdevpriv destructor registered by aac_open(): drops the device-busy
 * reference when the management fd is finally closed.
 */
3106 aac_cdevpriv_dtor(void *arg)
3108 struct aac_softc *sc;
3111 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3112 device_unbusy(sc->aac_dev);
3116 * Handle an AIF sent to us by the controller; queue it for later reference.
3117 * If the queue fills up, then drop the older entries.
/*
 * Process an AIF received from the controller: react to container
 * add/delete and enclosure events (re-enumerating containers and/or
 * triggering CAM rescans), then append the raw FIB to the AIF ring for
 * userspace retrieval, advance per-context indices, and wake sleepers
 * and pollers.
 */
3120 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3122 struct aac_aif_command *aif;
3123 struct aac_container *co, *co_next;
3124 struct aac_fib_context *ctx;
3125 struct aac_fib *sync_fib;
3126 struct aac_mntinforesp mir;
3127 int next, current, found;
3128 int count = 0, changed = 0, i = 0;
3129 u_int32_t channel, uid;
3131 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3133 aif = (struct aac_aif_command*)&fib->data[0];
3134 aacraid_print_aif(sc, aif);
3136 /* Is it an event that we should care about? */
3137 switch (le32toh(aif->command)) {
3138 case AifCmdEventNotify:
3139 switch (le32toh(aif->data.EN.type)) {
3140 case AifEnAddContainer:
3141 case AifEnDeleteContainer:
3143 * A container was added or deleted, but the message
3144 * doesn't tell us anything else! Re-enumerate the
3145 * containers and sort things out.
3147 aac_alloc_sync_fib(sc, &sync_fib);
3150 * Ask the controller for its containers one at
3152 * XXX What if the controller's list changes
3153 * midway through this enumeration?
3154 * XXX This should be done async.
3156 if (aac_get_container_info(sc, sync_fib, i,
3160 count = mir.MntRespCount;
3162 * Check the container against our list.
3163 * co->co_found was already set to 0 in a
3166 if ((mir.Status == ST_OK) &&
3167 (mir.MntTable[0].VolType != CT_NONE)) {
3170 &sc->aac_container_tqh,
3172 if (co->co_mntobj.ObjectId ==
3173 mir.MntTable[0].ObjectId) {
3180 * If the container matched, continue
3189 * This is a new container. Do all the
3190 * appropriate things to set it up.
3192 aac_add_container(sc, &mir, 1, uid);
3196 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3197 aac_release_sync_fib(sc);
3200 * Go through our list of containers and see which ones
3201 * were not marked 'found'. Since the controller didn't
3202 * list them they must have been deleted. Do the
3203 * appropriate steps to destroy the device. Also reset
3204 * the co->co_found field.
3206 co = TAILQ_FIRST(&sc->aac_container_tqh);
3207 while (co != NULL) {
3208 if (co->co_found == 0) {
/* Grab the successor before unlinking/freeing the node. */
3209 co_next = TAILQ_NEXT(co, co_link);
3210 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3212 free(co, M_AACRAIDBUF);
3217 co = TAILQ_NEXT(co, co_link);
3221 /* Attach the newly created containers */
3223 if (sc->cam_rescan_cb != NULL)
3224 sc->cam_rescan_cb(sc, 0,
3225 AAC_CAM_TARGET_WILDCARD);
3230 case AifEnEnclosureManagement:
3231 switch (le32toh(aif->data.EN.data.EEE.eventType)) {
3232 case AIF_EM_DRIVE_INSERTION:
3233 case AIF_EM_DRIVE_REMOVAL:
/* unitID packs bus in bits 24-27 and target in the low 16 bits. */
3234 channel = le32toh(aif->data.EN.data.EEE.unitID);
3235 if (sc->cam_rescan_cb != NULL)
3236 sc->cam_rescan_cb(sc,
3237 ((channel>>24) & 0xF) + 1,
3238 (channel & 0xFFFF));
3244 case AifEnDeleteJBOD:
3245 case AifRawDeviceRemove:
3246 channel = le32toh(aif->data.EN.data.ECE.container);
3247 if (sc->cam_rescan_cb != NULL)
3248 sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3249 AAC_CAM_TARGET_WILDCARD);
3260 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3261 current = sc->aifq_idx;
3262 next = (current + 1) % AAC_AIFQ_LENGTH;
3264 sc->aifq_filled = 1;
3265 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3266 /* Make aifq's FIB header and data LE */
3267 aac_fib_header_tole(&sc->aac_aifq[current].Header);
3268 /* modify AIF contexts */
/* Ring full: push along any reader context we are about to overrun. */
3269 if (sc->aifq_filled) {
3270 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3271 if (next == ctx->ctx_idx)
3273 else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3274 ctx->ctx_idx = next;
3277 sc->aifq_idx = next;
3278 /* On the off chance that someone is sleeping for an aif... */
3279 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3280 wakeup(sc->aac_aifq);
3281 /* Wakeup any poll()ers */
3282 selwakeuppri(&sc->rcv_select, PRIBIO);
3288 * Return the Revision of the driver to userspace and check to see if the
3289 * userspace app is possibly compatible. This is extremely bogus since
3290 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3291 * returning what the card reported.
/*
 * FSACTL_MINIPORT_REV_CHECK handler: report the driver's version to the
 * management app.  Always claims compatibility (possiblyCompatible = 1);
 * see the block comment above — the check is intentionally bogus.
 */
3294 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3296 struct aac_rev_check rev_check;
3297 struct aac_rev_check_resp rev_check_resp;
3300 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3303 * Copyin the revision struct from userspace
3305 if ((error = copyin(udata, (caddr_t)&rev_check,
3306 sizeof(struct aac_rev_check))) != 0) {
3310 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3311 rev_check.callingRevision.buildNumber);
3314 * Doctor up the response struct.
3316 rev_check_resp.possiblyCompatible = 1;
3317 rev_check_resp.adapterSWRevision.external.comp.major =
3318 AAC_DRIVER_MAJOR_VERSION;
3319 rev_check_resp.adapterSWRevision.external.comp.minor =
3320 AAC_DRIVER_MINOR_VERSION;
3321 rev_check_resp.adapterSWRevision.external.comp.type =
3323 rev_check_resp.adapterSWRevision.external.comp.dash =
3324 AAC_DRIVER_BUGFIX_LEVEL;
3325 rev_check_resp.adapterSWRevision.buildNumber =
3328 return(copyout((caddr_t)&rev_check_resp, udata,
3329 sizeof(struct aac_rev_check_resp)));
3333 * Pass the fib context to the caller
/*
 * FSACTL_OPEN_GET_ADAPTER_FIB handler: allocate a new AIF reader context,
 * append it to the softc's context list, derive a unique token from the
 * context's address, and copy the token out to the caller.  On copyout
 * failure the context is torn back down via aac_close_aif().
 */
3336 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3338 struct aac_fib_context *fibctx, *ctx;
3341 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3343 fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3347 mtx_lock(&sc->aac_io_lock);
3348 /* all elements are already 0, add to queue */
3349 if (sc->fibctx == NULL)
3350 sc->fibctx = fibctx;
3352 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3358 /* evaluate unique value */
/* NOTE(review): token is the low 32 bits of the context pointer — TODO
 * confirm collision handling in the elided loop below. */
3359 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3361 while (ctx != fibctx) {
3362 if (ctx->unique == fibctx->unique) {
3370 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3371 mtx_unlock(&sc->aac_io_lock);
3373 aac_close_aif(sc, (caddr_t)ctx);
3378 * Close the caller's fib context
/*
 * FSACTL_CLOSE_GET_ADAPTER_FIB handler: find the AIF context matching the
 * caller's unique token, unlink it from the doubly linked context list
 * (special-casing the list head), and free it.
 */
3381 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3383 struct aac_fib_context *ctx;
3385 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3387 mtx_lock(&sc->aac_io_lock);
3388 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3389 if (ctx->unique == *(uint32_t *)&arg) {
3390 if (ctx == sc->fibctx)
3393 ctx->prev->next = ctx->next;
3395 ctx->next->prev = ctx->prev;
3401 free(ctx, M_AACRAIDBUF);
3403 mtx_unlock(&sc->aac_io_lock);
3408 * Pass the caller the next AIF in their queue
/*
 * FSACTL_GET_NEXT_ADAPTER_FIB handler: look up the caller's AIF context
 * by token and hand out the next queued AIF.  Handles the 32-bit compat
 * ioctl layout, and — if Wait is set and the queue is empty — sleeps
 * (interruptibly) until aac_handle_aif() wakes the queue.
 */
3411 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3413 struct get_adapter_fib_ioctl agf;
3414 struct aac_fib_context *ctx;
3417 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3419 mtx_lock(&sc->aac_io_lock);
3420 #ifdef COMPAT_FREEBSD32
3421 if (SV_CURPROC_FLAG(SV_ILP32)) {
/* 32-bit process: translate the ILP32 ioctl struct field by field. */
3422 struct get_adapter_fib_ioctl32 agf32;
3423 error = copyin(arg, &agf32, sizeof(agf32));
3425 agf.AdapterFibContext = agf32.AdapterFibContext;
3426 agf.Wait = agf32.Wait;
3427 agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3431 error = copyin(arg, &agf, sizeof(agf));
3433 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3434 if (agf.AdapterFibContext == ctx->unique)
3438 mtx_unlock(&sc->aac_io_lock);
3442 error = aac_return_aif(sc, ctx, agf.AifFib);
3443 if (error == EAGAIN && agf.Wait) {
3444 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3445 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3446 while (error == EAGAIN) {
/* Drop the lock across copyout-capable tsleep; PCATCH allows signals. */
3447 mtx_unlock(&sc->aac_io_lock);
3448 error = tsleep(sc->aac_aifq, PRIBIO |
3449 PCATCH, "aacaif", 0);
3450 mtx_lock(&sc->aac_io_lock);
3452 error = aac_return_aif(sc, ctx, agf.AifFib);
3454 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3457 mtx_unlock(&sc->aac_io_lock);
3462 * Hand the next AIF off the top of the queue out to userspace.
/*
 * Copy the next AIF from the ring to userspace for the given reader
 * context and advance the context's index.  When the context has caught
 * up with the producer (and has not wrapped), there is nothing to return
 * — the EAGAIN path is elided from this listing.
 */
3465 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3469 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3471 current = ctx->ctx_idx;
3472 if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3477 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3479 device_printf(sc->aac_dev,
3480 "aac_return_aif: copyout returned %d\n", error);
3483 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
/*
 * FSACTL_GET_PCI_INFO handler: report the adapter's PCI bus and slot
 * numbers to the management app via a small fixed-layout struct.
 */
3489 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3491 struct aac_pci_info {
3497 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3499 pciinf.bus = pci_get_bus(sc->aac_dev);
3500 pciinf.slot = pci_get_slot(sc->aac_dev);
3502 error = copyout((caddr_t)&pciinf, uptr,
3503 sizeof(struct aac_pci_info));
/*
 * FSACTL_GET_FEATURES handler: with an all-zero featuresState the driver
 * reports the state of every supported feature (currently largeLBA and
 * JBOD); with a specific bit set it reports only that feature's state.
 */
3509 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3511 struct aac_features f;
3514 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3516 if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3520 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3521 * ALL zero in the featuresState, the driver will return the current
3522 * state of all the supported features, the data field will not be
3524 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3525 * a specific bit set in the featuresState, the driver will return the
3526 * current state of this specific feature and whatever data that are
3527 * associated with the feature in the data field or perform whatever
3528 * action needed indicates in the data field.
3530 if (f.feat.fValue == 0) {
3531 f.feat.fBits.largeLBA =
3532 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3533 f.feat.fBits.JBODSupport = 1;
3534 /* TODO: In the future, add other features state here as well */
3536 if (f.feat.fBits.largeLBA)
3537 f.feat.fBits.largeLBA =
3538 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3539 /* TODO: Add other features state and data in the future */
3542 error = copyout(&f, uptr, sizeof (f));
3547 * Give the userland some information about the container. The AAC arch
3548 * expects the driver to be a SCSI passthrough type driver, so it expects
3549 * the containers to have b:t:l numbers. Fake it.
/*
 * FSACTL_QUERY_DISK handler: fabricate a bus:target:lun identity for a
 * container so management tools that expect SCSI addressing are happy.
 * A container matching the requested ObjectId is reported Valid/Locked;
 * an unknown id is reported Deleted.
 */
3552 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3554 struct aac_query_disk query_disk;
3555 struct aac_container *co;
3558 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3560 mtx_lock(&sc->aac_io_lock);
3561 error = copyin(uptr, (caddr_t)&query_disk,
3562 sizeof(struct aac_query_disk));
3564 mtx_unlock(&sc->aac_io_lock);
3568 id = query_disk.ContainerNumber;
3570 mtx_unlock(&sc->aac_io_lock);
3574 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3575 if (co->co_mntobj.ObjectId == id)
3580 query_disk.Valid = 0;
3581 query_disk.Locked = 0;
3582 query_disk.Deleted = 1; /* XXX is this right? */
3584 query_disk.Valid = 1;
3585 query_disk.Locked = 1;
3586 query_disk.Deleted = 0;
/* Fake b:t:l: bus = device unit, target 0 (lun handling elided here). */
3587 query_disk.Bus = device_get_unit(sc->aac_dev);
3588 query_disk.Target = 0;
3590 query_disk.UnMapped = 0;
3593 error = copyout((caddr_t)&query_disk, uptr,
3594 sizeof(struct aac_query_disk));
3596 mtx_unlock(&sc->aac_io_lock);
/*
 * Create the virtual "Container Bus": allocate an aac_sim, add an
 * "aacraidp" child device for it, and link it into the softc's SIM list.
 * Allocation failure is treated as fatal (panic) during attach.
 * NOTE(review): lines referencing `mir` near the end appear to belong to
 * a neighboring elided function (container device setup) — this listing
 * splices them together; verify against the full source.
 */
3601 aac_container_bus(struct aac_softc *sc)
3603 struct aac_sim *sim;
3606 sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3607 M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3609 device_printf(sc->aac_dev,
3610 "No memory to add container bus\n");
3611 panic("Out of memory?!");
3613 child = device_add_child(sc->aac_dev, "aacraidp", -1);
3614 if (child == NULL) {
3615 device_printf(sc->aac_dev,
3616 "device_add_child failed for container bus\n");
3617 free(sim, M_AACRAIDBUF);
3618 panic("Out of memory?!");
3621 sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3623 sim->BusType = CONTAINER_BUS;
3624 sim->InitiatorBusId = -1;
3626 sim->sim_dev = child;
3627 sim->aac_cam = NULL;
3629 device_set_ivars(child, sim);
3630 device_set_desc(child, "Container Bus");
3631 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3633 device_set_desc(child, aac_describe_code(aac_container_types,
3634 mir->MntTable[0].VolType));
3636 bus_generic_attach(sc->aac_dev);
/*
 * Discover physical SCSI passthrough buses: ask the firmware for its
 * SCSI method id (CT_GET_SCSI_METHOD), then issue a VM_Ioctl GetBusInfo
 * and create one "aacraidp" child device per valid bus.  Any sync-FIB
 * failure aborts discovery after releasing the FIB and the I/O lock.
 */
3640 aac_get_bus_info(struct aac_softc *sc)
3642 struct aac_fib *fib;
3643 struct aac_ctcfg *c_cmd;
3644 struct aac_ctcfg_resp *c_resp;
3645 struct aac_vmioctl *vmi;
3646 struct aac_vmi_businf_resp *vmi_resp;
3647 struct aac_getbusinf businfo;
3648 struct aac_sim *caminf;
3652 mtx_lock(&sc->aac_io_lock);
3653 aac_alloc_sync_fib(sc, &fib);
3654 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3655 bzero(c_cmd, sizeof(struct aac_ctcfg));
3657 c_cmd->Command = VM_ContainerConfig;
3658 c_cmd->cmd = CT_GET_SCSI_METHOD;
3661 aac_ctcfg_tole(c_cmd);
3662 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3663 sizeof(struct aac_ctcfg));
3665 device_printf(sc->aac_dev, "Error %d sending "
3666 "VM_ContainerConfig command\n", error);
3667 aac_release_sync_fib(sc);
3668 mtx_unlock(&sc->aac_io_lock);
3672 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3673 aac_ctcfg_resp_toh(c_resp);
3674 if (c_resp->Status != ST_OK) {
3675 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3677 aac_release_sync_fib(sc);
3678 mtx_unlock(&sc->aac_io_lock);
/* Remember the firmware's SCSI method id for subsequent VM_Ioctls. */
3682 sc->scsi_method_id = c_resp->param;
3684 vmi = (struct aac_vmioctl *)&fib->data[0];
3685 bzero(vmi, sizeof(struct aac_vmioctl));
3687 vmi->Command = VM_Ioctl;
3688 vmi->ObjType = FT_DRIVE;
3689 vmi->MethId = sc->scsi_method_id;
3691 vmi->IoctlCmd = GetBusInfo;
3693 aac_vmioctl_tole(vmi);
3694 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3695 sizeof(struct aac_vmi_businf_resp));
3697 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3699 aac_release_sync_fib(sc);
3700 mtx_unlock(&sc->aac_io_lock);
3704 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3705 aac_vmi_businf_resp_toh(vmi_resp);
3706 if (vmi_resp->Status != ST_OK) {
3707 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3709 aac_release_sync_fib(sc);
3710 mtx_unlock(&sc->aac_io_lock);
/* Copy bus info out of the FIB before releasing it. */
3714 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3715 aac_release_sync_fib(sc);
3716 mtx_unlock(&sc->aac_io_lock);
3718 for (i = 0; i < businfo.BusCount; i++) {
3719 if (businfo.BusValid[i] != AAC_BUS_VALID)
3722 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3723 M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3724 if (caminf == NULL) {
3725 device_printf(sc->aac_dev,
3726 "No memory to add passthrough bus %d\n", i);
3730 child = device_add_child(sc->aac_dev, "aacraidp", -1);
3731 if (child == NULL) {
3732 device_printf(sc->aac_dev,
3733 "device_add_child failed for passthrough bus %d\n",
3735 free(caminf, M_AACRAIDBUF);
/* Bus numbers start at 1; bus 0 is the container bus. */
3739 caminf->TargetsPerBus = businfo.TargetsPerBus;
3740 caminf->BusNumber = i+1;
3741 caminf->BusType = PASSTHROUGH_BUS;
3742 caminf->InitiatorBusId = -1;
3743 caminf->aac_sc = sc;
3744 caminf->sim_dev = child;
3745 caminf->aac_cam = NULL;
3747 device_set_ivars(child, caminf);
3748 device_set_desc(child, "SCSI Passthrough Bus");
3749 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3754 * Check to see if the kernel is up and running. If we are in a
3755 * BlinkLED state, return the BlinkLED code.
/*
 * Check the firmware status register.  Returns healthy when the adapter
 * reports UP_AND_RUNNING; on KERNEL_PANIC, optionally extracts the
 * BlinkLED diagnostic code (bits 16-23) into *bled for the caller.
 */
3758 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3762 ret = AAC_GET_FWSTATUS(sc);
3764 if (ret & AAC_UP_AND_RUNNING)
3766 else if (ret & AAC_KERNEL_PANIC && bled)
3767 *bled = (ret >> 16) & 0xff;
3773 * Once do an IOP reset, basically have to re-initialize the card as
3774 * if coming up from a cold boot, and the driver is responsible for
3775 * any IO that was outstanding to the adapter at the time of the IOP
3776 * RESET. And prepare the driver for IOP RESET by making the init code
3777 * modular with the ability to call it from multiple places.
/*
 * Full adapter reset sequence.  Must be entered with sc->aac_io_lock
 * held (asserted below).  Steps, in order:
 *   1. Guard against reentry via AAC_STATE_RESET and mask interrupts.
 *   2. Fail every command on the busy and ready queues with
 *      AAC_CMD_RESET so waiters/completions are unblocked.
 *   3. Cleanly shut the firmware down if it is still healthy, then
 *      issue an IOP reset by whichever mechanism the firmware
 *      advertises (MU core reset, IOP_RESET_ALWAYS/IOP_RESET sync
 *      command, or doorbell reset).
 *   4. Re-run firmware init (aac_check_firmware), restore the saved
 *      MSI-X configuration, re-enable interrupts and restart I/O.
 * The lock is dropped temporarily around aacraid_shutdown() and the
 * re-init path, so queue state may change across those windows.
 */
3780 aac_reset_adapter(struct aac_softc *sc)
3782 struct aac_command *cm;
3783 struct aac_fib *fib;
3784 struct aac_pause_command *pc;
3785 u_int32_t status, reset_mask, waitCount, max_msix_orig;
3786 int ret, msi_enabled_orig;
3788 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3789 mtx_assert(&sc->aac_io_lock, MA_OWNED); /* caller must hold the I/O lock */
/* Reentry guard: only one reset may run at a time. */
3791 if (sc->aac_state & AAC_STATE_RESET) {
3792 device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3795 sc->aac_state |= AAC_STATE_RESET;
3797 /* disable interrupt */
3798 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3801 * Abort all pending commands:
3802 * a) on the controller
3804 while ((cm = aac_dequeue_busy(sc)) != NULL) {
3805 cm->cm_flags |= AAC_CMD_RESET; /* mark as failed-by-reset for the owner */
3807 /* is there a completion handler? */
3808 if (cm->cm_complete != NULL) {
3809 cm->cm_complete(cm);
3811 /* assume that someone is sleeping on this
3818 /* b) in the waiting queues */
3819 while ((cm = aac_dequeue_ready(sc)) != NULL) {
3820 cm->cm_flags |= AAC_CMD_RESET;
3822 /* is there a completion handler? */
3823 if (cm->cm_complete != NULL) {
3824 cm->cm_complete(cm);
3826 /* assume that someone is sleeping on this
/*
 * If the firmware is still healthy (0 == up and running), ask it to
 * shut down cleanly before the hard reset.  aacraid_shutdown() sleeps,
 * so the I/O lock is dropped around the call.
 */
3834 if (aac_check_adapter_health(sc, NULL) == 0) {
3835 mtx_unlock(&sc->aac_io_lock);
3836 (void) aacraid_shutdown(sc->aac_dev);
3837 mtx_lock(&sc->aac_io_lock);
3840 /* execute IOP reset */
3841 if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3842 AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST); /* hard core reset */
3844 /* We need to wait for 5 seconds before accessing the MU again
3845 * 10000 * 100us = 1000,000us = 1000ms = 1s
3847 waitCount = 5 * 10000; /* 5 s total, busy-waited in 100 us slices */
3849 DELAY(100); /* delay 100 microseconds */
/* No MU reset support: try the IOP_RESET_ALWAYS sync command first. */
3853 ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3854 0, 0, 0, 0, &status, &reset_mask);
3855 if (ret && !sc->doorbell_mask) {
3856 /* call IOP_RESET for older firmware */
3857 if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0,0,0,0,
3858 &status, NULL)) != 0) {
3859 if (status == AAC_SRB_STS_INVALID_REQUEST) {
3860 device_printf(sc->aac_dev,
3861 "IOP_RESET not supported\n");
3863 /* probably timeout */
3864 device_printf(sc->aac_dev,
3865 "IOP_RESET failed\n");
3868 /* unwind aac_shutdown() */
/*
 * Reset is unavailable: re-enable container I/O with a CT_PAUSE_IO
 * unpause so the (already shut down) firmware resumes service.
 */
3869 aac_alloc_sync_fib(sc, &fib);
3870 pc = (struct aac_pause_command *)&fib->data[0];
3871 pc->Command = VM_ContainerConfig;
3872 pc->Type = CT_PAUSE_IO;
3877 aac_pause_command_tole(pc); /* convert fields to little-endian for fw */
3878 (void) aac_sync_fib(sc, ContainerCommand, 0,
3879 fib, sizeof (struct aac_pause_command));
3880 aac_release_sync_fib(sc);
3884 } else if (sc->doorbell_mask) {
/* IOP_RESET_ALWAYS failed; fall back to the advertised doorbell mask. */
3886 reset_mask = sc->doorbell_mask;
3889 (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3890 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask); /* ring reset doorbell */
3892 * We need to wait for 5 seconds before accessing the
3894 * 10000 * 100us = 1000,000us = 1000ms = 1s
3896 waitCount = 5 * 10000; /* 5 s total, busy-waited in 100 us slices */
3898 DELAY(100); /* delay 100 microseconds */
3905 * Initialize the adapter.
/* Save MSI-X state: aac_check_firmware() runs with MSI disabled. */
3907 max_msix_orig = sc->aac_max_msix;
3908 msi_enabled_orig = sc->msi_enabled;
3909 sc->msi_enabled = FALSE;
3910 if (aac_check_firmware(sc) != 0)
3912 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3913 sc->aac_max_msix = max_msix_orig; /* restore saved MSI-X vector count */
3914 if (msi_enabled_orig) {
3915 sc->msi_enabled = msi_enabled_orig;
3916 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3918 mtx_unlock(&sc->aac_io_lock);
3920 mtx_lock(&sc->aac_io_lock);
/* Reset complete: clear the guard, unmask interrupts, restart I/O. */
3924 sc->aac_state &= ~AAC_STATE_RESET;
3925 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3926 aacraid_startio(sc);