2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2001 Scott Long
6 * Copyright (c) 2000 BSDi
7 * Copyright (c) 2001-2010 Adaptec, Inc.
8 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
39 #define AAC_DRIVERNAME "aacraid"
41 #include "opt_aacraid.h"
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
53 #include <sys/ioccom.h>
57 #include <sys/signalvar.h>
59 #include <sys/eventhandler.h>
62 #include <machine/bus.h>
63 #include <machine/resource.h>
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
73 #ifndef FILTER_HANDLED
74 #define FILTER_HANDLED 0x02
/*
 * Forward declarations for the driver's internal (file-static) helpers.
 * NOTE(review): this numbered listing has lines elided (source line numbers
 * are non-contiguous); several prototypes below are visibly truncated
 * mid-argument-list (e.g. aac_add_container, aac_sync_fib,
 * aac_map_command_helper). Confirm against the pristine source tree.
 */
77 static void aac_add_container(struct aac_softc *sc,
78 struct aac_mntinforesp *mir, int f,
80 static void aac_get_bus_info(struct aac_softc *sc);
81 static void aac_container_bus(struct aac_softc *sc);
82 static void aac_daemon(void *arg);
83 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
84 int pages, int nseg, int nseg_new);
/* Command Processing */
86 /* Command Processing */
87 static void aac_timeout(struct aac_softc *sc);
88 static void aac_command_thread(struct aac_softc *sc);
89 static int aac_sync_fib(struct aac_softc *sc, u_int32_t command,
90 u_int32_t xferstate, struct aac_fib *fib,
/* Command buffer (FIB) allocation and mapping helpers. */
92 /* Command Buffer Management */
93 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
95 static int aac_alloc_commands(struct aac_softc *sc);
96 static void aac_free_commands(struct aac_softc *sc);
97 static void aac_unmap_command(struct aac_command *cm);
/* Low-level controller bring-up and interrupt plumbing. */
99 /* Hardware Interface */
100 static int aac_alloc(struct aac_softc *sc);
101 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
103 static int aac_check_firmware(struct aac_softc *sc);
104 static void aac_define_int_mode(struct aac_softc *sc);
105 static int aac_init(struct aac_softc *sc);
106 static int aac_find_pci_capability(struct aac_softc *sc, int cap);
107 static int aac_setup_intr(struct aac_softc *sc);
108 static int aac_check_config(struct aac_softc *sc);
/* Register-level accessors for the PMC SRC hardware family. */
110 /* PMC SRC interface */
111 static int aac_src_get_fwstatus(struct aac_softc *sc);
112 static void aac_src_qnotify(struct aac_softc *sc, int qbit);
113 static int aac_src_get_istatus(struct aac_softc *sc);
114 static void aac_src_clear_istatus(struct aac_softc *sc, int mask);
115 static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
116 u_int32_t arg0, u_int32_t arg1,
117 u_int32_t arg2, u_int32_t arg3);
118 static int aac_src_get_mailbox(struct aac_softc *sc, int mb);
119 static void aac_src_access_devreg(struct aac_softc *sc, int mode);
120 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
121 static int aac_src_get_outb_queue(struct aac_softc *sc);
122 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
/*
 * Method table binding the generic driver core to the PMC SRC
 * register-access routines declared above.
 * NOTE(review): lines are missing from this listing (e.g. the qnotify,
 * get_istatus and mailbox entries, and the closing "};") -- verify the
 * full initializer order against struct aac_interface in aacraid_var.h.
 */
124 struct aac_interface aacraid_src_interface = {
125 aac_src_get_fwstatus,
128 aac_src_clear_istatus,
131 aac_src_access_devreg,
132 aac_src_send_command,
133 aac_src_get_outb_queue,
134 aac_src_set_outb_queue
/*
 * PMC SRCv variant: identical to the SRC method table except for the
 * mailbox set/get routines, which use SRCv-specific register offsets.
 * NOTE(review): the listing elides some entries and the closing "};";
 * confirm against the pristine source.
 */
137 /* PMC SRCv interface */
138 static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
139 u_int32_t arg0, u_int32_t arg1,
140 u_int32_t arg2, u_int32_t arg3);
141 static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
143 struct aac_interface aacraid_srcv_interface = {
144 aac_src_get_fwstatus,
147 aac_src_clear_istatus,
148 aac_srcv_set_mailbox,
149 aac_srcv_get_mailbox,
150 aac_src_access_devreg,
151 aac_src_send_command,
152 aac_src_get_outb_queue,
153 aac_src_set_outb_queue
156 /* Debugging and Diagnostics */
/*
 * Lookup table mapping firmware-reported CPU variant codes to
 * human-readable names; the zero entry is the catch-all terminator
 * (presumably consumed by aac_describe_code() below).
 * NOTE(review): the closing "};" is elided from this listing.
 */
157 static struct aac_code_lookup aac_cpu_variant[] = {
158 {"i960JX", CPUI960_JX},
159 {"i960CX", CPUI960_CX},
160 {"i960HX", CPUI960_HX},
161 {"i960RX", CPUI960_RX},
162 {"i960 80303", CPUI960_80303},
163 {"StrongARM SA110", CPUARM_SA110},
164 {"PPC603e", CPUPPC_603e},
165 {"XScale 80321", CPU_XSCALE_80321},
166 {"MIPS 4KC", CPU_MIPS_4KC},
167 {"MIPS 5KC", CPU_MIPS_5KC},
168 {"Unknown StrongARM", CPUARM_xxx},
169 {"Unknown PowerPC", CPUPPC_xxx},
171 {"Unknown processor", 0}
/*
 * Lookup table translating the adapter's battery-platform code to a
 * descriptive string; terminated by the zero catch-all entry.
 * NOTE(review): closing "};" elided from this listing.
 */
174 static struct aac_code_lookup aac_battery_platform[] = {
175 {"required battery present", PLATFORM_BAT_REQ_PRESENT},
176 {"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT},
177 {"optional battery present", PLATFORM_BAT_OPT_PRESENT},
178 {"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT},
179 {"no battery support", PLATFORM_BAT_NOT_SUPPORTED},
181 {"unknown battery platform", 0}
/*
 * Diagnostics and /dev control-node (ioctl) interface declarations.
 * NOTE(review): several prototypes are truncated by elided lines
 * (aac_describe_code, aac_get_container_info, aac_check_adapter_health).
 */
183 static void aac_describe_controller(struct aac_softc *sc);
184 static char *aac_describe_code(struct aac_code_lookup *table,
/* Entry points reached through the aacraid character device. */
187 /* Management Interface */
188 static d_open_t aac_open;
189 static d_ioctl_t aac_ioctl;
190 static d_poll_t aac_poll;
191 #if __FreeBSD_version >= 702000
192 static void aac_cdevpriv_dtor(void *arg);
194 static d_close_t aac_close;
196 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
197 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
198 static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
199 static void aac_request_aif(struct aac_softc *sc);
200 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
201 static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
202 static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
203 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
204 static int aac_return_aif(struct aac_softc *sc,
205 struct aac_fib_context *ctx, caddr_t uptr);
206 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
207 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
208 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
209 static void aac_ioctl_event(struct aac_softc *sc,
210 struct aac_event *event, void *arg);
211 static int aac_reset_adapter(struct aac_softc *sc);
212 static int aac_get_container_info(struct aac_softc *sc,
213 struct aac_fib *fib, int cid,
214 struct aac_mntinforesp *mir,
217 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
/*
 * Character-device switch for the /dev/aacraid%d management node,
 * plus the driver's malloc type and sysctl root (hw.aacraid).
 * D_NEEDGIANT: ioctl paths still rely on Giant serialization.
 * NOTE(review): listing elides several .d_* initializers and the
 * closing "};" -- confirm the full cdevsw against the pristine source.
 */
219 static struct cdevsw aacraid_cdevsw = {
220 .d_version = D_VERSION,
221 .d_flags = D_NEEDGIANT,
/* Pre-7.2 kernels lack cdevpriv; fall back to an explicit d_close. */
223 #if __FreeBSD_version < 702000
224 .d_close = aac_close,
226 .d_ioctl = aac_ioctl,
231 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
234 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
241 * Initialize the controller and softc
/*
 * Bring up one controller instance: verify firmware, allocate DMA
 * resources, hook interrupts, create the /dev node and AIF thread,
 * enumerate containers, and finally unsuspend and enable interrupts.
 * Returns 0 on success (error paths are elided from this listing).
 * NOTE(review): body is fragmentary here -- declarations of fib/uid/
 * error/unit and several statements are missing from the listing.
 */
244 aacraid_attach(struct aac_softc *sc)
248 struct aac_mntinforesp mir;
249 int count = 0, i = 0;
252 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
253 sc->hint_flags = device_get_flags(sc->aac_dev);
255 * Initialize per-controller queues.
/* Keep I/O blocked until setup completes. */
261 /* mark controller as suspended until we get ourselves organised */
262 sc->aac_state |= AAC_STATE_SUSPEND;
265 * Check that the firmware on the card is supported.
267 sc->msi_enabled = sc->msi_tupelo = FALSE;
268 if ((error = aac_check_firmware(sc)) != 0)
274 mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
275 TAILQ_INIT(&sc->aac_container_tqh);
276 TAILQ_INIT(&sc->aac_ev_cmfree);
278 #if __FreeBSD_version >= 800000
279 /* Initialize the clock daemon callout. */
280 callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
283 * Initialize the adapter.
285 if ((error = aac_alloc(sc)) != 0)
287 aac_define_int_mode(sc);
288 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
289 if ((error = aac_init(sc)) != 0)
294 * Allocate and connect our interrupt.
296 if ((error = aac_setup_intr(sc)) != 0)
300 * Print a little information about the controller.
302 aac_describe_controller(sc);
305 * Make the control device.
307 unit = device_get_unit(sc->aac_dev);
308 sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
309 0640, "aacraid%d", unit);
310 sc->aac_dev_t->si_drv1 = sc;
/* Kernel thread that services adapter-initiated FIBs (AIFs). */
312 /* Create the AIF thread */
313 if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
314 &sc->aifthread, 0, 0, "aacraid%daif", unit))
315 panic("Could not create AIF thread");
317 /* Register the shutdown method to only be called post-dump */
318 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
319 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
320 device_printf(sc->aac_dev,
321 "shutdown event registration failed\n");
/* Probe each container slot synchronously under the I/O lock. */
323 /* Find containers */
324 mtx_lock(&sc->aac_io_lock);
325 aac_alloc_sync_fib(sc, &fib);
326 /* loop over possible containers */
328 if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
331 count = mir.MntRespCount;
332 aac_add_container(sc, &mir, 0, uid);
334 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
335 aac_release_sync_fib(sc);
336 mtx_unlock(&sc->aac_io_lock);
338 /* Register with CAM for the containers */
339 TAILQ_INIT(&sc->aac_sim_tqh);
340 aac_container_bus(sc);
341 /* Register with CAM for the non-DASD devices */
342 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
343 aac_get_bus_info(sc);
345 /* poke the bus to actually attach the child devices */
346 bus_generic_attach(sc->aac_dev);
348 /* mark the controller up */
349 sc->aac_state &= ~AAC_STATE_SUSPEND;
351 /* enable interrupts now */
352 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
/* Periodic housekeeping: callout(9) on 8.x+, legacy timeout(9) otherwise. */
354 #if __FreeBSD_version >= 800000
355 mtx_lock(&sc->aac_io_lock);
356 callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
357 mtx_unlock(&sc->aac_io_lock);
363 sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
/*
 * Periodic housekeeping callout: pushes the host's wall-clock time to
 * the adapter via a SendHostTime FIB, then reschedules itself.
 * Called with (or taking, on pre-8.x) the per-controller I/O lock.
 * NOTE(review): listing elides the fib declaration, the timestamp
 * setup, and several statements between the visible lines.
 */
371 aac_daemon(void *arg)
373 struct aac_softc *sc;
375 struct aac_command *cm;
379 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* On callout(9) kernels, bail if we were stopped or are being drained. */
381 #if __FreeBSD_version >= 800000
382 mtx_assert(&sc->aac_io_lock, MA_OWNED);
383 if (callout_pending(&sc->aac_daemontime) ||
384 callout_active(&sc->aac_daemontime) == 0)
387 mtx_lock(&sc->aac_io_lock);
/* Best effort: skip the time update if no command is free right now. */
391 if (!aacraid_alloc_command(sc, &cm)) {
393 cm->cm_timestamp = time_uptime;
395 cm->cm_flags |= AAC_CMD_WAIT;
398 sizeof(struct aac_fib_header) + sizeof(u_int32_t);
399 fib->Header.XferState =
400 AAC_FIBSTATE_HOSTOWNED |
401 AAC_FIBSTATE_INITIALISED |
403 AAC_FIBSTATE_FROMHOST |
404 AAC_FIBSTATE_REXPECTED |
407 AAC_FIBSTATE_FAST_RESPONSE;
408 fib->Header.Command = SendHostTime;
409 *(uint32_t *)fib->data = tv.tv_sec;
411 aacraid_map_command_sg(cm, NULL, 0, 0);
412 aacraid_release_command(cm);
/* Re-arm for the next interval (30 min on callout kernels). */
415 #if __FreeBSD_version >= 800000
416 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
418 mtx_unlock(&sc->aac_io_lock);
421 sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
/*
 * Queue an event callback; currently only AAC_EVENT_CMFREE (fire when
 * a command becomes free) is recognized -- anything else is logged.
 * NOTE(review): default-case tail and closing braces elided in listing.
 */
426 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
429 switch (event->ev_type & AAC_EVENT_MASK) {
430 case AAC_EVENT_CMFREE:
431 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
434 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
443 * Request information of container #cid
/*
 * Query the adapter for information about container #cid, filling *mir
 * (mount-info response) and *uid (32-bit container UID).
 * Two code paths: a synchronous FIB (sync_fib != NULL) or an async
 * command allocated here and waited on with aacraid_wait_command().
 * A VM_NameServe* request fetches the mount info; if the container is
 * valid and not hidden, a second ContainerCommand (CT_CID_TO_32BITS_UID)
 * retrieves its UID.
 * NOTE(review): many statements (fib assignment, returns, error paths,
 * closing braces) are elided from this listing.
 */
446 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
447 struct aac_mntinforesp *mir, u_int32_t *uid)
449 struct aac_command *cm;
451 struct aac_mntinfo *mi;
452 struct aac_cnt_config *ccfg;
/* Async path needs its own command; fail softly if none are free. */
455 if (sync_fib == NULL) {
456 if (aacraid_alloc_command(sc, &cm)) {
457 device_printf(sc->aac_dev,
458 "Warning, no free command available\n");
466 mi = (struct aac_mntinfo *)&fib->data[0];
467 /* 4KB support?, 64-bit LBA? */
468 if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
469 mi->Command = VM_NameServeAllBlk;
470 else if (sc->flags & AAC_FLAGS_LBA_64BIT)
471 mi->Command = VM_NameServe64;
473 mi->Command = VM_NameServe;
474 mi->MntType = FT_FILESYS;
/* Synchronous-FIB variant of the probe. */
478 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
479 sizeof(struct aac_mntinfo))) {
480 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
/* Asynchronous variant: build the FIB header and sleep for completion. */
484 cm->cm_timestamp = time_uptime;
488 sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
489 fib->Header.XferState =
490 AAC_FIBSTATE_HOSTOWNED |
491 AAC_FIBSTATE_INITIALISED |
493 AAC_FIBSTATE_FROMHOST |
494 AAC_FIBSTATE_REXPECTED |
497 AAC_FIBSTATE_FAST_RESPONSE;
498 fib->Header.Command = ContainerCommand;
499 if (aacraid_wait_command(cm) != 0) {
500 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
501 aacraid_release_command(cm);
505 bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
/* Valid, non-hidden container: normalize block size and fetch its UID. */
509 if (mir->MntTable[0].VolType != CT_NONE &&
510 !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
511 if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
/* Default to 512-byte sectors when variable block size is unsupported. */
512 mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
513 mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
515 ccfg = (struct aac_cnt_config *)&fib->data[0];
516 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
517 ccfg->Command = VM_ContainerConfig;
518 ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
519 ccfg->CTCommand.param[0] = cid;
522 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
523 sizeof(struct aac_cnt_config));
524 if (rval == 0 && ccfg->Command == ST_OK &&
525 ccfg->CTCommand.param[0] == CT_OK &&
526 mir->MntTable[0].VolType != CT_PASSTHRU)
527 *uid = ccfg->CTCommand.param[1];
/* Async variant of the UID fetch (mirrors the sync one above). */
530 sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
531 fib->Header.XferState =
532 AAC_FIBSTATE_HOSTOWNED |
533 AAC_FIBSTATE_INITIALISED |
535 AAC_FIBSTATE_FROMHOST |
536 AAC_FIBSTATE_REXPECTED |
539 AAC_FIBSTATE_FAST_RESPONSE;
540 fib->Header.Command = ContainerCommand;
541 rval = aacraid_wait_command(cm);
542 if (rval == 0 && ccfg->Command == ST_OK &&
543 ccfg->CTCommand.param[0] == CT_OK &&
544 mir->MntTable[0].VolType != CT_PASSTHRU)
545 *uid = ccfg->CTCommand.param[1];
546 aacraid_release_command(cm);
554 * Create a device to represent a new container
/*
 * Record a newly discovered container: if the mount response reports a
 * valid volume, allocate an aac_container, copy the mount object into
 * it, and append it to the softc's container list.
 * NOTE(review): listing elides the malloc flags/NULL branch and the
 * fields set between the bcopy and the TAILQ insert.
 */
557 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
560 struct aac_container *co;
562 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
565 * Check container volume type for validity. Note that many of
566 * the possible types may never show up.
568 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
569 co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
/* Allocation failure is treated as fatal at attach time. */
572 panic("Out of memory?!");
576 bcopy(&mir->MntTable[0], &co->co_mntobj,
577 sizeof(struct aac_mntobj));
579 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
584 * Allocate resources associated with (sc)
/*
 * Allocate the controller's DMA resources: three bus_dma tags (data
 * buffers, FIBs, common area), the common-structure memory itself, the
 * command array, and an initial pool of FIBs/commands.
 * Returns 0 on success; error returns are elided from this listing.
 */
587 aac_alloc(struct aac_softc *sc)
591 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
594 * Create DMA tag for mapping buffers into controller-addressable space.
/* lowaddr depends on whether the adapter can do 64-bit S/G addresses. */
596 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
597 1, 0, /* algnmnt, boundary */
598 (sc->flags & AAC_FLAGS_SG_64BIT) ?
600 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
601 BUS_SPACE_MAXADDR, /* highaddr */
602 NULL, NULL, /* filter, filterarg */
603 sc->aac_max_sectors << 9, /* maxsize */
604 sc->aac_sg_tablesize, /* nsegments */
605 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
606 BUS_DMA_ALLOCNOW, /* flags */
607 busdma_lock_mutex, /* lockfunc */
608 &sc->aac_io_lock, /* lockfuncarg */
609 &sc->aac_buffer_dmat)) {
610 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
615 * Create DMA tag for mapping FIBs into controller-addressable space..
/* +31 leaves room for aligning each FIB to a 32-byte boundary. */
617 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
618 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
619 sizeof(struct aac_fib_xporthdr) + 31);
621 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
622 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
623 1, 0, /* algnmnt, boundary */
624 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
625 BUS_SPACE_MAXADDR_32BIT :
626 0x7fffffff, /* lowaddr */
627 BUS_SPACE_MAXADDR, /* highaddr */
628 NULL, NULL, /* filter, filterarg */
629 maxsize, /* maxsize */
631 maxsize, /* maxsize */
633 NULL, NULL, /* No locking needed */
634 &sc->aac_fib_dmat)) {
635 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
640 * Create DMA tag for the common structure and allocate it.
/* Extra u_int32_t per FIB for the host response-ready queue (rrq). */
642 maxsize = sizeof(struct aac_common);
643 maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
644 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
645 1, 0, /* algnmnt, boundary */
646 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
647 BUS_SPACE_MAXADDR_32BIT :
648 0x7fffffff, /* lowaddr */
649 BUS_SPACE_MAXADDR, /* highaddr */
650 NULL, NULL, /* filter, filterarg */
651 maxsize, /* maxsize */
653 maxsize, /* maxsegsize */
655 NULL, NULL, /* No locking needed */
656 &sc->aac_common_dmat)) {
657 device_printf(sc->aac_dev,
658 "can't allocate common structure DMA tag\n");
661 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
662 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
663 device_printf(sc->aac_dev, "can't allocate common structure\n");
/* aac_common_map records the bus address; load cannot bounce here. */
667 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
668 sc->aac_common, maxsize,
669 aac_common_map, sc, 0);
670 bzero(sc->aac_common, maxsize);
672 /* Allocate some FIBs and associated command structs */
673 TAILQ_INIT(&sc->aac_fibmap_tqh);
674 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
675 M_AACRAIDBUF, M_WAITOK|M_ZERO);
676 mtx_lock(&sc->aac_io_lock);
/* Grow the FIB pool until the cap is reached or allocation fails. */
677 while (sc->total_fibs < sc->aac_max_fibs) {
678 if (aac_alloc_commands(sc) != 0)
681 mtx_unlock(&sc->aac_io_lock);
682 if (sc->total_fibs == 0)
689 * Free all of the resources associated with (sc)
691 * Should not be called if the controller is active.
/*
 * Release every resource acquired by aac_alloc()/attach, in reverse
 * order: /dev node, FIBs and their DMA tag, common area, interrupt
 * vectors (up to AAC_MAX_MSIX), MSI, data and parent DMA tags, and the
 * register BAR mappings. Must not be called while the controller is
 * active.
 */
694 aacraid_free(struct aac_softc *sc)
698 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
700 /* remove the control device */
701 if (sc->aac_dev_t != NULL)
702 destroy_dev(sc->aac_dev_t);
704 /* throw away any FIB buffers, discard the FIB DMA tag */
705 aac_free_commands(sc);
706 if (sc->aac_fib_dmat)
707 bus_dma_tag_destroy(sc->aac_fib_dmat);
709 free(sc->aac_commands, M_AACRAIDBUF);
711 /* destroy the common area */
712 if (sc->aac_common) {
713 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
714 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
715 sc->aac_common_dmamap);
717 if (sc->aac_common_dmat)
718 bus_dma_tag_destroy(sc->aac_common_dmat);
/* Tear down each established interrupt handler and its IRQ resource. */
720 /* disconnect the interrupt handler */
721 for (i = 0; i < AAC_MAX_MSIX; ++i) {
723 bus_teardown_intr(sc->aac_dev,
724 sc->aac_irq[i], sc->aac_intr[i]);
726 bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
727 sc->aac_irq_rid[i], sc->aac_irq[i]);
731 if (sc->msi_enabled || sc->msi_tupelo)
732 pci_release_msi(sc->aac_dev);
734 /* destroy data-transfer DMA tag */
735 if (sc->aac_buffer_dmat)
736 bus_dma_tag_destroy(sc->aac_buffer_dmat);
738 /* destroy the parent DMA tag */
739 if (sc->aac_parent_dmat)
740 bus_dma_tag_destroy(sc->aac_parent_dmat);
742 /* release the register window mapping */
743 if (sc->aac_regs_res0 != NULL)
744 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
745 sc->aac_regs_rid0, sc->aac_regs_res0);
746 if (sc->aac_regs_res1 != NULL)
747 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
748 sc->aac_regs_rid1, sc->aac_regs_res1);
752 * Disconnect from the controller completely, in preparation for unload.
/*
 * Device detach: stop the housekeeping timer, free container records,
 * delete CAM SIM children, stop the AIF kthread, shut the controller
 * down, deregister the shutdown hook, and destroy the I/O lock.
 * NOTE(review): the sim declaration, some error checks, and the final
 * aacraid_free()/return are elided from this listing.
 */
755 aacraid_detach(device_t dev)
757 struct aac_softc *sc;
758 struct aac_container *co;
762 sc = device_get_softc(dev);
763 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
765 #if __FreeBSD_version >= 800000
766 callout_drain(&sc->aac_daemontime);
768 untimeout(aac_daemon, (void *)sc, sc->timeout_id);
770 /* Remove the child containers */
771 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
772 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
773 free(co, M_AACRAIDBUF);
776 /* Remove the CAM SIMs */
777 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
778 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
779 error = device_delete_child(dev, sim->sim_dev);
782 free(sim, M_AACRAIDBUF);
/* Ask the AIF thread to exit and wait up to 30s for it to do so. */
785 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
786 sc->aifflags |= AAC_AIFFLAGS_EXIT;
787 wakeup(sc->aifthread);
788 tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
791 if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
792 panic("Cannot shutdown AIF thread");
794 if ((error = aacraid_shutdown(dev)))
797 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
801 mtx_destroy(&sc->aac_io_lock);
807 * Bring the controller down to a dormant state and detach all child devices.
809 * This function is called before detach or system shutdown.
811 * Note that we can assume that the bioq on the controller is empty, as we won't
812 * allow shutdown if any device is open.
/*
 * Quiesce the controller for detach or system shutdown: suspend new
 * I/O, send a VM_CloseAll ContainerCommand (ContainerId 0xfffffffe =
 * all containers) via a sync FIB, then disable interrupts.
 * Assumes the bioq is already empty (no devices may be open).
 * NOTE(review): fib declaration and final return elided in listing.
 */
815 aacraid_shutdown(device_t dev)
817 struct aac_softc *sc;
819 struct aac_close_command *cc;
821 sc = device_get_softc(dev);
822 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
824 sc->aac_state |= AAC_STATE_SUSPEND;
827 * Send a Container shutdown followed by a HostShutdown FIB to the
828 * controller to convince it that we don't want to talk to it anymore.
829 * We've been closed and all I/O completed already
831 device_printf(sc->aac_dev, "shutting down controller...");
833 mtx_lock(&sc->aac_io_lock);
834 aac_alloc_sync_fib(sc, &fib);
835 cc = (struct aac_close_command *)&fib->data[0];
837 bzero(cc, sizeof(struct aac_close_command));
838 cc->Command = VM_CloseAll;
839 cc->ContainerId = 0xfffffffe;
840 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
841 sizeof(struct aac_close_command)))
846 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
847 aac_release_sync_fib(sc);
848 mtx_unlock(&sc->aac_io_lock);
854 * Bring the controller to a quiescent state, ready for system suspend.
/*
 * System suspend hook: mark the controller suspended and mask its
 * interrupts. Mirrors aacraid_resume() below.
 */
857 aacraid_suspend(device_t dev)
859 struct aac_softc *sc;
861 sc = device_get_softc(dev);
863 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
864 sc->aac_state |= AAC_STATE_SUSPEND;
866 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
871 * Bring the controller back to a state ready for operation.
/*
 * System resume hook: clear the suspend flag and re-enable adapter
 * interrupts. Inverse of aacraid_suspend().
 */
874 aacraid_resume(device_t dev)
876 struct aac_softc *sc;
878 sc = device_get_softc(dev);
880 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
881 sc->aac_state &= ~AAC_STATE_SUSPEND;
882 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
887 * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
/*
 * Interrupt handler for the NEW_COMM_TYPE1/2/34 interfaces.
 * Decodes the doorbell (MSI vs INTx register layouts differ), then:
 *  - completes a pending synchronous command, if signalled;
 *  - drains the per-vector host response-ready queue (ac_host_rrq),
 *    distinguishing fast responses (bit 30) and AIFs (bit 23);
 *  - requests further AIFs when indicated, and restarts queued I/O.
 * NOTE(review): this listing elides many statements (vector_no decl,
 * loop constructs, completion-callback invocations, wakeups, braces);
 * the visible flow is indicative only.
 */
890 aacraid_new_intr_type1(void *arg)
892 struct aac_msix_ctx *ctx;
893 struct aac_softc *sc;
895 struct aac_command *cm;
897 u_int32_t bellbits, bellbits_shifted, index, handle;
898 int isFastResponse, isAif, noMoreAif, mode;
900 ctx = (struct aac_msix_ctx *)arg;
902 vector_no = ctx->vector_no;
904 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
905 mtx_lock(&sc->aac_io_lock);
/* MSI path: doorbell bits live in the MSI outbound doorbell register. */
907 if (sc->msi_enabled) {
908 mode = AAC_INT_MODE_MSI;
909 if (vector_no == 0) {
910 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
911 if (bellbits & 0x40000)
912 mode |= AAC_INT_MODE_AIF;
913 else if (bellbits & 0x1000)
914 mode |= AAC_INT_MODE_SYNC;
/* Legacy INTx path: read, classify, and acknowledge the doorbell. */
917 mode = AAC_INT_MODE_INTX;
918 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
919 if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
920 bellbits = AAC_DB_RESPONSE_SENT_NS;
921 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
923 bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
924 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
925 if (bellbits_shifted & AAC_DB_AIF_PENDING)
926 mode |= AAC_INT_MODE_AIF;
927 else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
928 mode |= AAC_INT_MODE_SYNC;
930 /* ODR readback, Prep #238630 */
931 AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
/* Complete the single outstanding synchronous command, if any. */
934 if (mode & AAC_INT_MODE_SYNC) {
935 if (sc->aac_sync_cm) {
936 cm = sc->aac_sync_cm;
937 cm->cm_flags |= AAC_CMD_COMPLETED;
938 /* is there a completion handler? */
939 if (cm->cm_complete != NULL) {
942 /* assume that someone is sleeping on this command */
945 sc->flags &= ~AAC_QUEUE_FRZN;
946 sc->aac_sync_cm = NULL;
951 if (mode & AAC_INT_MODE_AIF) {
952 if (mode & AAC_INT_MODE_INTX) {
/* Drain completed handles from this vector's response-ready queue. */
959 /* handle async. status */
960 index = sc->aac_host_rrq_idx[vector_no];
962 isFastResponse = isAif = noMoreAif = 0;
963 /* remove toggle bit (31) */
964 handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
965 /* check fast response bit (30) */
966 if (handle & 0x40000000)
968 /* check AIF bit (23) */
969 else if (handle & 0x00800000)
971 handle &= 0x0000ffff;
/* Handles are 1-based indexes into the command array. */
975 cm = sc->aac_commands + (handle - 1);
977 sc->aac_rrq_outstanding[vector_no]--;
979 noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
981 aac_handle_aif(sc, fib);
983 aacraid_release_command(cm);
/* Fast response: adapter skipped the FIB write-back; fake an ST_OK. */
985 if (isFastResponse) {
986 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
987 *((u_int32_t *)(fib->data)) = ST_OK;
988 cm->cm_flags |= AAC_CMD_FASTRESP;
991 aac_unmap_command(cm);
992 cm->cm_flags |= AAC_CMD_COMPLETED;
994 /* is there a completion handler? */
995 if (cm->cm_complete != NULL) {
998 /* assume that someone is sleeping on this command */
1001 sc->flags &= ~AAC_QUEUE_FRZN;
/* Clear the slot and advance, wrapping within this vector's region. */
1004 sc->aac_common->ac_host_rrq[index++] = 0;
1005 if (index == (vector_no + 1) * sc->aac_vector_cap)
1006 index = vector_no * sc->aac_vector_cap;
1007 sc->aac_host_rrq_idx[vector_no] = index;
1009 if ((isAif && !noMoreAif) || sc->aif_pending)
1010 aac_request_aif(sc);
1014 if (mode & AAC_INT_MODE_AIF) {
1015 aac_request_aif(sc);
1016 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1020 /* see if we can start some more I/O */
1021 if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1022 aacraid_startio(sc);
1023 mtx_unlock(&sc->aac_io_lock);
1027 * Handle notification of one or more FIBs coming from the controller.
/*
 * Per-controller AIF kernel thread. Sleeps on sc->aifthread and wakes
 * periodically (AAC_PERIODIC_INTERVAL) or on demand to: allocate more
 * FIBs when AAC_AIFFLAGS_ALLOCFIBS is set, check for stuck commands on
 * timeout wakeups, and drain the adapter's printf buffer. Exits when
 * AAC_AIFFLAGS_EXIT is set, waking the detach path via sc->aac_dev.
 */
1030 aac_command_thread(struct aac_softc *sc)
1034 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1036 mtx_lock(&sc->aac_io_lock);
1037 sc->aifflags = AAC_AIFFLAGS_RUNNING;
1039 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1042 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1043 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1044 "aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1047 * First see if any FIBs need to be allocated. This needs
1048 * to be called without the driver lock because contigmalloc
1049 * will grab Giant, and would result in an LOR.
1051 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1052 aac_alloc_commands(sc);
1053 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1054 aacraid_startio(sc);
1058 * While we're here, check to see if any commands are stuck.
1059 * This is pretty low-priority, so it's ok if it doesn't
/* EWOULDBLOCK means the msleep timed out, i.e. a periodic wakeup. */
1062 if (retval == EWOULDBLOCK)
1065 /* Check the hardware printf message buffer */
1066 if (sc->aac_common->ac_printf[0] != 0)
1067 aac_print_printf(sc);
/* Signal detach that the thread is gone, then terminate. */
1069 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1070 mtx_unlock(&sc->aac_io_lock);
1071 wakeup(sc->aac_dev);
1073 aac_kthread_exit(0);
1077 * Submit a command to the controller, return when it completes.
1078 * XXX This is very dangerous! If the card has gone out to lunch, we could
1079 * be stuck here forever. At the same time, signals are not caught
1080 * because there is a risk that a signal could wakeup the sleep before
1081 * the card has a chance to complete the command. Since there is no way
1082 * to cancel a command that is in progress, we can't protect against the
1083 * card completing a command late and spamming the command and data
1084 * memory. So, we are held hostage until the command completes.
/*
 * Enqueue a command and sleep (uninterruptibly, timeout 0) until the
 * interrupt path completes it and wakes us. Caller must hold the I/O
 * lock. See the warning above: a wedged adapter blocks us forever, and
 * signals are deliberately not caught because a late completion could
 * otherwise scribble on reclaimed command/data memory.
 */
1087 aacraid_wait_command(struct aac_command *cm)
1089 struct aac_softc *sc;
1093 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1094 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1096 /* Put the command on the ready queue and get things going */
1097 aac_enqueue_ready(cm);
1098 aacraid_startio(sc);
1099 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1104 *Command Buffer Management
1108 * Allocate a command.
/*
 * Pop a command from the freelist into *cmp. On an empty freelist,
 * nudge the AIF thread to allocate more FIBs if we are still below
 * aac_max_fibs. NOTE(review): the failure return and the success tail
 * (assignment to *cmp, return 0) are elided from this listing.
 */
1111 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1113 struct aac_command *cm;
1115 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1117 if ((cm = aac_dequeue_free(sc)) == NULL) {
1118 if (sc->total_fibs < sc->aac_max_fibs) {
1119 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1120 wakeup(sc->aifthread);
1130 * Release a command back to the freelist.
/*
 * Return a command to the freelist: reset its FIB header to a pristine
 * state, enqueue it, and then fire any queued "command free" events so
 * waiters are not missed. Caller must hold the I/O lock.
 */
1133 aacraid_release_command(struct aac_command *cm)
1135 struct aac_event *event;
1136 struct aac_softc *sc;
1139 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1140 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1142 /* (re)initialize the command/FIB */
1143 cm->cm_sgtable = NULL;
1145 cm->cm_complete = NULL;
1147 cm->cm_passthr_dmat = 0;
1148 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1149 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1150 cm->cm_fib->Header.Unused = 0;
1151 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1154 * These are duplicated in aac_start to cover the case where an
1155 * intermediate stage may have destroyed them. They're left
1156 * initialized here for debugging purposes only.
1158 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1159 cm->cm_fib->Header.Handle = 0;
1161 aac_enqueue_free(cm);
1164 * Dequeue all events so that there's no risk of events getting
/* Run every queued CMFREE callback now that a command is available. */
1167 while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1168 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1169 event->ev_callback(sc, event, event->ev_arg);
1174 * Map helper for command/FIB allocation.
/*
 * bus_dmamap_load callback for FIB allocation: stores the single
 * segment's bus address into the uint64_t pointed to by arg.
 */
1177 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1181 fibphys = (uint64_t *)arg;
1183 *fibphys = segs[0].ds_addr;
1187 * Allocate and initialize commands/FIBs for this adapter.
/*
 * Allocate one batch (aac_max_fibs_alloc) of FIBs in DMAable memory
 * and initialize a command structure for each. The I/O lock is
 * dropped around bus_dmamem_alloc because contigmalloc may grab Giant
 * (LOR avoidance, per the caller's comment). Each FIB is aligned up to
 * a 32-byte boundary; NEW_COMM_TYPE1 adapters reserve an extra
 * aac_fib_xporthdr in front of the FIB.
 * NOTE(review): some statements (e.g. the return after the max-fibs
 * check, the dmamap-create error path) are elided from this listing.
 */
1190 aac_alloc_commands(struct aac_softc *sc)
1192 struct aac_command *cm;
1193 struct aac_fibmap *fm;
1198 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1199 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* Refuse to grow past the adapter's FIB cap. */
1201 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1204 fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1208 mtx_unlock(&sc->aac_io_lock);
1209 /* allocate the FIBs in DMAable memory and load them */
1210 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1211 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1212 device_printf(sc->aac_dev,
1213 "Not enough contiguous memory available.\n");
1214 free(fm, M_AACRAIDBUF);
1215 mtx_lock(&sc->aac_io_lock);
/* Per-FIB stride: FIB size plus 31 bytes of alignment slack. */
1219 maxsize = sc->aac_max_fib_size + 31;
1220 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1221 maxsize += sizeof(struct aac_fib_xporthdr);
1222 /* Ignore errors since this doesn't bounce */
1223 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1224 sc->aac_max_fibs_alloc * maxsize,
1225 aac_map_command_helper, &fibphys, 0);
1226 mtx_lock(&sc->aac_io_lock);
1228 /* initialize constant fields in the command structure */
1229 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1230 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1231 cm = sc->aac_commands + sc->total_fibs;
1232 fm->aac_commands = cm;
1234 cm->cm_fib = (struct aac_fib *)
1235 ((u_int8_t *)fm->aac_fibs + i * maxsize);
1236 cm->cm_fibphys = fibphys + i * maxsize;
/* Round both virtual and bus addresses up to the same 32-byte boundary. */
1237 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1238 u_int64_t fibphys_aligned;
1240 (cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1241 cm->cm_fib = (struct aac_fib *)
1242 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1243 cm->cm_fibphys = fibphys_aligned;
1245 u_int64_t fibphys_aligned;
1246 fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1247 cm->cm_fib = (struct aac_fib *)
1248 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1249 cm->cm_fibphys = fibphys_aligned;
1251 cm->cm_index = sc->total_fibs;
1253 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1254 &cm->cm_datamap)) != 0)
1256 if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1257 aacraid_release_command(cm);
1262 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1263 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
/* Unwind path: release the DMA memory and the fibmap record. */
1267 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1268 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1269 free(fm, M_AACRAIDBUF);
1274 * Free FIBs owned by this adapter.
/*
 * Tears down every fibmap chunk created by aac_alloc_commands(): destroys
 * the per-command data maps, unloads and frees the FIB DMA memory, and
 * frees the fibmap structure itself.
 */
1277 aac_free_commands(struct aac_softc *sc)
1279 struct aac_fibmap *fm;
1280 struct aac_command *cm;
1283 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1285 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1287 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1289 * We check against total_fibs to handle partially
/* total_fibs is decremented as each command is destroyed, so a partially
 * allocated final chunk stops the loop early. */
1292 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1293 cm = fm->aac_commands + i;
1294 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1296 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1297 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1298 free(fm, M_AACRAIDBUF);
1303 * Command-mapping helper function - populate this command's s/g table.
/*
 * bus_dmamap_load(9) callback for a command's data buffer.  Copies the DMA
 * segment list into the FIB's scatter/gather table in whichever format the
 * FIB command requires (RawIo2 IEEE-1212 SGEs, RawIo, 32-bit, or 64-bit
 * entries), fixes up the FIB header addresses, syncs the data map, and
 * finally submits the command (sync or async path).  Runs with aac_io_lock
 * held.
 */
1306 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1308 struct aac_softc *sc;
1309 struct aac_command *cm;
1310 struct aac_fib *fib;
1313 cm = (struct aac_command *)arg;
1316 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1317 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* In sync mode only one command may be outstanding at a time. */
1319 if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1322 /* copy into the FIB */
1323 if (cm->cm_sgtable != NULL) {
1324 if (fib->Header.Command == RawIo2) {
1325 struct aac_raw_io2 *raw;
1326 struct aac_sge_ieee1212 *sg;
1327 u_int32_t min_size = PAGE_SIZE, cur_size;
1328 int conformable = TRUE;
1330 raw = (struct aac_raw_io2 *)&fib->data[0];
1331 sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
/*
 * A "conformant" RawIo2 SGL has a uniform middle-element size
 * (sgeNominalSize); only the first and last elements may differ.  Track the
 * smallest middle element so a conformant layout can be recomputed below if
 * this one is not.
 */
1334 for (i = 0; i < nseg; i++) {
1335 cur_size = segs[i].ds_len;
1337 *(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1338 sg[i].length = cur_size;
1341 raw->sgeFirstSize = cur_size;
1342 } else if (i == 1) {
1343 raw->sgeNominalSize = cur_size;
1344 min_size = cur_size;
1345 } else if ((i+1) < nseg &&
1346 cur_size != raw->sgeNominalSize) {
1347 conformable = FALSE;
1348 if (cur_size < min_size)
1349 min_size = cur_size;
1353 /* not conformable: evaluate required sg elements */
/*
 * Find the largest page multiple (i pages) that evenly divides every middle
 * element, and count how many elements splitting at that size would need.
 * Only convert when the result still fits the controller's SG table and the
 * hint flags do not forbid it (bit 2 of hint_flags).
 */
1355 int j, err_found, nseg_new = nseg;
1356 for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1359 for (j = 1; j < nseg - 1; ++j) {
1360 if (sg[j].length % (i*PAGE_SIZE)) {
1364 nseg_new += (sg[j].length / (i*PAGE_SIZE));
1369 if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1370 !(sc->hint_flags & 4))
1371 nseg = aac_convert_sgraw2(sc,
1372 raw, i, nseg, nseg_new);
1374 raw->flags |= RIO2_SGL_CONFORMANT;
1377 /* update the FIB size for the s/g count */
1378 fib->Header.Size += nseg *
1379 sizeof(struct aac_sge_ieee1212);
1381 } else if (fib->Header.Command == RawIo) {
1382 struct aac_sg_tableraw *sg;
1383 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1385 for (i = 0; i < nseg; i++) {
1386 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1387 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1388 sg->SgEntryRaw[i].Next = 0;
1389 sg->SgEntryRaw[i].Prev = 0;
1390 sg->SgEntryRaw[i].Flags = 0;
1392 /* update the FIB size for the s/g count */
1393 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1394 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
/* Legacy 32-bit SG format. */
1395 struct aac_sg_table *sg;
1396 sg = cm->cm_sgtable;
1398 for (i = 0; i < nseg; i++) {
1399 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1400 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1402 /* update the FIB size for the s/g count */
1403 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
/* 64-bit SG format. */
1405 struct aac_sg_table64 *sg;
1406 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1408 for (i = 0; i < nseg; i++) {
1409 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1410 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1412 /* update the FIB size for the s/g count */
1413 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1417 /* Fix up the address values in the FIB. Use the command array index
1418 * instead of a pointer since these fields are only 32 bits. Shift
1419 * the SenderFibAddress over to make room for the fast response bit
1420 * and for the AIF bit
1422 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1423 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1425 /* save a pointer to the command for speedy reverse-lookup */
1426 cm->cm_fib->Header.Handle += cm->cm_index + 1;
/* Pass-through commands sync their own buffers; only sync driver-owned maps. */
1428 if (cm->cm_passthr_dmat == 0) {
1429 if (cm->cm_flags & AAC_CMD_DATAIN)
1430 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1431 BUS_DMASYNC_PREREAD);
1432 if (cm->cm_flags & AAC_CMD_DATAOUT)
1433 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1434 BUS_DMASYNC_PREWRITE);
1437 cm->cm_flags |= AAC_CMD_MAPPED;
/*
 * Submission: AAC_CMD_WAIT and sync-mode commands go through the synchronous
 * mailbox interface; otherwise retry AAC_SEND_COMMAND, backing off 5us per
 * attempt, and freeze/requeue if the controller will not accept it.
 */
1439 if (cm->cm_flags & AAC_CMD_WAIT) {
1440 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1441 cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1442 } else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1444 sc->aac_sync_cm = cm;
1445 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1446 cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1448 int count = 10000000L;
1449 while (AAC_SEND_COMMAND(sc, cm) != 0) {
1451 aac_unmap_command(cm);
1452 sc->flags |= AAC_QUEUE_FRZN;
1453 aac_requeue_ready(cm);
1455 DELAY(5); /* wait 5 usec. */
/*
 * Rewrite a non-conformant RawIo2 SGL into a conformant one whose middle
 * elements are all exactly 'pages' pages long.  The first and last original
 * elements are kept; each middle element is split into (length / pages*PAGE_SIZE)
 * pieces.  nseg_new is the precomputed resulting element count (validated by
 * the caller against aac_sg_tablesize).  Returns the new segment count;
 * presumably returns nseg unchanged on allocation failure -- TODO confirm
 * against the elided early-return.
 */
1462 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1463 int pages, int nseg, int nseg_new)
1465 struct aac_sge_ieee1212 *sge;
1469 sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1470 M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1474 for (i = 1, pos = 1; i < nseg - 1; ++i) {
1475 for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1476 addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1477 sge[pos].addrLow = addr_low;
1478 sge[pos].addrHigh = raw->sge[i].addrHigh;
/* 32-bit low-word wrapped around: carry into the high word. */
1479 if (addr_low < raw->sge[i].addrLow)
1480 sge[pos].addrHigh++;
1481 sge[pos].length = pages * PAGE_SIZE;
/* Keep the original final element, then copy the rebuilt list back. */
1486 sge[pos] = raw->sge[nseg-1];
1487 for (i = 1; i < nseg_new; ++i)
1488 raw->sge[i] = sge[i];
1490 free(sge, M_AACRAIDBUF);
1491 raw->sgeCnt = nseg_new;
1492 raw->flags |= RIO2_SGL_CONFORMANT;
1493 raw->sgeNominalSize = pages * PAGE_SIZE;
1499 * Unmap a command from controller-visible space.
/*
 * Inverse of the mapping done in aacraid_map_command_sg(): post-DMA syncs
 * the data buffer (skipping pass-through buffers, which use their own DMA
 * tag) and unloads the map.  No-op if the command is not currently mapped.
 */
1502 aac_unmap_command(struct aac_command *cm)
1504 struct aac_softc *sc;
1507 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1509 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1512 if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1513 if (cm->cm_flags & AAC_CMD_DATAIN)
1514 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1515 BUS_DMASYNC_POSTREAD);
1516 if (cm->cm_flags & AAC_CMD_DATAOUT)
1517 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1518 BUS_DMASYNC_POSTWRITE);
1520 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1522 cm->cm_flags &= ~AAC_CMD_MAPPED;
1526 * Hardware Interface
1530 * Initialize the adapter.
/*
 * bus_dmamap_load(9) callback for the "common" shared-data area: records its
 * bus address (single contiguous segment) in the softc.
 */
1533 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1535 struct aac_softc *sc;
1537 sc = (struct aac_softc *)arg;
1538 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1540 sc->aac_common_busaddr = segs[0].ds_addr;
/*
 * Probe-time firmware handshake: wait out any in-progress flash update and
 * controller boot, reject unsupported firmware (PERC2/QC 1.x), query the
 * supported-options word and ATU size, derive driver feature flags
 * (64-bit SG/arrays, new-comm types, sync mode), remap the register BAR if
 * the controller wants a larger window, and read the controller's preferred
 * FIB/sector/SG limits.
 */
1544 aac_check_firmware(struct aac_softc *sc)
1546 u_int32_t code, major, minor, maxsize;
1547 u_int32_t options = 0, atu_size = 0, status, waitCount;
1550 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1552 /* check if flash update is running */
1553 if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
/* Poll firmware status until the flash update reports success or failure,
 * bounded by AAC_FWUPD_TIMEOUT seconds of uptime. */
1556 code = AAC_GET_FWSTATUS(sc);
1557 if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1558 device_printf(sc->aac_dev,
1559 "FATAL: controller not coming ready, "
1560 "status %x\n", code);
1563 } while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1565 * Delay 10 seconds. Because right now FW is doing a soft reset,
1566 * do not read scratch pad register at this time
1568 waitCount = 10 * 10000;
1570 DELAY(100); /* delay 100 microseconds */
1576 * Wait for the adapter to come ready.
1580 code = AAC_GET_FWSTATUS(sc);
1581 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1582 device_printf(sc->aac_dev,
1583 "FATAL: controller not coming ready, "
1584 "status %x\n", code);
1587 } while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1590 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1591 * firmware version 1.x are not compatible with this driver.
1593 if (sc->flags & AAC_FLAGS_PERC2QC) {
1594 if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1596 device_printf(sc->aac_dev,
1597 "Error reading firmware version\n");
1601 /* These numbers are stored as ASCII! */
1602 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1603 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1605 device_printf(sc->aac_dev,
1606 "Firmware version %d.%d is not supported.\n",
1612 * Retrieve the capabilities/supported options word so we know what
1613 * work-arounds to enable. Some firmware revs don't support this
1616 if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1617 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1618 device_printf(sc->aac_dev,
1619 "RequestAdapterInfo failed\n");
/* Mailbox 1 = options word, 2 = required ATU window size, 3 = doorbell mask. */
1623 options = AAC_GET_MAILBOX(sc, 1);
1624 atu_size = AAC_GET_MAILBOX(sc, 2);
1625 sc->supported_options = options;
1626 sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1628 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1629 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1630 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1631 if (options & AAC_SUPPORTED_NONDASD)
1632 sc->flags |= AAC_FLAGS_ENABLE_CAM;
/* 64-bit SG only when the platform has >32-bit bus addresses and the
 * user-tunable hint (bit 0) allows it. */
1633 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1634 && (sizeof(bus_addr_t) > 4)
1635 && (sc->hint_flags & 0x1)) {
1636 device_printf(sc->aac_dev,
1637 "Enabling 64-bit address support\n");
1638 sc->flags |= AAC_FLAGS_SG_64BIT;
/* Pick the newest communication interface the firmware offers. */
1640 if (sc->aac_if.aif_send_command) {
1641 if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1642 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1643 else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1644 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1645 else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1646 (options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1647 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1649 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1650 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1653 if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1654 device_printf(sc->aac_dev, "Communication interface not supported!\n");
/* hint_flags bit 1 forces synchronous (slow) mode. */
1658 if (sc->hint_flags & 2) {
1659 device_printf(sc->aac_dev,
1660 "Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1661 sc->flags |= AAC_FLAGS_SYNC_MODE;
1662 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1663 device_printf(sc->aac_dev,
1664 "Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1665 sc->flags |= AAC_FLAGS_SYNC_MODE;
1668 /* Check for broken hardware that does a lower number of commands */
1669 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1671 /* Remap mem. resource, if required */
1672 if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1673 bus_release_resource(
1674 sc->aac_dev, SYS_RES_MEMORY,
1675 sc->aac_regs_rid0, sc->aac_regs_res0);
1676 sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1677 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1678 atu_size, RF_ACTIVE);
/* Fall back to the BAR's default size if the larger window fails. */
1679 if (sc->aac_regs_res0 == NULL) {
1680 sc->aac_regs_res0 = bus_alloc_resource_any(
1681 sc->aac_dev, SYS_RES_MEMORY,
1682 &sc->aac_regs_rid0, RF_ACTIVE);
1683 if (sc->aac_regs_res0 == NULL) {
1684 device_printf(sc->aac_dev,
1685 "couldn't allocate register window\n");
1689 sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1690 sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1693 /* Read preferred settings */
1694 sc->aac_max_fib_size = sizeof(struct aac_fib);
1695 sc->aac_max_sectors = 128; /* 64KB */
1696 sc->aac_max_aif = 1;
1697 if (sc->flags & AAC_FLAGS_SG_64BIT)
1698 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1699 - sizeof(struct aac_blockwrite64))
1700 / sizeof(struct aac_sg_entry64);
1702 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1703 - sizeof(struct aac_blockwrite))
1704 / sizeof(struct aac_sg_entry);
/* GETCOMMPREF overrides the conservative defaults set above. */
1706 if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1707 options = AAC_GET_MAILBOX(sc, 1);
1708 sc->aac_max_fib_size = (options & 0xFFFF);
1709 sc->aac_max_sectors = (options >> 16) << 1;
1710 options = AAC_GET_MAILBOX(sc, 2);
1711 sc->aac_sg_tablesize = (options >> 16);
1712 options = AAC_GET_MAILBOX(sc, 3);
1713 sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1714 if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1715 sc->aac_max_fibs = (options & 0xFFFF);
1716 options = AAC_GET_MAILBOX(sc, 4);
1717 sc->aac_max_aif = (options & 0xFFFF);
1718 options = AAC_GET_MAILBOX(sc, 5);
1719 sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
/* Cap the per-FIB footprint at one page; aac_alloc_commands packs
 * aac_max_fibs_alloc FIBs per page using the same stride computation. */
1722 maxsize = sc->aac_max_fib_size + 31;
1723 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1724 maxsize += sizeof(struct aac_fib_xporthdr);
1725 if (maxsize > PAGE_SIZE) {
1726 sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1727 maxsize = PAGE_SIZE;
1729 sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1731 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1732 sc->flags |= AAC_FLAGS_RAW_IO;
1733 device_printf(sc->aac_dev, "Enable Raw I/O\n");
1735 if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1736 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1737 sc->flags |= AAC_FLAGS_LBA_64BIT;
1738 device_printf(sc->aac_dev, "Enable 64-bit array\n");
1741 #ifdef AACRAID_DEBUG
1742 aacraid_get_fw_debug_buffer(sc);
/*
 * Build the adapter-init structure in the shared "common" area (adapter-FIB
 * region, printf buffer, host RRQ, size limits, comm-interface revision),
 * then hand its bus address to the firmware via AAC_MONKER_INITSTRUCT.
 */
1748 aac_init(struct aac_softc *sc)
1750 struct aac_adapter_init *ip;
1753 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1755 /* reset rrq index */
1756 sc->aac_fibs_pushed_no = 0;
1757 for (i = 0; i < sc->aac_max_msix; i++)
1758 sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1761 * Fill in the init structure. This tells the adapter about the
1762 * physical location of various important shared data structures.
1764 ip = &sc->aac_common->ac_init;
1765 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1766 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1767 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1768 sc->flags |= AAC_FLAGS_RAW_IO;
1770 ip->NoOfMSIXVectors = sc->aac_max_msix;
1772 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1773 offsetof(struct aac_common, ac_fibs);
1774 ip->AdapterFibsVirtualAddress = 0;
1775 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1776 ip->AdapterFibAlign = sizeof(struct aac_fib);
1778 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1779 offsetof(struct aac_common, ac_printf);
1780 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1783 * The adapter assumes that pages are 4K in size, except on some
1784 * broken firmware versions that do the page->byte conversion twice,
1785 * therefore 'assuming' that this value is in 16MB units (2^24).
1786 * Round up since the granularity is so high.
1788 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1789 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1790 ip->HostPhysMemPages =
1791 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1793 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */
1795 ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
/* Advertise the comm-interface revision matching what check_firmware chose. */
1796 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1797 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1798 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1799 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1800 device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1801 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1802 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1803 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1804 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1805 device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1807 ip->MaxNumAif = sc->aac_max_aif;
1808 ip->HostRRQ_AddrLow =
1809 sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1810 /* always 32-bit address */
1811 ip->HostRRQ_AddrHigh = 0;
1813 if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1814 ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1815 ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1816 device_printf(sc->aac_dev, "Power Management enabled\n");
1819 ip->MaxIoCommands = sc->aac_max_fibs;
1820 ip->MaxIoSize = sc->aac_max_sectors << 9;
1821 ip->MaxFibSize = sc->aac_max_fib_size;
1824 * Do controller-type-specific initialisation
/* Clear any stale outbound doorbell bits before the handshake. */
1826 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1829 * Give the init structure to the controller.
1831 if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1832 sc->aac_common_busaddr +
1833 offsetof(struct aac_common, ac_init), 0, 0, 0,
1835 device_printf(sc->aac_dev,
1836 "error establishing init structure\n");
1842 * Check configuration issues
1844 if ((error = aac_check_config(sc)) != 0)
/*
 * Choose the interrupt delivery mode (MSI-X, MSI, or INTx) and set
 * aac_max_msix / aac_vector_cap accordingly.  Order of preference:
 * MSI-X (up to min(AAC_MAX_MSIX, controller's aac_max_msix) vectors),
 * then single-vector MSI, then legacy line interrupts.  Sync mode and
 * controllers that report no MSI-X capability are forced to one vector.
 */
1853 aac_define_int_mode(struct aac_softc *sc)
1856 int cap, msi_count, error = 0;
1861 if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1862 device_printf(dev, "using line interrupts\n");
1863 sc->aac_max_msix = 1;
1864 sc->aac_vector_cap = sc->aac_max_fibs;
1868 /* max. vectors from AAC_MONKER_GETCOMMPREF */
1869 if (sc->aac_max_msix == 0) {
/* SRC ("Tupelo") parts without MSI-X support still get single MSI. */
1870 if (sc->aac_hwif == AAC_HWIF_SRC) {
1872 if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1873 device_printf(dev, "alloc msi failed - err=%d; "
1874 "will use INTx\n", error);
1875 pci_release_msi(dev);
1877 sc->msi_tupelo = TRUE;
1881 device_printf(dev, "using MSI interrupts\n");
1883 device_printf(dev, "using line interrupts\n");
1885 sc->aac_max_msix = 1;
1886 sc->aac_vector_cap = sc->aac_max_fibs;
/* Try MSI-X first, clamped to driver and controller limits. */
1891 msi_count = pci_msix_count(dev);
1892 if (msi_count > AAC_MAX_MSIX)
1893 msi_count = AAC_MAX_MSIX;
1894 if (msi_count > sc->aac_max_msix)
1895 msi_count = sc->aac_max_msix;
1896 if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1897 device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1898 "will try MSI\n", msi_count, error);
1899 pci_release_msi(dev);
1901 sc->msi_enabled = TRUE;
1902 device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1906 if (!sc->msi_enabled) {
1908 if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1909 device_printf(dev, "alloc msi failed - err=%d; "
1910 "will use INTx\n", error);
1911 pci_release_msi(dev);
1913 sc->msi_enabled = TRUE;
1914 device_printf(dev, "using MSI interrupts\n");
1918 if (sc->msi_enabled) {
1919 /* now read controller capability from PCI config. space */
1920 cap = aac_find_pci_capability(sc, PCIY_MSIX);
1921 val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
/* If the device's MSI-X message control does not show enable, back out. */
1922 if (!(val & AAC_PCI_MSI_ENABLE)) {
1923 pci_release_msi(dev);
1924 sc->msi_enabled = FALSE;
1928 if (!sc->msi_enabled) {
1929 device_printf(dev, "using legacy interrupts\n");
1930 sc->aac_max_msix = 1;
1932 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1933 if (sc->aac_max_msix > msi_count)
1934 sc->aac_max_msix = msi_count;
/* Split the FIB pool evenly across the vectors actually granted. */
1936 sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1938 fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1939 sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
/*
 * Walk the PCI capability list in config space looking for capability id
 * 'cap'; presumably returns the capability's config-space offset, or 0 when
 * absent -- the returning lines are elided here, TODO confirm.
 */
1943 aac_find_pci_capability(struct aac_softc *sc, int cap)
1951 status = pci_read_config(dev, PCIR_STATUS, 2);
1952 if (!(status & PCIM_STATUS_CAPPRESENT))
/* Capability-pointer register location depends on the header type. */
1955 status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1956 switch (status & PCIM_HDRTYPE) {
1962 ptr = PCIR_CAP_PTR_2;
1968 ptr = pci_read_config(dev, ptr, 1);
1972 next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1973 val = pci_read_config(dev, ptr + PCICAP_ID, 1);
/*
 * Allocate an IRQ resource and install aacraid_new_intr_type1 as the handler
 * for each MSI-X vector (or the single MSI/INTx vector).  MSI/MSI-X vectors
 * are rid 1..n; legacy INTx uses rid 0.
 */
1983 aac_setup_intr(struct aac_softc *sc)
1985 int i, msi_count, rid;
1986 struct resource *res;
1989 msi_count = sc->aac_max_msix;
1990 rid = ((sc->msi_enabled || sc->msi_tupelo)? 1:0);
1992 for (i = 0; i < msi_count; i++, rid++) {
1993 if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1994 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1995 device_printf(sc->aac_dev,"can't allocate interrupt\n");
1998 sc->aac_irq_rid[i] = rid;
1999 sc->aac_irq[i] = res;
2000 if (aac_bus_setup_intr(sc->aac_dev, res,
2001 INTR_MPSAFE | INTR_TYPE_BIO, NULL,
2002 aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
2003 device_printf(sc->aac_dev, "can't set up interrupt\n");
/* Each handler argument carries its own vector number and softc pointer. */
2006 sc->aac_msix[i].vector_no = i;
2007 sc->aac_msix[i].sc = sc;
2008 sc->aac_intr[i] = tag;
/*
 * Query the controller's configuration status via a CT_GET_CONFIG_STATUS
 * container command and, if the firmware reports the pending config is safe
 * (action <= CFACT_PAUSE), auto-commit it with CT_COMMIT_CONFIG.  Uses a
 * synchronous FIB under aac_io_lock.
 */
2015 aac_check_config(struct aac_softc *sc)
2017 struct aac_fib *fib;
2018 struct aac_cnt_config *ccfg;
2019 struct aac_cf_status_hdr *cf_shdr;
2022 mtx_lock(&sc->aac_io_lock);
2023 aac_alloc_sync_fib(sc, &fib);
2025 ccfg = (struct aac_cnt_config *)&fib->data[0];
2026 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2027 ccfg->Command = VM_ContainerConfig;
2028 ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2029 ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2031 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2032 sizeof (struct aac_cnt_config));
2033 cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2034 if (rval == 0 && ccfg->Command == ST_OK &&
2035 ccfg->CTCommand.param[0] == CT_OK) {
2036 if (cf_shdr->action <= CFACT_PAUSE) {
/* Reuse the same FIB buffer for the commit request. */
2037 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2038 ccfg->Command = VM_ContainerConfig;
2039 ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2041 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2042 sizeof (struct aac_cnt_config));
2043 if (rval == 0 && ccfg->Command == ST_OK &&
2044 ccfg->CTCommand.param[0] == CT_OK) {
2045 /* successful completion */
2048 /* auto commit aborted due to error(s) */
2052 /* auto commit aborted due to adapter indicating
2053 config. issues too dangerous to auto commit */
2061 aac_release_sync_fib(sc);
2062 mtx_unlock(&sc->aac_io_lock);
2067 * Send a synchronous command to the controller and wait for a result.
2068 * Indicate if the controller completed the command with an error status.
/*
 * Loads the five mailbox registers, rings the sync-command doorbell, then
 * (except for fire-and-forget SYNCFIB with *sp == 0) spins until the
 * controller raises the completion bit or AAC_SYNC_TIMEOUT seconds elapse.
 * On completion, mailbox 0 holds the status and mailbox 1 an optional
 * return value (*r1).
 */
2071 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2072 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2073 u_int32_t *sp, u_int32_t *r1)
2078 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2080 /* populate the mailbox */
2081 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2083 /* ensure the sync command doorbell flag is cleared */
2084 if (!sc->msi_enabled)
2085 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2087 /* then set it to signal the adapter */
2088 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2090 if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2091 /* spin waiting for the command to complete */
2094 if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2095 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2098 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2100 /* clear the completion flag */
2101 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2103 /* get the command status */
2104 status = AAC_GET_MAILBOX(sc, 0);
2108 /* return parameter */
2110 *r1 = AAC_GET_MAILBOX(sc, 1);
2112 if (status != AAC_SRB_STS_SUCCESS)
/*
 * Fill in the header of the pre-allocated sync FIB (in the common area) and
 * submit it via aacraid_sync_command(AAC_MONKER_SYNCFIB), blocking until the
 * controller responds.  datasize is the payload length, capped at
 * AAC_FIB_DATASIZE.  Caller must hold aac_io_lock.
 */
2119 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2120 struct aac_fib *fib, u_int16_t datasize)
2122 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2123 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2125 if (datasize > AAC_FIB_DATASIZE)
2129 * Set up the sync FIB
2131 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2132 AAC_FIBSTATE_INITIALISED |
2134 fib->Header.XferState |= xferstate;
2135 fib->Header.Command = command;
2136 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2137 fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2138 fib->Header.SenderSize = sizeof(struct aac_fib);
2139 fib->Header.SenderFibAddress = 0; /* Not needed */
/* The sync FIB lives at a fixed offset inside the common DMA area. */
2140 fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2141 offsetof(struct aac_common, ac_sync_fib);
2144 * Give the FIB to the controller, wait for a response.
2146 if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2147 fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2148 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2156 * Check for commands that have been outstanding for a suspiciously long time,
2157 * and complain about them.
/*
 * Periodic watchdog: scan the busy queue for commands older than
 * AAC_CMD_TIMEOUT seconds, log each with its FIB dump, and (per the
 * surrounding elided logic, presumably when any timed out) reset the
 * adapter and print queue state.
 */
2160 aac_timeout(struct aac_softc *sc)
2162 struct aac_command *cm;
2166 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2168 * Traverse the busy command list, bitch about late commands once
2172 deadline = time_uptime - AAC_CMD_TIMEOUT;
2173 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2174 if (cm->cm_timestamp < deadline) {
2175 device_printf(sc->aac_dev,
2176 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2177 cm, (int)(time_uptime-cm->cm_timestamp));
2178 AAC_PRINT_FIB(sc, cm->cm_fib);
2184 aac_reset_adapter(sc);
2185 aacraid_print_queues(sc);
2189 * Interface Function Vectors
2193 * Read the current firmware status word.
/* SRC hardware: firmware status lives in the outbound message register. */
2196 aac_src_get_fwstatus(struct aac_softc *sc)
2198 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2200 return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2204 * Notify the controller of a change in a given queue
/* Ring the inbound doorbell with the queue bit shifted into position. */
2207 aac_src_qnotify(struct aac_softc *sc, int qbit)
2209 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2211 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2215 * Get the interrupt reason bits
/*
 * In MSI mode, a sync-status bit in the MSI doorbell is translated to the
 * generic AAC_DB_SYNC_COMMAND flag; otherwise read the plain outbound
 * doorbell and normalize by the SRC shift.
 */
2218 aac_src_get_istatus(struct aac_softc *sc)
2222 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2224 if (sc->msi_enabled) {
2225 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2226 if (val & AAC_MSI_SYNC_STATUS)
2227 val = AAC_DB_SYNC_COMMAND;
2231 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2237 * Clear some interrupt reason bits
/*
 * MSI mode clears the sync bit through the device-register helper;
 * otherwise write the (shifted) mask to the doorbell-clear register.
 */
2240 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2242 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2244 if (sc->msi_enabled) {
2245 if (mask == AAC_DB_SYNC_COMMAND)
2246 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2248 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2253 * Populate the mailbox and set the command word
/* SRC mailbox: five consecutive 32-bit registers (command, arg0..arg3). */
2256 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2257 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2259 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2261 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2262 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2263 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2264 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2265 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
/* SRCv variant of aac_src_set_mailbox: same layout at the SRCv base offset. */
2269 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2270 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2272 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2274 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2275 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2276 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2277 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2278 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2282 * Fetch the immediate command status word
/* Read 32-bit mailbox register 'mb' (0 = status, 1+ = return args). */
2285 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2287 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2289 return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
/* SRCv variant of aac_src_get_mailbox. */
2293 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2295 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2297 return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2301 * Set/clear interrupt masks
/*
 * Multiplexed device-register operations: interrupt enable/disable and the
 * doorbell manipulations needed to switch between MSI-X and INTx and to
 * clear AIF/sync doorbell bits.  Reads of AAC_SRC_IDBR after writes appear
 * to be posted-write flushes.
 */
2304 aac_src_access_devreg(struct aac_softc *sc, int mode)
2308 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2311 case AAC_ENABLE_INTERRUPT:
2312 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2313 (sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2314 AAC_INT_ENABLE_TYPE1_INTX));
2317 case AAC_DISABLE_INTERRUPT:
2318 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2321 case AAC_ENABLE_MSIX:
2323 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2325 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2326 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
/* Unmask all PMC interrupt sources, then clear the global mask bits. */
2328 val = PMC_ALL_INTERRUPT_BITS;
2329 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2330 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2331 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2332 val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2335 case AAC_DISABLE_MSIX:
2337 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2339 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2340 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2343 case AAC_CLEAR_AIF_BIT:
2345 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2347 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2348 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2351 case AAC_CLEAR_SYNC_BIT:
2353 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2355 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2356 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2359 case AAC_ENABLE_INTX:
2361 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2363 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2364 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2366 val = PMC_ALL_INTERRUPT_BITS;
2367 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2368 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2369 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2370 val & (~(PMC_GLOBAL_INT_BIT2)));
2379 * New comm. interface: Send command functions
/*
 * Submit a FIB through the SRC inbound queue.  With MSI-X, non-AIF commands
 * are round-robined across vectors (skipping vectors whose outstanding count
 * is at capacity; the vector number is encoded into the high half of the
 * handle).  Type-2 controllers receive the FIB address directly; type-1
 * controllers get an aac_fib_xporthdr prepended.  The encoded fibsize (in
 * 128-byte units minus one) is OR-ed into the low bits of the address
 * written to the inbound queue register(s).
 */
2382 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2384 struct aac_fib_xporthdr *pFibX;
2385 u_int32_t fibsize, high_addr;
2388 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2390 if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2391 sc->aac_max_msix > 1) {
2392 u_int16_t vector_no, first_choice = 0xffff;
2394 vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
/* Scan for a vector with spare capacity; give up after a full cycle. */
2397 if (vector_no == sc->aac_max_msix)
2399 if (sc->aac_rrq_outstanding[vector_no] <
2402 if (0xffff == first_choice)
2403 first_choice = vector_no;
2404 else if (vector_no == first_choice)
2407 if (vector_no == first_choice)
2409 sc->aac_rrq_outstanding[vector_no]++;
2410 if (sc->aac_fibs_pushed_no == 0xffffffff)
2411 sc->aac_fibs_pushed_no = 0;
2413 sc->aac_fibs_pushed_no++;
2415 cm->cm_fib->Header.Handle += (vector_no << 16);
2418 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2419 /* Calculate the amount to the fibsize bits */
2420 fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2421 /* Fill new FIB header */
2422 address = cm->cm_fibphys;
2423 high_addr = (u_int32_t)(address >> 32);
2424 if (high_addr == 0L) {
2425 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2426 cm->cm_fib->Header.u.TimeStamp = 0L;
2428 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2429 cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2431 cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2433 /* Calculate the amount to the fibsize bits */
2434 fibsize = (sizeof(struct aac_fib_xporthdr) +
2435 cm->cm_fib->Header.Size + 127) / 128 - 1;
2436 /* Fill XPORT header */
/* The xport header occupies the space reserved just before the FIB by
 * aac_alloc_commands (type-1 layout). */
2437 pFibX = (struct aac_fib_xporthdr *)
2438 ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2439 pFibX->Handle = cm->cm_fib->Header.Handle;
2440 pFibX->HostAddress = cm->cm_fibphys;
2441 pFibX->Size = cm->cm_fib->Header.Size;
2442 address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2443 high_addr = (u_int32_t)(address >> 32);
2448 aac_enqueue_busy(cm);
/* 64-bit queue write uses the high/low register pair; fibsize rides in the
 * low address bits (the FIB is 32-byte aligned, so they are free). */
2450 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2451 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2453 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2459 * New comm. interface: get, set outbound queue index
/* Not used by the SRC new-comm interface; stub (body elided/empty). */
2462 aac_src_get_outb_queue(struct aac_softc *sc)
2464 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* Stub counterpart for setting the outbound queue index. */
2470 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2472 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2476 * Debugging and Diagnostics
2480 * Print some information about the controller.
/*
 * Query the adapter for identification/capability data via synchronous FIBs
 * and print a multi-line summary (type, CPU, memory, firmware revision,
 * supported options) to the console.  Runs under aac_io_lock with a
 * sync FIB allocated for the duration.
 */
2483 aac_describe_controller(struct aac_softc *sc)
2485 struct aac_fib *fib;
2486 struct aac_adapter_info *info;
2487 char *adapter_type = "Adaptec RAID controller";
2489 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2491 mtx_lock(&sc->aac_io_lock);
2492 aac_alloc_sync_fib(sc, &fib);
/* Prefer the supplement adapter info if the firmware advertises it:
 * it supplies the human-readable type text plus feature/option bits. */
2494 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2496 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2497 device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2499 struct aac_supplement_adapter_info *supp_info;
2501 supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2502 adapter_type = (char *)supp_info->AdapterTypeText;
2503 sc->aac_feature_bits = supp_info->FeatureBits;
2504 sc->aac_support_opt2 = supp_info->SupportedOptions2;
2507 device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2509 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2510 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
/* Basic adapter info; on failure release the sync FIB and bail out. */
2513 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2514 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2515 aac_release_sync_fib(sc);
2516 mtx_unlock(&sc->aac_io_lock);
2520 /* save the kernel revision structure for later use */
2521 info = (struct aac_adapter_info *)&fib->data[0];
2522 sc->aac_revision = info->KernelRevision;
2525 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2526 "(%dMB cache, %dMB execution), %s\n",
2527 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2528 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2529 info->BufferMem / (1024 * 1024),
2530 info->ExecutionMem / (1024 * 1024),
2531 aac_describe_code(aac_battery_platform,
2532 info->batteryPlatform));
2534 device_printf(sc->aac_dev,
2535 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2536 info->KernelRevision.external.comp.major,
2537 info->KernelRevision.external.comp.minor,
2538 info->KernelRevision.external.comp.dash,
2539 info->KernelRevision.buildNumber,
2540 (u_int32_t)(info->SerialNumber & 0xffffff));
/* %b prints supported_options against a bit-name string (elided here). */
2542 device_printf(sc->aac_dev, "Supported Options=%b\n",
2543 sc->supported_options,
2566 aac_release_sync_fib(sc);
2567 mtx_unlock(&sc->aac_io_lock);
2571 * Look up a text description of a numeric error code and return a pointer to
/*
 * Translate a numeric code into its text description using a lookup table.
 * The table is terminated by an entry whose .string is NULL; the entry
 * immediately after that terminator holds the default "unknown" text,
 * which is returned when no code matches.
 */
2575 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2579 for (i = 0; table[i].string != NULL; i++)
2580 if (table[i].code == code)
2581 return(table[i].string);
/* i now indexes the NULL terminator; i + 1 is the default entry. */
2582 return(table[i + 1].string);
2586 * Management Interface
/*
 * Character-device open entry point for the management interface.
 * Marks the device busy and registers a per-open destructor so the
 * busy count is dropped when the descriptor goes away.
 */
2590 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2592 struct aac_softc *sc;
2595 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2596 #if __FreeBSD_version >= 702000
2597 device_busy(sc->aac_dev);
/* aac_cdevpriv_dtor() undoes device_busy() on last close of this cdev. */
2598 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
/*
 * Management-interface ioctl dispatcher.  Each FSACTL_* command has a
 * native FreeBSD variant and a Linux-compat (FSACTL_LNX_*) variant; the
 * native case first dereferences the user pointer (arg = *(caddr_t*)arg)
 * and then deliberately falls through into the Linux case, which does the
 * actual work.  The fallthroughs are intentional, not missing breaks.
 */
2604 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2606 union aac_statrequest *as;
2607 struct aac_softc *sc;
2610 as = (union aac_statrequest *)arg;
2612 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* Queue-statistics request: copy the per-queue stats out to the caller. */
2616 switch (as->as_item) {
2620 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2621 sizeof(struct aac_qstat));
2629 case FSACTL_SENDFIB:
2630 case FSACTL_SEND_LARGE_FIB:
2631 arg = *(caddr_t*)arg;
2632 case FSACTL_LNX_SENDFIB:
2633 case FSACTL_LNX_SEND_LARGE_FIB:
2634 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2635 error = aac_ioctl_sendfib(sc, arg);
2637 case FSACTL_SEND_RAW_SRB:
2638 arg = *(caddr_t*)arg;
2639 case FSACTL_LNX_SEND_RAW_SRB:
2640 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2641 error = aac_ioctl_send_raw_srb(sc, arg);
2643 case FSACTL_AIF_THREAD:
2644 case FSACTL_LNX_AIF_THREAD:
2645 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2648 case FSACTL_OPEN_GET_ADAPTER_FIB:
2649 arg = *(caddr_t*)arg;
2650 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2651 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2652 error = aac_open_aif(sc, arg);
2654 case FSACTL_GET_NEXT_ADAPTER_FIB:
2655 arg = *(caddr_t*)arg;
2656 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2657 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2658 error = aac_getnext_aif(sc, arg);
2660 case FSACTL_CLOSE_GET_ADAPTER_FIB:
2661 arg = *(caddr_t*)arg;
2662 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2663 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2664 error = aac_close_aif(sc, arg);
2666 case FSACTL_MINIPORT_REV_CHECK:
2667 arg = *(caddr_t*)arg;
2668 case FSACTL_LNX_MINIPORT_REV_CHECK:
2669 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2670 error = aac_rev_check(sc, arg);
2672 case FSACTL_QUERY_DISK:
2673 arg = *(caddr_t*)arg;
2674 case FSACTL_LNX_QUERY_DISK:
2675 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2676 error = aac_query_disk(sc, arg);
2678 case FSACTL_DELETE_DISK:
2679 case FSACTL_LNX_DELETE_DISK:
2681 * We don't trust the underland to tell us when to delete a
2682 * container, rather we rely on an AIF coming from the
2687 case FSACTL_GET_PCI_INFO:
2688 arg = *(caddr_t*)arg;
2689 case FSACTL_LNX_GET_PCI_INFO:
2690 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2691 error = aac_get_pci_info(sc, arg);
2693 case FSACTL_GET_FEATURES:
2694 arg = *(caddr_t*)arg;
2695 case FSACTL_LNX_GET_FEATURES:
2696 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2697 error = aac_supported_features(sc, arg);
2700 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
/*
 * poll(2) entry point: report readability when any open AIF context has
 * unread entries (its index differs from the queue head, or it wrapped);
 * otherwise record the thread for selwakeup on the receive selinfo.
 */
2708 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2710 struct aac_softc *sc;
2711 struct aac_fib_context *ctx;
2717 mtx_lock(&sc->aac_io_lock);
2718 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2719 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
/* Unread AIFs pending for this context -> readable now. */
2720 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2721 revents |= poll_events & (POLLIN | POLLRDNORM);
2726 mtx_unlock(&sc->aac_io_lock);
/* Nothing ready: arrange to be woken when an AIF arrives. */
2729 if (poll_events & (POLLIN | POLLRDNORM))
2730 selrecord(td, &sc->rcv_select);
/*
 * Event callback used by the ioctl paths while waiting for a free command:
 * on AAC_EVENT_CMFREE, retry the allocation into *arg; if it still fails,
 * re-queue the event, otherwise the event is consumed and freed.
 */
2737 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2740 switch (event->ev_type) {
2741 case AAC_EVENT_CMFREE:
2742 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2743 if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
/* Still no command available -- keep waiting on the event list. */
2744 aacraid_add_event(sc, event);
2747 free(event, M_AACRAIDBUF);
2756 * Send a FIB supplied from userspace
/*
 * FSACTL_SENDFIB handler: copy a user-supplied FIB into a driver command,
 * submit it to the controller, wait for completion, and copy the result
 * back to userspace.  Oversized FIBs are clamped (with a console warning)
 * to sc->aac_max_fib_size in both directions.
 */
2759 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2761 struct aac_command *cm;
2764 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* No free command: register a CMFREE event and sleep until one appears. */
2771 mtx_lock(&sc->aac_io_lock);
2772 if (aacraid_alloc_command(sc, &cm)) {
2773 struct aac_event *event;
2775 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2777 if (event == NULL) {
2779 mtx_unlock(&sc->aac_io_lock);
2782 event->ev_type = AAC_EVENT_CMFREE;
2783 event->ev_callback = aac_ioctl_event;
2784 event->ev_arg = &cm;
2785 aacraid_add_event(sc, event);
2786 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2788 mtx_unlock(&sc->aac_io_lock);
2791 * Fetch the FIB header, then re-copy to get data as well.
2793 if ((error = copyin(ufib, cm->cm_fib,
2794 sizeof(struct aac_fib_header))) != 0)
2796 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2797 if (size > sc->aac_max_fib_size) {
2798 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2799 size, sc->aac_max_fib_size);
2800 size = sc->aac_max_fib_size;
2802 if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2804 cm->cm_fib->Header.Size = size;
2805 cm->cm_timestamp = time_uptime;
2809 * Pass the FIB to the controller, wait for it to complete.
2811 mtx_lock(&sc->aac_io_lock);
2812 error = aacraid_wait_command(cm);
2813 mtx_unlock(&sc->aac_io_lock);
2815 device_printf(sc->aac_dev,
2816 "aacraid_wait_command return %d\n", error);
2821 * Copy the FIB and data back out to the caller.
2823 size = cm->cm_fib->Header.Size;
2824 if (size > sc->aac_max_fib_size) {
2825 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2826 size, sc->aac_max_fib_size);
2827 size = sc->aac_max_fib_size;
2829 error = copyout(cm->cm_fib, ufib, size);
/* Common exit: return the command to the free pool. */
2833 mtx_lock(&sc->aac_io_lock);
2834 aacraid_release_command(cm);
2835 mtx_unlock(&sc->aac_io_lock);
2841 * Send a passthrough FIB supplied from userspace
/*
 * FSACTL_SEND_RAW_SRB handler: accept a SCSI passthrough request (SRB)
 * from userspace, stage its single data buffer through a dedicated DMA
 * allocation, submit the SRB FIB, wait for completion, and copy data and
 * response back out.  Only one user scatter/gather element is supported;
 * both 32-bit and 64-bit user SG entry layouts are recognized by fibsize.
 */
2844 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2846 struct aac_command *cm;
2847 struct aac_fib *fib;
2848 struct aac_srb *srbcmd;
2849 struct aac_srb *user_srb = (struct aac_srb *)arg;
2851 int error, transfer_data = 0;
2852 bus_dmamap_t orig_map = 0;
2853 u_int32_t fibsize = 0;
2854 u_int64_t srb_sg_address;
2855 u_int32_t srb_sg_bytecount;
2857 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* No free command: register a CMFREE event and sleep until one appears. */
2861 mtx_lock(&sc->aac_io_lock);
2862 if (aacraid_alloc_command(sc, &cm)) {
2863 struct aac_event *event;
2865 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2867 if (event == NULL) {
2869 mtx_unlock(&sc->aac_io_lock);
2872 event->ev_type = AAC_EVENT_CMFREE;
2873 event->ev_callback = aac_ioctl_event;
2874 event->ev_arg = &cm;
2875 aacraid_add_event(sc, event);
2876 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2878 mtx_unlock(&sc->aac_io_lock);
2881 /* save original dma map */
2882 orig_map = cm->cm_datamap;
/* Copy in the user SRB: first its length, then the whole structure. */
2885 srbcmd = (struct aac_srb *)fib->data;
2886 if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2887 sizeof (u_int32_t))) != 0)
2889 if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2893 if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2896 srbcmd->function = 0; /* SRBF_ExecuteScsi */
2897 srbcmd->retry_limit = 0; /* obsolete */
2899 /* only one sg element from userspace supported */
2900 if (srbcmd->sg_map.SgCount > 1) {
/* fibsize discriminates 32-bit vs 64-bit user SG entry layouts. */
2905 if (fibsize == (sizeof(struct aac_srb) +
2906 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2907 struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2908 struct aac_sg_entry sg;
2910 if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2913 srb_sg_bytecount = sg.SgByteCount;
2914 srb_sg_address = (u_int64_t)sg.SgAddress;
2915 } else if (fibsize == (sizeof(struct aac_srb) +
2916 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2918 struct aac_sg_entry64 *sgp =
2919 (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2920 struct aac_sg_entry64 sg;
2922 if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2925 srb_sg_bytecount = sg.SgByteCount;
2926 srb_sg_address = sg.SgAddress;
/* The response area in userspace follows the SRB proper. */
2935 user_reply = (char *)arg + fibsize;
2936 srbcmd->data_len = srb_sg_bytecount;
2937 if (srbcmd->sg_map.SgCount == 1)
2940 if (transfer_data) {
2942 * Create DMA tag for the passthr. data buffer and allocate it.
2944 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
2945 1, 0, /* algnmnt, boundary */
2946 (sc->flags & AAC_FLAGS_SG_64BIT) ?
2947 BUS_SPACE_MAXADDR_32BIT :
2948 0x7fffffff, /* lowaddr */
2949 BUS_SPACE_MAXADDR, /* highaddr */
2950 NULL, NULL, /* filter, filterarg */
2951 srb_sg_bytecount, /* size */
2952 sc->aac_sg_tablesize, /* nsegments */
2953 srb_sg_bytecount, /* maxsegsize */
2955 NULL, NULL, /* No locking needed */
2956 &cm->cm_passthr_dmat)) {
2960 if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2961 BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2965 /* fill some cm variables */
2966 cm->cm_datalen = srb_sg_bytecount;
2967 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2968 cm->cm_flags |= AAC_CMD_DATAIN;
2969 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2970 cm->cm_flags |= AAC_CMD_DATAOUT;
/* Outbound data: copy the user buffer into the DMA area up front. */
2972 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2973 if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2974 cm->cm_data, cm->cm_datalen)) != 0)
2976 /* sync required for bus_dmamem_alloc() alloc. mem.? */
2977 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2978 BUS_DMASYNC_PREWRITE);
/* Build the FIB header around the SRB and pick the SG-width command. */
2983 fib->Header.Size = sizeof(struct aac_fib_header) +
2984 sizeof(struct aac_srb);
2985 fib->Header.XferState =
2986 AAC_FIBSTATE_HOSTOWNED |
2987 AAC_FIBSTATE_INITIALISED |
2988 AAC_FIBSTATE_EMPTY |
2989 AAC_FIBSTATE_FROMHOST |
2990 AAC_FIBSTATE_REXPECTED |
2994 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2995 ScsiPortCommandU64 : ScsiPortCommand;
2996 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
/* Load the DMA map (callback submits the command) or submit directly. */
2999 if (transfer_data) {
3000 bus_dmamap_load(cm->cm_passthr_dmat,
3001 cm->cm_datamap, cm->cm_data,
3003 aacraid_map_command_sg, cm, 0);
3005 aacraid_map_command_sg(cm, NULL, 0, 0);
3008 /* wait for completion */
3009 mtx_lock(&sc->aac_io_lock);
3010 while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3011 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3012 mtx_unlock(&sc->aac_io_lock);
/* Inbound data: copy the DMA buffer back out to the user address. */
3015 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
3016 if ((error = copyout(cm->cm_data,
3017 (void *)(uintptr_t)srb_sg_address,
3018 cm->cm_datalen)) != 0)
3020 /* sync required for bus_dmamem_alloc() allocated mem.? */
3021 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3022 BUS_DMASYNC_POSTREAD);
3026 error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
/* Cleanup: free passthrough DMA resources, restore the original map. */
3029 if (cm && cm->cm_data) {
3031 bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3032 bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3033 cm->cm_datamap = orig_map;
3035 if (cm && cm->cm_passthr_dmat)
3036 bus_dma_tag_destroy(cm->cm_passthr_dmat);
3038 mtx_lock(&sc->aac_io_lock);
3039 aacraid_release_command(cm);
3040 mtx_unlock(&sc->aac_io_lock);
3046 * Request an AIF from the controller (new comm. type1)
/*
 * Post an AIF (Adapter Initiated FIB) request to the controller so it has
 * a FIB to fill in when an asynchronous event occurs (new comm. type1).
 * If no command is free, set aif_pending so the request is retried later.
 */
3049 aac_request_aif(struct aac_softc *sc)
3051 struct aac_command *cm;
3052 struct aac_fib *fib;
3054 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3056 if (aacraid_alloc_command(sc, &cm)) {
3057 sc->aif_pending = 1;
3060 sc->aif_pending = 0;
3064 fib->Header.Size = sizeof(struct aac_fib);
3065 fib->Header.XferState =
3066 AAC_FIBSTATE_HOSTOWNED |
3067 AAC_FIBSTATE_INITIALISED |
3068 AAC_FIBSTATE_EMPTY |
3069 AAC_FIBSTATE_FROMHOST |
3070 AAC_FIBSTATE_REXPECTED |
3073 /* set AIF marker */
3074 fib->Header.Handle = 0x00800000;
3075 fib->Header.Command = AifRequest;
3076 ((struct aac_aif_command *)fib->data)->command = AifReqEvent;
/* NULL segs: no data buffer; this submits the FIB to the adapter. */
3078 aacraid_map_command_sg(cm, NULL, 0, 0);
3082 #if __FreeBSD_version >= 702000
3084 * cdevpriv interface private destructor.
/*
 * cdevpriv destructor registered by aac_open(): drops the device_busy()
 * reference taken at open time.  'arg' is the softc stored via
 * devfs_set_cdevpriv().
 */
3087 aac_cdevpriv_dtor(void *arg)
3089 struct aac_softc *sc;
3092 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3094 device_unbusy(sc->aac_dev);
/*
 * Character-device close entry point.  NOTE(review): elided view -- the
 * body beyond the debug trace is not visible here.
 */
3099 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3101 struct aac_softc *sc;
3104 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3110 * Handle an AIF sent to us by the controller; queue it for later reference.
3111 * If the queue fills up, then drop the older entries.
/*
 * Process an AIF received from the controller: react to container
 * add/delete and enclosure events (re-enumerating containers or triggering
 * CAM rescans), then append the raw FIB to the driver's AIF ring buffer
 * for userspace retrieval and wake any sleepers/pollers.
 */
3114 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3116 struct aac_aif_command *aif;
3117 struct aac_container *co, *co_next;
3118 struct aac_fib_context *ctx;
3119 struct aac_fib *sync_fib;
3120 struct aac_mntinforesp mir;
3121 int next, current, found;
3122 int count = 0, changed = 0, i = 0;
3123 u_int32_t channel, uid;
3125 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3127 aif = (struct aac_aif_command*)&fib->data[0];
3128 aacraid_print_aif(sc, aif);
3130 /* Is it an event that we should care about? */
3131 switch (aif->command) {
3132 case AifCmdEventNotify:
3133 switch (aif->data.EN.type) {
3134 case AifEnAddContainer:
3135 case AifEnDeleteContainer:
3137 * A container was added or deleted, but the message
3138 * doesn't tell us anything else! Re-enumerate the
3139 * containers and sort things out.
3141 aac_alloc_sync_fib(sc, &sync_fib)
3144 * Ask the controller for its containers one at
3146 * XXX What if the controller's list changes
3147 * midway through this enumaration?
3148 * XXX This should be done async.
3150 if (aac_get_container_info(sc, sync_fib, i,
3154 count = mir.MntRespCount;
3156 * Check the container against our list.
3157 * co->co_found was already set to 0 in a
3160 if ((mir.Status == ST_OK) &&
3161 (mir.MntTable[0].VolType != CT_NONE)) {
3164 &sc->aac_container_tqh,
3166 if (co->co_mntobj.ObjectId ==
3167 mir.MntTable[0].ObjectId) {
3174 * If the container matched, continue
3183 * This is a new container. Do all the
3184 * appropriate things to set it up.
3186 aac_add_container(sc, &mir, 1, uid);
3190 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3191 aac_release_sync_fib(sc);
3194 * Go through our list of containers and see which ones
3195 * were not marked 'found'. Since the controller didn't
3196 * list them they must have been deleted. Do the
3197 * appropriate steps to destroy the device. Also reset
3198 * the co->co_found field.
3200 co = TAILQ_FIRST(&sc->aac_container_tqh);
3201 while (co != NULL) {
3202 if (co->co_found == 0) {
3203 co_next = TAILQ_NEXT(co, co_link);
3204 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3206 free(co, M_AACRAIDBUF);
3211 co = TAILQ_NEXT(co, co_link);
3215 /* Attach the newly created containers */
3217 if (sc->cam_rescan_cb != NULL)
3218 sc->cam_rescan_cb(sc, 0,
3219 AAC_CAM_TARGET_WILDCARD);
3224 case AifEnEnclosureManagement:
3225 switch (aif->data.EN.data.EEE.eventType) {
3226 case AIF_EM_DRIVE_INSERTION:
3227 case AIF_EM_DRIVE_REMOVAL:
/* unitID encodes bus in its high byte and target in the low 16 bits. */
3228 channel = aif->data.EN.data.EEE.unitID;
3229 if (sc->cam_rescan_cb != NULL)
3230 sc->cam_rescan_cb(sc,
3231 ((channel>>24) & 0xF) + 1,
3232 (channel & 0xFFFF));
3238 case AifEnDeleteJBOD:
3239 case AifRawDeviceRemove:
3240 channel = aif->data.EN.data.ECE.container;
3241 if (sc->cam_rescan_cb != NULL)
3242 sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3243 AAC_CAM_TARGET_WILDCARD);
3254 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3255 current = sc->aifq_idx;
3256 next = (current + 1) % AAC_AIFQ_LENGTH;
3258 sc->aifq_filled = 1;
3259 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3260 /* modify AIF contexts */
/* Once the ring has wrapped, advance any context the writer lapped. */
3261 if (sc->aifq_filled) {
3262 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3263 if (next == ctx->ctx_idx)
3265 else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3266 ctx->ctx_idx = next;
3269 sc->aifq_idx = next;
3270 /* On the off chance that someone is sleeping for an aif... */
3271 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3272 wakeup(sc->aac_aifq);
3273 /* Wakeup any poll()ers */
3274 selwakeuppri(&sc->rcv_select, PRIBIO);
3280 * Return the Revision of the driver to userspace and check to see if the
3281 * userspace app is possibly compatible. This is extremely bogus since
3282 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3283 * returning what the card reported.
/*
 * FSACTL_MINIPORT_REV_CHECK handler: read the caller's revision struct and
 * answer with the driver's version, always claiming compatibility (the
 * driver does not follow Adaptec's versioning scheme -- see block comment
 * above this function in the file).
 */
3286 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3288 struct aac_rev_check rev_check;
3289 struct aac_rev_check_resp rev_check_resp;
3292 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3295 * Copyin the revision struct from userspace
3297 if ((error = copyin(udata, (caddr_t)&rev_check,
3298 sizeof(struct aac_rev_check))) != 0) {
3302 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3303 rev_check.callingRevision.buildNumber);
3306 * Doctor up the response struct.
3308 rev_check_resp.possiblyCompatible = 1;
3309 rev_check_resp.adapterSWRevision.external.comp.major =
3310 AAC_DRIVER_MAJOR_VERSION;
3311 rev_check_resp.adapterSWRevision.external.comp.minor =
3312 AAC_DRIVER_MINOR_VERSION;
3313 rev_check_resp.adapterSWRevision.external.comp.type =
3315 rev_check_resp.adapterSWRevision.external.comp.dash =
3316 AAC_DRIVER_BUGFIX_LEVEL;
3317 rev_check_resp.adapterSWRevision.buildNumber =
3320 return(copyout((caddr_t)&rev_check_resp, udata,
3321 sizeof(struct aac_rev_check_resp)));
3325 * Pass the fib context to the caller
/*
 * FSACTL_OPEN_GET_ADAPTER_FIB handler: allocate a new AIF context, link it
 * onto sc->fibctx, assign it a unique id, and copy that id out to the
 * caller.  On copyout failure the context is torn down via aac_close_aif().
 */
3328 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3330 struct aac_fib_context *fibctx, *ctx;
3333 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3335 fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3339 mtx_lock(&sc->aac_io_lock);
3340 /* all elements are already 0, add to queue */
3341 if (sc->fibctx == NULL)
3342 sc->fibctx = fibctx;
3344 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3350 /* evaluate unique value */
/* NOTE(review): this reads the low 32 bits of the *address of* the local
 * pointer variable, not of the allocation itself -- looks like it was
 * meant to be (u_int32_t)(uintptr_t)fibctx; confirm against upstream. */
3351 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
/* Walk the list to guarantee the id is not already in use. */
3353 while (ctx != fibctx) {
3354 if (ctx->unique == fibctx->unique) {
3362 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3363 mtx_unlock(&sc->aac_io_lock);
3365 aac_close_aif(sc, (caddr_t)ctx);
3370 * Close the caller's fib context
/*
 * FSACTL_CLOSE_GET_ADAPTER_FIB handler: find the AIF context whose unique
 * id matches the caller-supplied value, unlink it from sc->fibctx, and
 * free it.
 */
3373 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3375 struct aac_fib_context *ctx;
3377 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3379 mtx_lock(&sc->aac_io_lock);
3380 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
/* NOTE(review): compares against the bits of the 'arg' pointer value
 * itself (callers pass the id cast to caddr_t) -- verify on LP64. */
3381 if (ctx->unique == *(uint32_t *)&arg) {
3382 if (ctx == sc->fibctx)
3385 ctx->prev->next = ctx->next;
3387 ctx->next->prev = ctx->prev;
3393 free(ctx, M_AACRAIDBUF);
3395 mtx_unlock(&sc->aac_io_lock);
3400 * Pass the caller the next AIF in their queue
/*
 * FSACTL_GET_NEXT_ADAPTER_FIB handler: copy in the request (with 32-bit
 * compat translation), locate the matching AIF context, and hand out the
 * next queued AIF.  If none is pending and the caller asked to wait,
 * sleep (interruptibly) until one arrives.
 */
3403 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3405 struct get_adapter_fib_ioctl agf;
3406 struct aac_fib_context *ctx;
3409 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3411 mtx_lock(&sc->aac_io_lock);
3412 #ifdef COMPAT_FREEBSD32
/* 32-bit process on a 64-bit kernel: translate the ioctl layout. */
3413 if (SV_CURPROC_FLAG(SV_ILP32)) {
3414 struct get_adapter_fib_ioctl32 agf32;
3415 error = copyin(arg, &agf32, sizeof(agf32));
3417 agf.AdapterFibContext = agf32.AdapterFibContext;
3418 agf.Wait = agf32.Wait;
3419 agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3423 error = copyin(arg, &agf, sizeof(agf));
3425 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3426 if (agf.AdapterFibContext == ctx->unique)
3430 mtx_unlock(&sc->aac_io_lock);
3434 error = aac_return_aif(sc, ctx, agf.AifFib);
3435 if (error == EAGAIN && agf.Wait) {
3436 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3437 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3438 while (error == EAGAIN) {
/* Drop the lock while sleeping; aac_handle_aif() does the wakeup. */
3439 mtx_unlock(&sc->aac_io_lock);
3440 error = tsleep(sc->aac_aifq, PRIBIO |
3441 PCATCH, "aacaif", 0);
3442 mtx_lock(&sc->aac_io_lock);
3444 error = aac_return_aif(sc, ctx, agf.AifFib);
3446 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3449 mtx_unlock(&sc->aac_io_lock);
3454 * Hand the next AIF off the top of the queue out to userspace.
/*
 * Copy the next AIF for this context out to userspace and advance the
 * context's read index.  Returns EAGAIN (via elided path) when the context
 * has caught up with the queue head and has not wrapped.
 */
3457 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3461 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3463 current = ctx->ctx_idx;
/* Queue empty for this context: nothing to return. */
3464 if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3469 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3471 device_printf(sc->aac_dev,
3472 "aac_return_aif: copyout returned %d\n", error);
3475 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
/*
 * FSACTL_GET_PCI_INFO handler: report the adapter's PCI bus and slot
 * numbers to userspace.
 */
3481 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3483 struct aac_pci_info {
3489 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3491 pciinf.bus = pci_get_bus(sc->aac_dev);
3492 pciinf.slot = pci_get_slot(sc->aac_dev);
3494 error = copyout((caddr_t)&pciinf, uptr,
3495 sizeof(struct aac_pci_info));
/*
 * FSACTL_GET_FEATURES handler: report driver feature state back to the
 * management app.  featuresState == 0 means "report everything"; a set
 * bit means "report just that feature" (see the in-function comment).
 */
3501 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3503 struct aac_features f;
3506 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3508 if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3512 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3513 * ALL zero in the featuresState, the driver will return the current
3514 * state of all the supported features, the data field will not be
3516 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3517 * a specific bit set in the featuresState, the driver will return the
3518 * current state of this specific feature and whatever data that are
3519 * associated with the feature in the data field or perform whatever
3520 * action needed indicates in the data field.
3522 if (f.feat.fValue == 0) {
3523 f.feat.fBits.largeLBA =
3524 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3525 f.feat.fBits.JBODSupport = 1;
3526 /* TODO: In the future, add other features state here as well */
3528 if (f.feat.fBits.largeLBA)
3529 f.feat.fBits.largeLBA =
3530 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3531 /* TODO: Add other features state and data in the future */
3534 error = copyout(&f, uptr, sizeof (f));
3539 * Give the userland some information about the container. The AAC arch
3540 * expects the driver to be a SCSI passthrough type driver, so it expects
3541 * the containers to have b:t:l numbers. Fake it.
/*
 * FSACTL_QUERY_DISK handler: fake b:t:l addressing for a container so the
 * management tools (which expect a SCSI passthrough driver) are satisfied.
 * Looks the container up by ObjectId and reports Valid/Locked/Deleted
 * plus synthesized bus/target numbers.
 */
3544 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3546 struct aac_query_disk query_disk;
3547 struct aac_container *co;
3550 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3552 mtx_lock(&sc->aac_io_lock);
3553 error = copyin(uptr, (caddr_t)&query_disk,
3554 sizeof(struct aac_query_disk));
3556 mtx_unlock(&sc->aac_io_lock);
3560 id = query_disk.ContainerNumber;
3562 mtx_unlock(&sc->aac_io_lock);
3566 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3567 if (co->co_mntobj.ObjectId == id)
/* No such container: report it as deleted. */
3572 query_disk.Valid = 0;
3573 query_disk.Locked = 0;
3574 query_disk.Deleted = 1; /* XXX is this right? */
3576 query_disk.Valid = 1;
3577 query_disk.Locked = 1;
3578 query_disk.Deleted = 0;
3579 query_disk.Bus = device_get_unit(sc->aac_dev);
3580 query_disk.Target = 0;
3582 query_disk.UnMapped = 0;
3585 error = copyout((caddr_t)&query_disk, uptr,
3586 sizeof(struct aac_query_disk));
3588 mtx_unlock(&sc->aac_io_lock);
/*
 * Create the virtual "container" SIM/bus: allocate an aac_sim, add an
 * "aacraidp" child device for it, and queue it on aac_sim_tqh.  Allocation
 * or child-creation failure is treated as fatal (panic) since the driver
 * cannot operate without the container bus.
 */
3593 aac_container_bus(struct aac_softc *sc)
3595 struct aac_sim *sim;
3598 sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3599 M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3601 device_printf(sc->aac_dev,
3602 "No memory to add container bus\n");
3603 panic("Out of memory?!");
3605 child = device_add_child(sc->aac_dev, "aacraidp", -1);
3606 if (child == NULL) {
3607 device_printf(sc->aac_dev,
3608 "device_add_child failed for container bus\n");
3609 free(sim, M_AACRAIDBUF);
3610 panic("Out of memory?!");
3613 sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3615 sim->BusType = CONTAINER_BUS;
3616 sim->InitiatorBusId = -1;
3618 sim->sim_dev = child;
3619 sim->aac_cam = NULL;
3621 device_set_ivars(child, sim);
3622 device_set_desc(child, "Container Bus");
3623 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link)
3625 device_set_desc(child, aac_describe_code(aac_container_types,
3626 mir->MntTable[0].VolType));
3628 bus_generic_attach(sc->aac_dev);
/*
 * Discover the adapter's physical SCSI buses: obtain the SCSI method id
 * via VM_ContainerConfig/CT_GET_SCSI_METHOD, fetch bus info via
 * VM_Ioctl/GetBusInfo, then create one "aacraidp" passthrough SIM per
 * valid bus.  All firmware interaction uses sync FIBs under aac_io_lock.
 */
3632 aac_get_bus_info(struct aac_softc *sc)
3634 struct aac_fib *fib;
3635 struct aac_ctcfg *c_cmd;
3636 struct aac_ctcfg_resp *c_resp;
3637 struct aac_vmioctl *vmi;
3638 struct aac_vmi_businf_resp *vmi_resp;
3639 struct aac_getbusinf businfo;
3640 struct aac_sim *caminf;
3644 mtx_lock(&sc->aac_io_lock);
3645 aac_alloc_sync_fib(sc, &fib);
3646 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3647 bzero(c_cmd, sizeof(struct aac_ctcfg));
3649 c_cmd->Command = VM_ContainerConfig;
3650 c_cmd->cmd = CT_GET_SCSI_METHOD;
3653 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3654 sizeof(struct aac_ctcfg));
3656 device_printf(sc->aac_dev, "Error %d sending "
3657 "VM_ContainerConfig command\n", error);
3658 aac_release_sync_fib(sc);
3659 mtx_unlock(&sc->aac_io_lock);
3663 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3664 if (c_resp->Status != ST_OK) {
3665 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3667 aac_release_sync_fib(sc);
3668 mtx_unlock(&sc->aac_io_lock);
/* Remember the SCSI method id; it is required for later VM_Ioctl calls. */
3672 sc->scsi_method_id = c_resp->param;
3674 vmi = (struct aac_vmioctl *)&fib->data[0];
3675 bzero(vmi, sizeof(struct aac_vmioctl));
3677 vmi->Command = VM_Ioctl;
3678 vmi->ObjType = FT_DRIVE;
3679 vmi->MethId = sc->scsi_method_id;
3681 vmi->IoctlCmd = GetBusInfo;
3683 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3684 sizeof(struct aac_vmi_businf_resp));
3686 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3688 aac_release_sync_fib(sc);
3689 mtx_unlock(&sc->aac_io_lock);
3693 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3694 if (vmi_resp->Status != ST_OK) {
3695 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3697 aac_release_sync_fib(sc);
3698 mtx_unlock(&sc->aac_io_lock);
/* Snapshot the bus info before releasing the sync FIB it lives in. */
3702 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3703 aac_release_sync_fib(sc);
3704 mtx_unlock(&sc->aac_io_lock);
/* One passthrough SIM per valid physical bus; failures skip the bus. */
3706 for (i = 0; i < businfo.BusCount; i++) {
3707 if (businfo.BusValid[i] != AAC_BUS_VALID)
3710 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3711 M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3712 if (caminf == NULL) {
3713 device_printf(sc->aac_dev,
3714 "No memory to add passthrough bus %d\n", i);
3718 child = device_add_child(sc->aac_dev, "aacraidp", -1);
3719 if (child == NULL) {
3720 device_printf(sc->aac_dev,
3721 "device_add_child failed for passthrough bus %d\n",
3723 free(caminf, M_AACRAIDBUF);
3727 caminf->TargetsPerBus = businfo.TargetsPerBus;
3728 caminf->BusNumber = i+1;
3729 caminf->BusType = PASSTHROUGH_BUS;
3730 caminf->InitiatorBusId = -1;
3731 caminf->aac_sc = sc;
3732 caminf->sim_dev = child;
3733 caminf->aac_cam = NULL;
3735 device_set_ivars(child, caminf);
3736 device_set_desc(child, "SCSI Passthrough Bus");
3737 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3742 * Check to see if the kernel is up and running. If we are in a
3743 * BlinkLED state, return the BlinkLED code.
/*
 * Read the firmware status register: healthy if AAC_UP_AND_RUNNING is set;
 * on a kernel panic (BlinkLED), optionally return the blink code through
 * *bled (bits 16..23 of the status word).
 */
3746 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3750 ret = AAC_GET_FWSTATUS(sc);
3752 if (ret & AAC_UP_AND_RUNNING)
3754 else if (ret & AAC_KERNEL_PANIC && bled)
3755 *bled = (ret >> 16) & 0xff;
3761 * Once do an IOP reset, basically have to re-initialize the card as
3762 * if coming up from a cold boot, and the driver is responsible for
3763 * any IO that was outstanding to the adapter at the time of the IOP
3764 * RESET. And prepare the driver for IOP RESET by making the init code
3765 * modular with the ability to call it from multiple places.
3768 aac_reset_adapter(struct aac_softc *sc)
3770 struct aac_command *cm;
3771 struct aac_fib *fib;
3772 struct aac_pause_command *pc;
3773 u_int32_t status, reset_mask, waitCount, max_msix_orig;
3774 int ret, msi_enabled_orig;
3776 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3777 mtx_assert(&sc->aac_io_lock, MA_OWNED);
3779 if (sc->aac_state & AAC_STATE_RESET) {
3780 device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3783 sc->aac_state |= AAC_STATE_RESET;
3785 /* disable interrupt */
3786 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3789 * Abort all pending commands:
3790 * a) on the controller
3792 while ((cm = aac_dequeue_busy(sc)) != NULL) {
3793 cm->cm_flags |= AAC_CMD_RESET;
3795 /* is there a completion handler? */
3796 if (cm->cm_complete != NULL) {
3797 cm->cm_complete(cm);
3799 /* assume that someone is sleeping on this
3806 /* b) in the waiting queues */
3807 while ((cm = aac_dequeue_ready(sc)) != NULL) {
3808 cm->cm_flags |= AAC_CMD_RESET;
3810 /* is there a completion handler? */
3811 if (cm->cm_complete != NULL) {
3812 cm->cm_complete(cm);
3814 /* assume that someone is sleeping on this
3822 if (aac_check_adapter_health(sc, NULL) == 0) {
3823 mtx_unlock(&sc->aac_io_lock);
3824 (void) aacraid_shutdown(sc->aac_dev);
3825 mtx_lock(&sc->aac_io_lock);
3828 /* execute IOP reset */
3829 if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3830 AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3832 /* We need to wait for 5 seconds before accessing the MU again
3833 * 10000 * 100us = 1000,000us = 1000ms = 1s
3835 waitCount = 5 * 10000;
3837 DELAY(100); /* delay 100 microseconds */
3841 ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3842 0, 0, 0, 0, &status, &reset_mask);
3843 if (ret && !sc->doorbell_mask) {
3844 /* call IOP_RESET for older firmware */
3845 if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0,0,0,0,
3846 &status, NULL)) != 0) {
3847 if (status == AAC_SRB_STS_INVALID_REQUEST) {
3848 device_printf(sc->aac_dev,
3849 "IOP_RESET not supported\n");
3851 /* probably timeout */
3852 device_printf(sc->aac_dev,
3853 "IOP_RESET failed\n");
3856 /* unwind aac_shutdown() */
3857 aac_alloc_sync_fib(sc, &fib);
3858 pc = (struct aac_pause_command *)&fib->data[0];
3859 pc->Command = VM_ContainerConfig;
3860 pc->Type = CT_PAUSE_IO;
3865 (void) aac_sync_fib(sc, ContainerCommand, 0,
3866 fib, sizeof (struct aac_pause_command));
3867 aac_release_sync_fib(sc);
3871 } else if (sc->doorbell_mask) {
3873 reset_mask = sc->doorbell_mask;
3876 (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3877 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3879 * We need to wait for 5 seconds before accessing the
3881 * 10000 * 100us = 1000,000us = 1000ms = 1s
3883 waitCount = 5 * 10000;
3885 DELAY(100); /* delay 100 microseconds */
3892 * Initialize the adapter.
3894 max_msix_orig = sc->aac_max_msix;
3895 msi_enabled_orig = sc->msi_enabled;
3896 sc->msi_enabled = FALSE;
3897 if (aac_check_firmware(sc) != 0)
3899 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3900 sc->aac_max_msix = max_msix_orig;
3901 if (msi_enabled_orig) {
3902 sc->msi_enabled = msi_enabled_orig;
3903 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3905 mtx_unlock(&sc->aac_io_lock);
3907 mtx_lock(&sc->aac_io_lock);
3911 sc->aac_state &= ~AAC_STATE_RESET;
3912 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3913 aacraid_startio(sc);