2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2001 Scott Long
4 * Copyright (c) 2000 BSDi
5 * Copyright (c) 2001-2010 Adaptec, Inc.
6 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
37 #define AAC_DRIVERNAME "aacraid"
39 #include "opt_aacraid.h"
41 /* #include <stddef.h> */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/sysctl.h>
49 #include <sys/ioccom.h>
53 #include <sys/signalvar.h>
55 #include <sys/eventhandler.h>
58 #include <machine/bus.h>
59 #include <machine/resource.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
64 #include <dev/aacraid/aacraid_reg.h>
65 #include <sys/aac_ioctl.h>
66 #include <dev/aacraid/aacraid_debug.h>
67 #include <dev/aacraid/aacraid_var.h>
/*
 * Forward declarations for driver-internal helpers.
 * NOTE(review): this view of the file is missing interior lines — several
 * prototypes below are visibly truncated (e.g. the closing parameter lists
 * of aac_add_container, aac_sync_fib and aac_map_command_helper). Confirm
 * against the full source before editing.
 */
69 #ifndef FILTER_HANDLED
/* Compatibility shim for kernels predating filter interrupt handlers. */
70 #define FILTER_HANDLED 0x02
73 static void aac_add_container(struct aac_softc *sc,
74 struct aac_mntinforesp *mir, int f,
76 static void aac_get_bus_info(struct aac_softc *sc);
77 static void aac_container_bus(struct aac_softc *sc);
78 static void aac_daemon(void *arg);
79 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
80 int pages, int nseg, int nseg_new);
82 /* Command Processing */
83 static void aac_timeout(struct aac_softc *sc);
84 static void aac_command_thread(struct aac_softc *sc);
85 static int aac_sync_fib(struct aac_softc *sc, u_int32_t command,
86 u_int32_t xferstate, struct aac_fib *fib,
88 /* Command Buffer Management */
89 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
91 static int aac_alloc_commands(struct aac_softc *sc);
92 static void aac_free_commands(struct aac_softc *sc);
93 static void aac_unmap_command(struct aac_command *cm);
95 /* Hardware Interface */
96 static int aac_alloc(struct aac_softc *sc);
97 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
99 static int aac_check_firmware(struct aac_softc *sc);
100 static void aac_define_int_mode(struct aac_softc *sc);
101 static int aac_init(struct aac_softc *sc);
102 static int aac_find_pci_capability(struct aac_softc *sc, int cap);
103 static int aac_setup_intr(struct aac_softc *sc);
104 static int aac_check_config(struct aac_softc *sc);
106 /* PMC SRC interface */
107 static int aac_src_get_fwstatus(struct aac_softc *sc);
108 static void aac_src_qnotify(struct aac_softc *sc, int qbit);
109 static int aac_src_get_istatus(struct aac_softc *sc);
110 static void aac_src_clear_istatus(struct aac_softc *sc, int mask);
111 static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
112 u_int32_t arg0, u_int32_t arg1,
113 u_int32_t arg2, u_int32_t arg3);
114 static int aac_src_get_mailbox(struct aac_softc *sc, int mb);
115 static void aac_src_access_devreg(struct aac_softc *sc, int mode);
116 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
117 static int aac_src_get_outb_queue(struct aac_softc *sc);
118 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
/*
 * Register-access vtable for PMC SRC-family controllers; consumed via the
 * AAC_ACCESS_DEVREG/AAC_*-style dispatch macros.
 * NOTE(review): some initializer entries (qnotify, get_istatus, the mailbox
 * accessors) are missing from this view of the file — TODO confirm the full
 * initializer list against the complete source.
 */
120 struct aac_interface aacraid_src_interface = {
121 aac_src_get_fwstatus,
124 aac_src_clear_istatus,
127 aac_src_access_devreg,
128 aac_src_send_command,
129 aac_src_get_outb_queue,
130 aac_src_set_outb_queue
133 /* PMC SRCv interface */
/*
 * SRCv (series 7/8) differs from SRC only in mailbox register layout, so
 * only the mailbox accessors get dedicated implementations; everything else
 * in the SRCv vtable below reuses the SRC routines.
 */
134 static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
135 u_int32_t arg0, u_int32_t arg1,
136 u_int32_t arg2, u_int32_t arg3);
137 static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
/*
 * Register-access vtable for PMC SRCv-family controllers. Identical to
 * aacraid_src_interface except for the SRCv-specific mailbox accessors.
 * NOTE(review): some initializer entries are missing from this view — TODO
 * confirm against the complete source.
 */
139 struct aac_interface aacraid_srcv_interface = {
140 aac_src_get_fwstatus,
143 aac_src_clear_istatus,
144 aac_srcv_set_mailbox,
145 aac_srcv_get_mailbox,
146 aac_src_access_devreg,
147 aac_src_send_command,
148 aac_src_get_outb_queue,
149 aac_src_set_outb_queue
152 /* Debugging and Diagnostics */
/*
 * Lookup table mapping firmware-reported CPU variant codes to printable
 * names; the {NULL-ish, 0} style terminator ("Unknown processor") catches
 * unrecognized codes. Used by aac_describe_controller().
 */
153 static struct aac_code_lookup aac_cpu_variant[] = {
154 {"i960JX", CPUI960_JX},
155 {"i960CX", CPUI960_CX},
156 {"i960HX", CPUI960_HX},
157 {"i960RX", CPUI960_RX},
158 {"i960 80303", CPUI960_80303},
159 {"StrongARM SA110", CPUARM_SA110},
160 {"PPC603e", CPUPPC_603e},
161 {"XScale 80321", CPU_XSCALE_80321},
162 {"MIPS 4KC", CPU_MIPS_4KC},
163 {"MIPS 5KC", CPU_MIPS_5KC},
164 {"Unknown StrongARM", CPUARM_xxx},
165 {"Unknown PowerPC", CPUPPC_xxx},
/* catch-all entry for codes not listed above */
167 {"Unknown processor", 0}
/*
 * Lookup table mapping firmware battery-platform codes to printable
 * descriptions; last entry is the catch-all for unknown codes.
 */
170 static struct aac_code_lookup aac_battery_platform[] = {
171 {"required battery present", PLATFORM_BAT_REQ_PRESENT},
172 {"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT},
173 {"optional battery present", PLATFORM_BAT_OPT_PRESENT},
174 {"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT},
175 {"no battery support", PLATFORM_BAT_NOT_SUPPORTED},
/* catch-all entry for codes not listed above */
177 {"unknown battery platform", 0}
179 static void aac_describe_controller(struct aac_softc *sc);
180 static char *aac_describe_code(struct aac_code_lookup *table,
183 /* Management Interface */
/*
 * cdev entry points and ioctl helpers backing /dev/aacraidN, used by the
 * Adaptec management tools (FIB passthrough, AIF event delivery, etc.).
 * NOTE(review): several prototypes below are truncated in this view of the
 * file (e.g. aac_describe_code, aac_get_container_info) — confirm the full
 * parameter lists against the complete source.
 */
184 static d_open_t aac_open;
185 static d_ioctl_t aac_ioctl;
186 static d_poll_t aac_poll;
187 #if __FreeBSD_version >= 702000
/* newer kernels track per-open state via cdevpriv; older ones use d_close */
188 static void aac_cdevpriv_dtor(void *arg);
190 static d_close_t aac_close;
192 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
193 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
194 static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
195 static void aac_request_aif(struct aac_softc *sc);
196 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
197 static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
198 static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
199 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
200 static int aac_return_aif(struct aac_softc *sc,
201 struct aac_fib_context *ctx, caddr_t uptr);
202 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
203 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
204 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
205 static void aac_ioctl_event(struct aac_softc *sc,
206 struct aac_event *event, void *arg);
207 static int aac_reset_adapter(struct aac_softc *sc);
208 static int aac_get_container_info(struct aac_softc *sc,
209 struct aac_fib *fib, int cid,
210 struct aac_mntinforesp *mir,
213 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
/*
 * Character-device switch for the /dev/aacraidN management node.
 * D_NEEDGIANT: the cdev entry points still rely on Giant serialization.
 * NOTE(review): several initializer fields (.d_open, .d_poll, .d_name) are
 * missing from this view — confirm against the complete source.
 */
215 static struct cdevsw aacraid_cdevsw = {
216 .d_version = D_VERSION,
217 .d_flags = D_NEEDGIANT,
219 #if __FreeBSD_version < 702000
/* pre-cdevpriv kernels need an explicit close entry point */
220 .d_close = aac_close,
222 .d_ioctl = aac_ioctl,
/* Dedicated malloc type so driver allocations are visible in vmstat -m. */
227 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
/* Root of the hw.aacraid sysctl tree for driver tunables. */
230 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
237 * Initialize the controller and softc
/*
 * Bus-attach body: verifies firmware support, allocates DMA resources and
 * FIBs, wires the interrupt, creates the management cdev and AIF kthread,
 * enumerates containers, attaches CAM children, then un-suspends the
 * controller and starts the periodic clock-sync daemon.
 * Returns 0 on success or an errno from one of the init steps.
 * NOTE(review): interior lines (declarations of error/unit/fib/uid, error
 * labels, and the do-loop head for container probing) are missing from this
 * view of the file.
 */
240 aacraid_attach(struct aac_softc *sc)
244 struct aac_mntinforesp mir;
245 int count = 0, i = 0;
248 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
249 sc->hint_flags = device_get_flags(sc->aac_dev);
251 * Initialize per-controller queues.
257 /* mark controller as suspended until we get ourselves organised */
258 sc->aac_state |= AAC_STATE_SUSPEND;
261 * Check that the firmware on the card is supported.
263 sc->msi_enabled = FALSE;
264 if ((error = aac_check_firmware(sc)) != 0)
270 mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
271 TAILQ_INIT(&sc->aac_container_tqh);
272 TAILQ_INIT(&sc->aac_ev_cmfree);
274 #if __FreeBSD_version >= 800000
275 /* Initialize the clock daemon callout. */
276 callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
279 * Initialize the adapter.
281 if ((error = aac_alloc(sc)) != 0)
283 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
284 aac_define_int_mode(sc);
285 if ((error = aac_init(sc)) != 0)
290 * Allocate and connect our interrupt.
292 if ((error = aac_setup_intr(sc)) != 0)
296 * Print a little information about the controller.
298 aac_describe_controller(sc);
301 * Make the control device.
303 unit = device_get_unit(sc->aac_dev);
304 sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
305 0640, "aacraid%d", unit);
306 sc->aac_dev_t->si_drv1 = sc;
308 /* Create the AIF thread */
309 if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
310 &sc->aifthread, 0, 0, "aacraid%daif", unit))
311 panic("Could not create AIF thread");
313 /* Register the shutdown method to only be called post-dump */
314 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
315 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
316 device_printf(sc->aac_dev,
317 "shutdown event registration failed\n");
319 /* Find containers */
320 mtx_lock(&sc->aac_io_lock);
321 aac_alloc_sync_fib(sc, &fib);
322 /* loop over possible containers */
/* first response also tells us how many containers exist (MntRespCount) */
324 if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
327 count = mir.MntRespCount;
328 aac_add_container(sc, &mir, 0, uid);
330 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
331 aac_release_sync_fib(sc);
332 mtx_unlock(&sc->aac_io_lock);
334 /* Register with CAM for the containers */
335 TAILQ_INIT(&sc->aac_sim_tqh);
336 aac_container_bus(sc);
337 /* Register with CAM for the non-DASD devices */
338 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
339 aac_get_bus_info(sc);
341 /* poke the bus to actually attach the child devices */
342 bus_generic_attach(sc->aac_dev);
344 /* mark the controller up */
345 sc->aac_state &= ~AAC_STATE_SUSPEND;
347 /* enable interrupts now */
348 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
350 #if __FreeBSD_version >= 800000
/* first clock-sync/periodic pass one minute after attach */
351 mtx_lock(&sc->aac_io_lock);
352 callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
353 mtx_unlock(&sc->aac_io_lock);
/* legacy timeout(9) path for pre-800000 kernels */
359 sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
/*
 * Periodic housekeeping callout: pushes the host wall-clock time to the
 * firmware via a SendHostTime FIB, then reschedules itself (every 30 min on
 * callout(9) kernels). Runs with (or takes) the per-controller I/O lock.
 * NOTE(review): interior lines (tv declaration/getmicrotime, fib assignment,
 * Header.Size lvalue) are missing from this view of the file.
 */
367 aac_daemon(void *arg)
369 struct aac_softc *sc;
371 struct aac_command *cm;
375 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
377 #if __FreeBSD_version >= 800000
378 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* bail if we were rescheduled or stopped while waiting to run */
379 if (callout_pending(&sc->aac_daemontime) ||
380 callout_active(&sc->aac_daemontime) == 0)
383 mtx_lock(&sc->aac_io_lock);
/* best-effort: skip this cycle if no command is free */
387 if (!aacraid_alloc_command(sc, &cm)) {
389 cm->cm_timestamp = time_uptime;
391 cm->cm_flags |= AAC_CMD_WAIT;
394 sizeof(struct aac_fib_header) + sizeof(u_int32_t);
395 fib->Header.XferState =
396 AAC_FIBSTATE_HOSTOWNED |
397 AAC_FIBSTATE_INITIALISED |
399 AAC_FIBSTATE_FROMHOST |
400 AAC_FIBSTATE_REXPECTED |
403 AAC_FIBSTATE_FAST_RESPONSE;
404 fib->Header.Command = SendHostTime;
405 *(uint32_t *)fib->data = tv.tv_sec;
407 aacraid_map_command_sg(cm, NULL, 0, 0);
408 aacraid_release_command(cm);
411 #if __FreeBSD_version >= 800000
/* re-arm for the next pass in 30 minutes */
412 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
414 mtx_unlock(&sc->aac_io_lock);
417 sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
/*
 * Queue an event callback for later dispatch; currently only
 * AAC_EVENT_CMFREE (fire when a command becomes free) is supported, anything
 * else is logged and dropped. Callers are expected to hold the I/O lock
 * (the queue is drained from aacraid_release_command under that lock).
 * NOTE(review): function tail is missing from this view of the file.
 */
422 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
425 switch (event->ev_type & AAC_EVENT_MASK) {
426 case AAC_EVENT_CMFREE:
427 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
430 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
439 * Request information of container #cid
/*
 * Issue a VM_NameServe* inquiry for container `cid`, filling *mir with the
 * mount-info response and *uid with the container's 32-bit UID (via a
 * follow-up CT_CID_TO_32BITS_UID ContainerConfig command). Takes two paths:
 * a caller-supplied sync FIB (sync_fib != NULL) or an async command when
 * sync_fib is NULL. Returns non-zero on probe failure.
 * NOTE(review): many interior lines (fib selection, bzero of mi, async-path
 * setup, CT-passthrough branch, return paths) are missing from this view of
 * the file — the two XferState/wait sequences below belong to the async path.
 */
442 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
443 struct aac_mntinforesp *mir, u_int32_t *uid)
445 struct aac_command *cm;
447 struct aac_mntinfo *mi;
448 struct aac_cnt_config *ccfg;
451 if (sync_fib == NULL) {
452 if (aacraid_alloc_command(sc, &cm)) {
453 device_printf(sc->aac_dev,
454 "Warning, no free command available\n");
462 mi = (struct aac_mntinfo *)&fib->data[0];
463 /* 4KB support?, 64-bit LBA? */
464 if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
465 mi->Command = VM_NameServeAllBlk;
466 else if (sc->flags & AAC_FLAGS_LBA_64BIT)
467 mi->Command = VM_NameServe64;
469 mi->Command = VM_NameServe;
470 mi->MntType = FT_FILESYS;
474 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
475 sizeof(struct aac_mntinfo))) {
476 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
480 cm->cm_timestamp = time_uptime;
484 sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
485 fib->Header.XferState =
486 AAC_FIBSTATE_HOSTOWNED |
487 AAC_FIBSTATE_INITIALISED |
489 AAC_FIBSTATE_FROMHOST |
490 AAC_FIBSTATE_REXPECTED |
493 AAC_FIBSTATE_FAST_RESPONSE;
494 fib->Header.Command = ContainerCommand;
495 if (aacraid_wait_command(cm) != 0) {
496 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
497 aacraid_release_command(cm);
/* copy the response out before the FIB is reused below */
501 bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
505 if (mir->MntTable[0].VolType != CT_NONE &&
506 !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
/* older firmware: assume 512-byte sectors, no logical/physical mapping */
507 if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
508 mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
509 mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
/* second request: translate container id to 32-bit UID */
511 ccfg = (struct aac_cnt_config *)&fib->data[0];
512 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
513 ccfg->Command = VM_ContainerConfig;
514 ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
515 ccfg->CTCommand.param[0] = cid;
518 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
519 sizeof(struct aac_cnt_config));
520 if (rval == 0 && ccfg->Command == ST_OK &&
521 ccfg->CTCommand.param[0] == CT_OK &&
522 mir->MntTable[0].VolType != CT_PASSTHRU)
523 *uid = ccfg->CTCommand.param[1];
526 sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
527 fib->Header.XferState =
528 AAC_FIBSTATE_HOSTOWNED |
529 AAC_FIBSTATE_INITIALISED |
531 AAC_FIBSTATE_FROMHOST |
532 AAC_FIBSTATE_REXPECTED |
535 AAC_FIBSTATE_FAST_RESPONSE;
536 fib->Header.Command = ContainerCommand;
537 rval = aacraid_wait_command(cm);
538 if (rval == 0 && ccfg->Command == ST_OK &&
539 ccfg->CTCommand.param[0] == CT_OK &&
540 mir->MntTable[0].VolType != CT_PASSTHRU)
541 *uid = ccfg->CTCommand.param[1];
542 aacraid_release_command(cm);
550 * Create a device to represent a new container
/*
 * Allocate an aac_container record from the mount-info response and link it
 * onto sc->aac_container_tqh for later CAM registration.
 * NOTE(review): interior lines (malloc flags / NULL check, uid assignment,
 * trailing brace) are missing from this view of the file.
 */
553 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
556 struct aac_container *co;
558 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
561 * Check container volume type for validity. Note that many of
562 * the possible types may never show up.
564 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
565 co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
568 panic("Out of memory?!");
572 bcopy(&mir->MntTable[0], &co->co_mntobj,
573 sizeof(struct aac_mntobj));
575 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
580 * Allocate resources associated with (sc)
/*
 * Create the three DMA tags (data buffers, FIBs, common area), allocate and
 * map the common area, then populate the free-command pool until
 * aac_max_fibs FIBs exist (or allocation fails). Returns 0 on success.
 * NOTE(review): interior lines (error returns, alignment arguments to the
 * FIB/common tag creates, trailing brace) are missing from this view.
 */
583 aac_alloc(struct aac_softc *sc)
587 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
590 * Create DMA tag for mapping buffers into controller-addressable space.
592 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
593 1, 0, /* algnmnt, boundary */
/* 64-bit SG capable adapters may DMA above 4GB; others are limited */
594 (sc->flags & AAC_FLAGS_SG_64BIT) ?
596 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
597 BUS_SPACE_MAXADDR, /* highaddr */
598 NULL, NULL, /* filter, filterarg */
599 sc->aac_max_sectors << 9, /* maxsize */
600 sc->aac_sg_tablesize, /* nsegments */
601 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
602 BUS_DMA_ALLOCNOW, /* flags */
603 busdma_lock_mutex, /* lockfunc */
604 &sc->aac_io_lock, /* lockfuncarg */
605 &sc->aac_buffer_dmat)) {
606 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
611 * Create DMA tag for mapping FIBs into controller-addressable space..
/* +31 leaves room for the 32-byte alignment fixup done per FIB later;
 * TYPE1 additionally needs space for the transport header */
613 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
614 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
615 sizeof(struct aac_fib_xporthdr) + 31);
617 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
618 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
619 1, 0, /* algnmnt, boundary */
620 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
621 BUS_SPACE_MAXADDR_32BIT :
622 0x7fffffff, /* lowaddr */
623 BUS_SPACE_MAXADDR, /* highaddr */
624 NULL, NULL, /* filter, filterarg */
625 maxsize, /* maxsize */
627 maxsize, /* maxsize */
629 NULL, NULL, /* No locking needed */
630 &sc->aac_fib_dmat)) {
631 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
636 * Create DMA tag for the common structure and allocate it.
/* common area carries one extra u_int32_t per FIB (host RRQ slots) */
638 maxsize = sizeof(struct aac_common);
639 maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
640 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
641 1, 0, /* algnmnt, boundary */
642 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
643 BUS_SPACE_MAXADDR_32BIT :
644 0x7fffffff, /* lowaddr */
645 BUS_SPACE_MAXADDR, /* highaddr */
646 NULL, NULL, /* filter, filterarg */
647 maxsize, /* maxsize */
649 maxsize, /* maxsegsize */
651 NULL, NULL, /* No locking needed */
652 &sc->aac_common_dmat)) {
653 device_printf(sc->aac_dev,
654 "can't allocate common structure DMA tag\n");
657 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
658 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
659 device_printf(sc->aac_dev, "can't allocate common structure\n");
/* load records the bus address via aac_common_map; doesn't bounce */
663 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
664 sc->aac_common, maxsize,
665 aac_common_map, sc, 0);
666 bzero(sc->aac_common, maxsize);
668 /* Allocate some FIBs and associated command structs */
669 TAILQ_INIT(&sc->aac_fibmap_tqh);
670 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
671 M_AACRAIDBUF, M_WAITOK|M_ZERO);
672 mtx_lock(&sc->aac_io_lock);
/* grow the FIB pool in aac_max_fibs_alloc-sized chunks until full or OOM */
673 while (sc->total_fibs < sc->aac_max_fibs) {
674 if (aac_alloc_commands(sc) != 0)
677 mtx_unlock(&sc->aac_io_lock);
678 if (sc->total_fibs == 0)
685 * Free all of the resources associated with (sc)
687 * Should not be called if the controller is active.
/*
 * Teardown mirror of aac_alloc()/aac_setup_intr(): destroys the cdev, FIB
 * pool and DMA tags, unloads/frees the common area, tears down up to
 * AAC_MAX_MSIX interrupt vectors, releases MSI and both register BARs.
 * Each resource is guarded by a NULL/validity check so this is safe to call
 * from partially-failed attach paths.
 */
690 aacraid_free(struct aac_softc *sc)
694 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
696 /* remove the control device */
697 if (sc->aac_dev_t != NULL)
698 destroy_dev(sc->aac_dev_t);
700 /* throw away any FIB buffers, discard the FIB DMA tag */
701 aac_free_commands(sc);
702 if (sc->aac_fib_dmat)
703 bus_dma_tag_destroy(sc->aac_fib_dmat);
705 free(sc->aac_commands, M_AACRAIDBUF);
707 /* destroy the common area */
708 if (sc->aac_common) {
709 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
710 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
711 sc->aac_common_dmamap);
713 if (sc->aac_common_dmat)
714 bus_dma_tag_destroy(sc->aac_common_dmat);
716 /* disconnect the interrupt handler */
717 for (i = 0; i < AAC_MAX_MSIX; ++i) {
719 bus_teardown_intr(sc->aac_dev,
720 sc->aac_irq[i], sc->aac_intr[i]);
722 bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
723 sc->aac_irq_rid[i], sc->aac_irq[i]);
728 pci_release_msi(sc->aac_dev);
730 /* destroy data-transfer DMA tag */
731 if (sc->aac_buffer_dmat)
732 bus_dma_tag_destroy(sc->aac_buffer_dmat);
734 /* destroy the parent DMA tag */
735 if (sc->aac_parent_dmat)
736 bus_dma_tag_destroy(sc->aac_parent_dmat);
738 /* release the register window mapping */
739 if (sc->aac_regs_res0 != NULL)
740 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
741 sc->aac_regs_rid0, sc->aac_regs_res0);
742 if (sc->aac_regs_res1 != NULL)
743 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
744 sc->aac_regs_rid1, sc->aac_regs_res1);
748 * Disconnect from the controller completely, in preparation for unload.
/*
 * Device-detach body: stop the periodic daemon, free container records,
 * delete child CAM SIM devices, ask the AIF kthread to exit (waiting up to
 * 30s), shut down the firmware, deregister the shutdown eventhandler and
 * destroy the I/O lock. Returns non-zero if a child delete or shutdown
 * fails.
 * NOTE(review): interior lines (sim/error declarations, error checks,
 * aacraid_free call, return) are missing from this view of the file.
 */
751 aacraid_detach(device_t dev)
753 struct aac_softc *sc;
754 struct aac_container *co;
758 sc = device_get_softc(dev);
759 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
761 #if __FreeBSD_version >= 800000
762 callout_drain(&sc->aac_daemontime);
764 untimeout(aac_daemon, (void *)sc, sc->timeout_id);
766 /* Remove the child containers */
767 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
768 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
769 free(co, M_AACRAIDBUF);
772 /* Remove the CAM SIMs */
773 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
774 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
775 error = device_delete_child(dev, sim->sim_dev);
778 free(sim, M_AACRAIDBUF);
781 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
/* signal the AIF thread and wait for it to acknowledge the exit */
782 sc->aifflags |= AAC_AIFFLAGS_EXIT;
783 wakeup(sc->aifthread);
784 tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
787 if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
788 panic("Cannot shutdown AIF thread");
790 if ((error = aacraid_shutdown(dev)))
793 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
797 mtx_destroy(&sc->aac_io_lock);
803 * Bring the controller down to a dormant state and detach all child devices.
805 * This function is called before detach or system shutdown.
807 * Note that we can assume that the bioq on the controller is empty, as we won't
808 * allow shutdown if any device is open.
/*
 * Suspend the controller, send VM_CloseAll (ContainerId 0xfffffffe = all
 * containers) via a sync FIB, then disable interrupts.
 * NOTE(review): interior lines (fib declaration, the HostShutdown step the
 * comment above mentions, return) are missing from this view of the file.
 */
811 aacraid_shutdown(device_t dev)
813 struct aac_softc *sc;
815 struct aac_close_command *cc;
817 sc = device_get_softc(dev);
818 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
820 sc->aac_state |= AAC_STATE_SUSPEND;
823 * Send a Container shutdown followed by a HostShutdown FIB to the
824 * controller to convince it that we don't want to talk to it anymore.
825 * We've been closed and all I/O completed already
827 device_printf(sc->aac_dev, "shutting down controller...");
829 mtx_lock(&sc->aac_io_lock);
830 aac_alloc_sync_fib(sc, &fib);
831 cc = (struct aac_close_command *)&fib->data[0];
833 bzero(cc, sizeof(struct aac_close_command));
834 cc->Command = VM_CloseAll;
/* magic "all containers" id */
835 cc->ContainerId = 0xfffffffe;
836 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
837 sizeof(struct aac_close_command)))
842 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
843 aac_release_sync_fib(sc);
844 mtx_unlock(&sc->aac_io_lock);
850 * Bring the controller to a quiescent state, ready for system suspend.
/*
 * Suspend method: mark the softc suspended and mask controller interrupts.
 * No firmware command is sent here; aacraid_resume() reverses both steps.
 */
853 aacraid_suspend(device_t dev)
855 struct aac_softc *sc;
857 sc = device_get_softc(dev);
859 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
860 sc->aac_state |= AAC_STATE_SUSPEND;
862 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
867 * Bring the controller back to a state ready for operation.
/*
 * Resume method: clear the suspend flag and re-enable controller
 * interrupts — the exact inverse of aacraid_suspend().
 */
870 aacraid_resume(device_t dev)
872 struct aac_softc *sc;
874 sc = device_get_softc(dev);
876 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
877 sc->aac_state &= ~AAC_STATE_SUSPEND;
878 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
883 * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
/*
 * Per-vector interrupt handler. Decodes the doorbell (MSI vs INTx layouts
 * differ), completes any outstanding synchronous command, drains the
 * host response ring (aac_host_rrq) for this vector — distinguishing fast
 * responses, AIF notifications and normal completions — then requests more
 * AIFs and restarts queued I/O. All work is done under the I/O lock.
 * NOTE(review): interior lines (sc/fib assignment from ctx, sync-command
 * else-branch wakeup, ring-drain loop head, INTx AIF handling) are missing
 * from this view of the file.
 */
886 aacraid_new_intr_type1(void *arg)
888 struct aac_msix_ctx *ctx;
889 struct aac_softc *sc;
891 struct aac_command *cm;
893 u_int32_t bellbits, bellbits_shifted, index, handle;
894 int isFastResponse, isAif, noMoreAif, mode;
896 ctx = (struct aac_msix_ctx *)arg;
898 vector_no = ctx->vector_no;
900 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
901 mtx_lock(&sc->aac_io_lock);
903 if (sc->msi_enabled) {
904 mode = AAC_INT_MODE_MSI;
/* vector 0 also carries AIF (bit 18) and sync (bit 12) doorbells */
905 if (vector_no == 0) {
906 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
907 if (bellbits & 0x40000)
908 mode |= AAC_INT_MODE_AIF;
909 else if (bellbits & 0x1000)
910 mode |= AAC_INT_MODE_SYNC;
913 mode = AAC_INT_MODE_INTX;
914 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
915 if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
916 bellbits = AAC_DB_RESPONSE_SENT_NS;
917 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
919 bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
920 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
921 if (bellbits_shifted & AAC_DB_AIF_PENDING)
922 mode |= AAC_INT_MODE_AIF;
923 else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
924 mode |= AAC_INT_MODE_SYNC;
926 /* ODR readback, Prep #238630 */
927 AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
930 if (mode & AAC_INT_MODE_SYNC) {
931 if (sc->aac_sync_cm) {
932 cm = sc->aac_sync_cm;
933 cm->cm_flags |= AAC_CMD_COMPLETED;
934 /* is there a completion handler? */
935 if (cm->cm_complete != NULL) {
938 /* assume that someone is sleeping on this command */
941 sc->flags &= ~AAC_QUEUE_FRZN;
942 sc->aac_sync_cm = NULL;
947 if (mode & AAC_INT_MODE_AIF) {
948 if (mode & AAC_INT_MODE_INTX) {
955 /* handle async. status */
956 index = sc->aac_host_rrq_idx[vector_no];
958 isFastResponse = isAif = noMoreAif = 0;
959 /* remove toggle bit (31) */
960 handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
961 /* check fast response bit (30) */
962 if (handle & 0x40000000)
964 /* check AIF bit (23) */
965 else if (handle & 0x00800000)
/* low 16 bits are the 1-based command index into aac_commands */
967 handle &= 0x0000ffff;
971 cm = sc->aac_commands + (handle - 1);
973 sc->aac_rrq_outstanding[vector_no]--;
975 noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
977 aac_handle_aif(sc, fib);
979 aacraid_release_command(cm);
981 if (isFastResponse) {
/* firmware skipped the response FIB; synthesize an ST_OK result */
982 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
983 *((u_int32_t *)(fib->data)) = ST_OK;
984 cm->cm_flags |= AAC_CMD_FASTRESP;
987 aac_unmap_command(cm);
988 cm->cm_flags |= AAC_CMD_COMPLETED;
990 /* is there a completion handler? */
991 if (cm->cm_complete != NULL) {
994 /* assume that someone is sleeping on this command */
997 sc->flags &= ~AAC_QUEUE_FRZN;
/* consume the ring slot and advance within this vector's window */
1000 sc->aac_common->ac_host_rrq[index++] = 0;
1001 if (index == (vector_no + 1) * sc->aac_vector_cap)
1002 index = vector_no * sc->aac_vector_cap;
1003 sc->aac_host_rrq_idx[vector_no] = index;
1005 if ((isAif && !noMoreAif) || sc->aif_pending)
1006 aac_request_aif(sc);
1010 if (mode & AAC_INT_MODE_AIF) {
1011 aac_request_aif(sc);
1012 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1016 /* see if we can start some more I/O */
1017 if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1018 aacraid_startio(sc);
1019 mtx_unlock(&sc->aac_io_lock);
1023 * Handle notification of one or more FIBs coming from the controller.
/*
 * Body of the per-controller "aacraidNaif" kthread. Loops until
 * AAC_AIFFLAGS_EXIT is set: sleeps up to AAC_PERIODIC_INTERVAL seconds,
 * allocates more FIBs on demand (AAC_AIFFLAGS_ALLOCFIBS), runs periodic
 * checks on timeout (EWOULDBLOCK), and drains the firmware printf buffer.
 * On exit it clears RUNNING, wakes the detach waiter sleeping on
 * sc->aac_dev, and terminates itself.
 * NOTE(review): interior lines (retval declaration, timeout-path body,
 * loop close) are missing from this view of the file.
 */
1026 aac_command_thread(struct aac_softc *sc)
1030 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1032 mtx_lock(&sc->aac_io_lock);
1033 sc->aifflags = AAC_AIFFLAGS_RUNNING;
1035 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
/* skip the sleep when work is already pending */
1038 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1039 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1040 "aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1043 * First see if any FIBs need to be allocated. This needs
1044 * to be called without the driver lock because contigmalloc
1045 * will grab Giant, and would result in an LOR.
1047 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1048 aac_alloc_commands(sc);
1049 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1050 aacraid_startio(sc);
1054 * While we're here, check to see if any commands are stuck.
1055 * This is pretty low-priority, so it's ok if it doesn't
1058 if (retval == EWOULDBLOCK)
1061 /* Check the hardware printf message buffer */
1062 if (sc->aac_common->ac_printf[0] != 0)
1063 aac_print_printf(sc);
1065 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1066 mtx_unlock(&sc->aac_io_lock);
/* let aacraid_detach() know we are gone */
1067 wakeup(sc->aac_dev);
1069 aac_kthread_exit(0);
1073 * Submit a command to the controller, return when it completes.
1074 * XXX This is very dangerous! If the card has gone out to lunch, we could
1075 * be stuck here forever. At the same time, signals are not caught
1076 * because there is a risk that a signal could wakeup the sleep before
1077 * the card has a chance to complete the command. Since there is no way
1078 * to cancel a command that is in progress, we can't protect against the
1079 * card completing a command late and spamming the command and data
1080 * memory. So, we are held hostage until the command completes.
/*
 * Enqueue cm on the ready queue, kick I/O, and msleep on the command until
 * the interrupt path wakes us. Caller must hold the I/O lock. Returns the
 * msleep result (0 on normal wakeup); the timeout is 0, i.e. unbounded —
 * see the warning above.
 */
1083 aacraid_wait_command(struct aac_command *cm)
1085 struct aac_softc *sc;
1089 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1090 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1092 /* Put the command on the ready queue and get things going */
1093 aac_enqueue_ready(cm);
1094 aacraid_startio(sc);
1095 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1100 *Command Buffer Management
1104 * Allocate a command.
/*
 * Pop a command off the free list into *cmp. If the list is empty but the
 * FIB pool can still grow, flag the AIF thread to allocate more
 * (AAC_AIFFLAGS_ALLOCFIBS) and wake it; the caller gets a failure either
 * way until new FIBs arrive.
 * NOTE(review): the failure/success return paths are missing from this view
 * of the file.
 */
1107 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1109 struct aac_command *cm;
1111 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1113 if ((cm = aac_dequeue_free(sc)) == NULL) {
1114 if (sc->total_fibs < sc->aac_max_fibs) {
1115 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1116 wakeup(sc->aifthread);
1126 * Release a command back to the freelist.
/*
 * Reset the command/FIB header fields to their idle state, push the command
 * onto the free list, then fire (and dequeue) every queued CMFREE event so
 * waiters blocked on command exhaustion can retry. Caller must hold the
 * I/O lock.
 */
1129 aacraid_release_command(struct aac_command *cm)
1131 struct aac_event *event;
1132 struct aac_softc *sc;
1135 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1136 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1138 /* (re)initialize the command/FIB */
1139 cm->cm_sgtable = NULL;
1141 cm->cm_complete = NULL;
1143 cm->cm_passthr_dmat = 0;
1144 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1145 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1146 cm->cm_fib->Header.Unused = 0;
1147 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1150 * These are duplicated in aac_start to cover the case where an
1151 * intermediate stage may have destroyed them. They're left
1152 * initialized here for debugging purposes only.
1154 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1155 cm->cm_fib->Header.Handle = 0;
1157 aac_enqueue_free(cm);
1160 * Dequeue all events so that there's no risk of events getting
1163 while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1164 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1165 event->ev_callback(sc, event, event->ev_arg);
1170 * Map helper for command/FIB allocation.
/*
 * bus_dmamap_load callback: stores the single segment's bus address into
 * the uint64_t pointed to by arg (the FIB chunk is allocated as one
 * contiguous segment).
 * NOTE(review): the function tail is missing from this view of the file.
 */
1173 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1177 fibphys = (uint64_t *)arg;
1179 *fibphys = segs[0].ds_addr;
1183 * Allocate and initialize commands/FIBs for this adapter.
/*
 * Grow the FIB pool by one chunk of aac_max_fibs_alloc commands: allocate a
 * fibmap, DMA-allocate and load the FIB memory (dropping the I/O lock
 * around the allocation to avoid an LOR with Giant), then carve the chunk
 * into 32-byte-aligned FIBs, create a data DMA map per command, and release
 * each command onto the free list. Returns 0 on success, non-zero on
 * allocation failure. Caller must hold the I/O lock.
 * NOTE(review): interior lines (fibphys/maxsize/error declarations, the
 * fm==NULL check, total_fibs increment, return statements and the unwind
 * label) are missing from this view of the file.
 */
1186 aac_alloc_commands(struct aac_softc *sc)
1188 struct aac_command *cm;
1189 struct aac_fibmap *fm;
1194 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1195 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* refuse to overshoot the configured FIB budget */
1197 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1200 fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
/* drop the lock: contigmalloc under the DMA alloc may grab Giant (LOR) */
1204 mtx_unlock(&sc->aac_io_lock);
1205 /* allocate the FIBs in DMAable memory and load them */
1206 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1207 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1208 device_printf(sc->aac_dev,
1209 "Not enough contiguous memory available.\n");
1210 free(fm, M_AACRAIDBUF);
1211 mtx_lock(&sc->aac_io_lock);
/* per-FIB stride: fib size + 31 alignment slack (+ transport hdr on TYPE1) */
1215 maxsize = sc->aac_max_fib_size + 31;
1216 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1217 maxsize += sizeof(struct aac_fib_xporthdr);
1218 /* Ignore errors since this doesn't bounce */
1219 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1220 sc->aac_max_fibs_alloc * maxsize,
1221 aac_map_command_helper, &fibphys, 0);
1222 mtx_lock(&sc->aac_io_lock);
1224 /* initialize constant fields in the command structure */
1225 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1226 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1227 cm = sc->aac_commands + sc->total_fibs;
1228 fm->aac_commands = cm;
1230 cm->cm_fib = (struct aac_fib *)
1231 ((u_int8_t *)fm->aac_fibs + i * maxsize);
1232 cm->cm_fibphys = fibphys + i * maxsize;
/* round the FIB up to a 32-byte boundary; TYPE1 reserves the
 * transport header immediately before the aligned FIB */
1233 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1234 u_int64_t fibphys_aligned;
1236 (cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1237 cm->cm_fib = (struct aac_fib *)
1238 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1239 cm->cm_fibphys = fibphys_aligned;
1241 u_int64_t fibphys_aligned;
1242 fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1243 cm->cm_fib = (struct aac_fib *)
1244 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1245 cm->cm_fibphys = fibphys_aligned;
1247 cm->cm_index = sc->total_fibs;
1249 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1250 &cm->cm_datamap)) != 0)
/* keep one command in reserve (not released to the free list) */
1252 if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1253 aacraid_release_command(cm);
1258 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1259 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
/* unwind path: nothing from this chunk was usable */
1263 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1264 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1265 free(fm, M_AACRAIDBUF);
1270 * Free FIBs owned by this adapter.
/*
 * Tear down all FIB allocations made by aac_alloc_commands(): walk the
 * fibmap list, destroy each command's data DMA map, then unload/free the
 * FIB DMA block and the fibmap itself.
 */
1273 aac_free_commands(struct aac_softc *sc)
1275 struct aac_fibmap *fm;
1276 struct aac_command *cm;
1279 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1281 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1283 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1285 * We check against total_fibs to handle partially
/* total_fibs-- in the condition stops the loop at a partial batch. */
1288 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1289 cm = fm->aac_commands + i;
1290 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1292 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1293 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1294 free(fm, M_AACRAIDBUF);
1299 * Command-mapping helper function - populate this command's s/g table.
/*
 * bus_dmamap_load() callback for a command's data buffer: populate the
 * command's scatter/gather table in the FIB in whichever of the four
 * formats the FIB command requires (RawIo2 IEEE-1212 SGEs, RawIo raw
 * entries, 32-bit, or 64-bit SG entries), fix up the FIB header address
 * fields, pre-sync the data map, and finally submit the command (sync
 * mode, sync-wait, or via AAC_SEND_COMMAND with requeue-on-busy).
 * NOTE(review): many lines are elided in this view; comments describe
 * only what is visible.
 */
1302 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1304 struct aac_softc *sc;
1305 struct aac_command *cm;
1306 struct aac_fib *fib;
1309 cm = (struct aac_command *)arg;
1312 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1313 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1315 /* copy into the FIB */
1316 if (cm->cm_sgtable != NULL) {
1317 if (fib->Header.Command == RawIo2) {
1318 struct aac_raw_io2 *raw;
1319 struct aac_sge_ieee1212 *sg;
1320 u_int32_t min_size = PAGE_SIZE, cur_size;
1321 int conformable = TRUE;
1323 raw = (struct aac_raw_io2 *)&fib->data[0];
1324 sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
/*
 * Copy segments while checking "conformability": all middle segments
 * should share one nominal size; track the minimum seen otherwise.
 */
1327 for (i = 0; i < nseg; i++) {
1328 cur_size = segs[i].ds_len;
1330 *(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1331 sg[i].length = cur_size;
1334 raw->sgeFirstSize = cur_size;
1335 } else if (i == 1) {
1336 raw->sgeNominalSize = cur_size;
1337 min_size = cur_size;
1338 } else if ((i+1) < nseg &&
1339 cur_size != raw->sgeNominalSize) {
1340 conformable = FALSE;
1341 if (cur_size < min_size)
1342 min_size = cur_size;
1346 /* not conformable: evaluate required sg elements */
1348 int j, err_found, nseg_new = nseg;
/* Find the largest page-multiple chunk that evenly splits all middles. */
1349 for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1352 for (j = 1; j < nseg - 1; ++j) {
1353 if (sg[j].length % (i*PAGE_SIZE)) {
1357 nseg_new += (sg[j].length / (i*PAGE_SIZE));
/* hint_flags bit 2 disables the conversion — TODO confirm meaning. */
1362 if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1363 !(sc->hint_flags & 4))
1364 nseg = aac_convert_sgraw2(sc,
1365 raw, i, nseg, nseg_new);
1367 raw->flags |= RIO2_SGL_CONFORMANT;
1370 /* update the FIB size for the s/g count */
1371 fib->Header.Size += nseg *
1372 sizeof(struct aac_sge_ieee1212);
1374 } else if (fib->Header.Command == RawIo) {
1375 struct aac_sg_tableraw *sg;
1376 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1378 for (i = 0; i < nseg; i++) {
1379 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1380 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1381 sg->SgEntryRaw[i].Next = 0;
1382 sg->SgEntryRaw[i].Prev = 0;
1383 sg->SgEntryRaw[i].Flags = 0;
1385 /* update the FIB size for the s/g count */
1386 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1387 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1388 struct aac_sg_table *sg;
1389 sg = cm->cm_sgtable;
1391 for (i = 0; i < nseg; i++) {
1392 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1393 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1395 /* update the FIB size for the s/g count */
1396 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1398 struct aac_sg_table64 *sg;
1399 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1401 for (i = 0; i < nseg; i++) {
1402 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1403 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1405 /* update the FIB size for the s/g count */
1406 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1410 /* Fix up the address values in the FIB. Use the command array index
1411 * instead of a pointer since these fields are only 32 bits. Shift
1412 * the SenderFibAddress over to make room for the fast response bit
1413 * and for the AIF bit
1415 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1416 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1418 /* save a pointer to the command for speedy reverse-lookup */
1419 cm->cm_fib->Header.Handle += cm->cm_index + 1;
/* Pre-I/O cache sync, unless a pass-through DMA tag owns the buffer. */
1421 if (cm->cm_passthr_dmat == 0) {
1422 if (cm->cm_flags & AAC_CMD_DATAIN)
1423 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1424 BUS_DMASYNC_PREREAD);
1425 if (cm->cm_flags & AAC_CMD_DATAOUT)
1426 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1427 BUS_DMASYNC_PREWRITE);
1430 cm->cm_flags |= AAC_CMD_MAPPED;
/* Submit: sync mode / sync-wait / async with busy-spin and requeue. */
1432 if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1434 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1435 } else if (cm->cm_flags & AAC_CMD_WAIT) {
1436 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1438 int count = 10000000L;
1439 while (AAC_SEND_COMMAND(sc, cm) != 0) {
/* Adapter busy: unmap, freeze the queue, and put the command back. */
1441 aac_unmap_command(cm);
1442 sc->flags |= AAC_QUEUE_FRZN;
1443 aac_requeue_ready(cm);
1445 DELAY(5); /* wait 5 usec. */
/*
 * Rewrite a non-conformant RawIo2 SG list into a conformant one: split
 * every middle segment into uniform chunks of (pages * PAGE_SIZE) bytes,
 * keeping the first and last segments as-is, and mark the list conformant.
 * Returns the new element count via raw->sgeCnt (and, per the caller,
 * as its return value — the return statement is elided in this view).
 */
1452 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1453 int pages, int nseg, int nseg_new)
1455 struct aac_sge_ieee1212 *sge;
/* Scratch copy of the expanded list; built here, copied back below. */
1459 sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1460 M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1464 for (i = 1, pos = 1; i < nseg - 1; ++i) {
1465 for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1466 addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1467 sge[pos].addrLow = addr_low;
1468 sge[pos].addrHigh = raw->sge[i].addrHigh;
/* 32-bit low-word wrapped around: carry into the high word. */
1469 if (addr_low < raw->sge[i].addrLow)
1470 sge[pos].addrHigh++;
1471 sge[pos].length = pages * PAGE_SIZE;
/* Last original segment is carried over unchanged. */
1476 sge[pos] = raw->sge[nseg-1];
1477 for (i = 1; i < nseg_new; ++i)
1478 raw->sge[i] = sge[i];
1480 free(sge, M_AACRAIDBUF);
1481 raw->sgeCnt = nseg_new;
1482 raw->flags |= RIO2_SGL_CONFORMANT;
1483 raw->sgeNominalSize = pages * PAGE_SIZE;
1489 * Unmap a command from controller-visible space.
/*
 * Undo aacraid_map_command_sg(): post-I/O cache sync (when the buffer is
 * not owned by a pass-through DMA tag), unload the data DMA map, and clear
 * AAC_CMD_MAPPED.  No-op if the command was never mapped.
 */
1492 aac_unmap_command(struct aac_command *cm)
1494 struct aac_softc *sc;
1497 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1499 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1502 if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1503 if (cm->cm_flags & AAC_CMD_DATAIN)
1504 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1505 BUS_DMASYNC_POSTREAD);
1506 if (cm->cm_flags & AAC_CMD_DATAOUT)
1507 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1508 BUS_DMASYNC_POSTWRITE);
1510 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1512 cm->cm_flags &= ~AAC_CMD_MAPPED;
1516 * Hardware Interface
1520 * Initialize the adapter.
/*
 * bus_dmamap_load() callback for the adapter's shared "common" area:
 * records the bus address of the first DMA segment in the softc.
 */
1523 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1525 struct aac_softc *sc;
1527 sc = (struct aac_softc *)arg;
1528 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1530 sc->aac_common_busaddr = segs[0].ds_addr;
/*
 * Probe firmware state and negotiate driver options: wait out any flash
 * update and adapter boot, reject unsupported PERC2/QC 1.x firmware, read
 * the supported-options word (64-bit SG, new-comm types, 64-bit arrays),
 * enforce sync mode where required, remap the register BAR if the ATU is
 * larger than the current mapping, and read preferred FIB/sector/MSI-X
 * settings via AAC_MONKER_GETCOMMPREF.
 * NOTE(review): many lines are elided in this view (returns, loop heads,
 * braces); comments describe only the visible code.
 */
1534 aac_check_firmware(struct aac_softc *sc)
1536 u_int32_t code, major, minor, maxsize;
1537 u_int32_t options = 0, atu_size = 0, status, waitCount;
1540 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1542 /* check if flash update is running */
1543 if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
/* Poll firmware status until the flash update succeeds or fails. */
1546 code = AAC_GET_FWSTATUS(sc);
1547 if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1548 device_printf(sc->aac_dev,
1549 "FATAL: controller not coming ready, "
1550 "status %x\n", code);
1553 } while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1555 * Delay 10 seconds. Because right now FW is doing a soft reset,
1556 * do not read scratch pad register at this time
1558 waitCount = 10 * 10000;
1560 DELAY(100); /* delay 100 microseconds */
1566 * Wait for the adapter to come ready.
1570 code = AAC_GET_FWSTATUS(sc);
1571 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1572 device_printf(sc->aac_dev,
1573 "FATAL: controller not coming ready, "
1574 "status %x\n", code);
1577 } while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1580 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1581 * firmware version 1.x are not compatible with this driver.
1583 if (sc->flags & AAC_FLAGS_PERC2QC) {
1584 if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1586 device_printf(sc->aac_dev,
1587 "Error reading firmware version\n");
1591 /* These numbers are stored as ASCII! */
1592 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1593 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1595 device_printf(sc->aac_dev,
1596 "Firmware version %d.%d is not supported.\n",
1602 * Retrieve the capabilities/supported options word so we know what
1603 * work-arounds to enable. Some firmware revs don't support this
1606 if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1607 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1608 device_printf(sc->aac_dev,
1609 "RequestAdapterInfo failed\n");
/* Mailbox 1 = options word, mailbox 2 = required ATU window size. */
1613 options = AAC_GET_MAILBOX(sc, 1);
1614 atu_size = AAC_GET_MAILBOX(sc, 2);
1615 sc->supported_options = options;
1617 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1618 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1619 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1620 if (options & AAC_SUPPORTED_NONDASD)
1621 sc->flags |= AAC_FLAGS_ENABLE_CAM;
/* 64-bit SG only when the host is 64-bit and the hint enables it. */
1622 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1623 && (sizeof(bus_addr_t) > 4)
1624 && (sc->hint_flags & 0x1)) {
1625 device_printf(sc->aac_dev,
1626 "Enabling 64-bit address support\n");
1627 sc->flags |= AAC_FLAGS_SG_64BIT;
1629 if (sc->aac_if.aif_send_command) {
1630 if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1631 (options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1632 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1633 else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1634 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1635 else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1636 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1638 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1639 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1642 if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1643 device_printf(sc->aac_dev, "Communication interface not supported!\n");
/* hint_flags bit 1 forces sync mode — TODO confirm hint encoding. */
1647 if (sc->hint_flags & 2) {
1648 device_printf(sc->aac_dev,
1649 "Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1650 sc->flags |= AAC_FLAGS_SYNC_MODE;
1651 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1652 device_printf(sc->aac_dev,
1653 "Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1654 sc->flags |= AAC_FLAGS_SYNC_MODE;
1657 /* Check for broken hardware that does a lower number of commands */
1658 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1660 /* Remap mem. resource, if required */
1661 if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1662 bus_release_resource(
1663 sc->aac_dev, SYS_RES_MEMORY,
1664 sc->aac_regs_rid0, sc->aac_regs_res0);
1665 sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1666 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1667 atu_size, RF_ACTIVE);
/* Fall back to the original-size mapping if the bigger one fails. */
1668 if (sc->aac_regs_res0 == NULL) {
1669 sc->aac_regs_res0 = bus_alloc_resource_any(
1670 sc->aac_dev, SYS_RES_MEMORY,
1671 &sc->aac_regs_rid0, RF_ACTIVE);
1672 if (sc->aac_regs_res0 == NULL) {
1673 device_printf(sc->aac_dev,
1674 "couldn't allocate register window\n");
1678 sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1679 sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1682 /* Read preferred settings */
1683 sc->aac_max_fib_size = sizeof(struct aac_fib);
1684 sc->aac_max_sectors = 128; /* 64KB */
1685 sc->aac_max_aif = 1;
1686 if (sc->flags & AAC_FLAGS_SG_64BIT)
1687 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1688 - sizeof(struct aac_blockwrite64))
1689 / sizeof(struct aac_sg_entry64);
1691 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1692 - sizeof(struct aac_blockwrite))
1693 / sizeof(struct aac_sg_entry);
/* Firmware-preferred limits override the defaults above. */
1695 if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1696 options = AAC_GET_MAILBOX(sc, 1);
1697 sc->aac_max_fib_size = (options & 0xFFFF);
1698 sc->aac_max_sectors = (options >> 16) << 1;
1699 options = AAC_GET_MAILBOX(sc, 2);
1700 sc->aac_sg_tablesize = (options >> 16);
1701 options = AAC_GET_MAILBOX(sc, 3);
1702 sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1703 if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1704 sc->aac_max_fibs = (options & 0xFFFF);
1705 options = AAC_GET_MAILBOX(sc, 4);
1706 sc->aac_max_aif = (options & 0xFFFF);
1707 options = AAC_GET_MAILBOX(sc, 5);
1708 sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
/* Per-FIB slot must fit in a page; shrink the FIB size if it doesn't. */
1711 maxsize = sc->aac_max_fib_size + 31;
1712 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1713 maxsize += sizeof(struct aac_fib_xporthdr);
1714 if (maxsize > PAGE_SIZE) {
1715 sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1716 maxsize = PAGE_SIZE;
1718 sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1720 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1721 sc->flags |= AAC_FLAGS_RAW_IO;
1722 device_printf(sc->aac_dev, "Enable Raw I/O\n");
1724 if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1725 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1726 sc->flags |= AAC_FLAGS_LBA_64BIT;
1727 device_printf(sc->aac_dev, "Enable 64-bit array\n");
1730 #ifdef AACRAID_DEBUG
1731 aacraid_get_fw_debug_buffer(sc);
/*
 * Build the adapter init structure in the shared common area (FIB queue
 * addresses, printf buffer, host memory size, RRQ base, MSI-X vector
 * count, per-comm-type revision/flags) and hand it to the controller via
 * AAC_MONKER_INITSTRUCT, then run aac_check_config().
 * NOTE(review): some lines are elided in this view; comments describe
 * only the visible code.
 */
1737 aac_init(struct aac_softc *sc)
1739 struct aac_adapter_init *ip;
1742 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1744 /* reset rrq index */
1745 sc->aac_fibs_pushed_no = 0;
1746 for (i = 0; i < sc->aac_max_msix; i++)
1747 sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1750 * Fill in the init structure. This tells the adapter about the
1751 * physical location of various important shared data structures.
1753 ip = &sc->aac_common->ac_init;
1754 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1755 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1756 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1757 sc->flags |= AAC_FLAGS_RAW_IO;
1759 ip->NoOfMSIXVectors = sc->aac_max_msix;
1761 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1762 offsetof(struct aac_common, ac_fibs);
1763 ip->AdapterFibsVirtualAddress = 0;
1764 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1765 ip->AdapterFibAlign = sizeof(struct aac_fib);
1767 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1768 offsetof(struct aac_common, ac_printf);
1769 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1772 * The adapter assumes that pages are 4K in size, except on some
1773 * broken firmware versions that do the page->byte conversion twice,
1774 * therefore 'assuming' that this value is in 16MB units (2^24).
1775 * Round up since the granularity is so high.
1777 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1778 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1779 ip->HostPhysMemPages =
1780 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1782 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */
1784 ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
/* Comm-type-specific init revision and capability flags. */
1785 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1786 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1787 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1788 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1789 device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1790 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1791 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1792 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1793 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1794 device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1796 ip->MaxNumAif = sc->aac_max_aif;
1797 ip->HostRRQ_AddrLow =
1798 sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1799 /* always 32-bit address */
1800 ip->HostRRQ_AddrHigh = 0;
1802 if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1803 ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1804 ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1805 device_printf(sc->aac_dev, "Power Management enabled\n");
1808 ip->MaxIoCommands = sc->aac_max_fibs;
1809 ip->MaxIoSize = sc->aac_max_sectors << 9;
1810 ip->MaxFibSize = sc->aac_max_fib_size;
1813 * Do controller-type-specific initialisation
1815 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1818 * Give the init structure to the controller.
1820 if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1821 sc->aac_common_busaddr +
1822 offsetof(struct aac_common, ac_init), 0, 0, 0,
1824 device_printf(sc->aac_dev,
1825 "error establishing init structure\n");
1831 * Check configuration issues
1833 if ((error = aac_check_config(sc)) != 0)
/*
 * Choose the interrupt mode: try MSI-X (capped at AAC_MAX_MSIX and the
 * firmware's aac_max_msix), fall back to MSI, then legacy INTx.  Verifies
 * the controller-side MSI enable bit in PCI config space and computes
 * aac_vector_cap (FIBs per vector).
 * NOTE(review): some lines (returns, else branches) are elided in this
 * view; comments describe only the visible code.
 */
1842 aac_define_int_mode(struct aac_softc *sc)
1845 int cap, msi_count, error = 0;
1850 /* max. vectors from AAC_MONKER_GETCOMMPREF */
1851 if (sc->aac_max_msix == 0) {
1852 sc->aac_max_msix = 1;
1853 sc->aac_vector_cap = sc->aac_max_fibs;
/* Cap requested vectors by hardware, driver, and firmware limits. */
1858 msi_count = pci_msix_count(dev);
1859 if (msi_count > AAC_MAX_MSIX)
1860 msi_count = AAC_MAX_MSIX;
1861 if (msi_count > sc->aac_max_msix)
1862 msi_count = sc->aac_max_msix;
1863 if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1864 device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1865 "will try MSI\n", msi_count, error);
1866 pci_release_msi(dev);
1868 sc->msi_enabled = TRUE;
1869 device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1873 if (!sc->msi_enabled) {
1875 if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1876 device_printf(dev, "alloc msi failed - err=%d; "
1877 "will use INTx\n", error);
1878 pci_release_msi(dev);
1880 sc->msi_enabled = TRUE;
1881 device_printf(dev, "using MSI interrupts\n");
1885 if (sc->msi_enabled) {
1886 /* now read controller capability from PCI config. space */
1887 cap = aac_find_pci_capability(sc, PCIY_MSIX);
1888 val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
/* Controller did not actually enable MSI: back out to INTx. */
1889 if (!(val & AAC_PCI_MSI_ENABLE)) {
1890 pci_release_msi(dev);
1891 sc->msi_enabled = FALSE;
1895 if (!sc->msi_enabled) {
1896 device_printf(dev, "using legacy interrupts\n");
1897 sc->aac_max_msix = 1;
1899 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1900 if (sc->aac_max_msix > msi_count)
1901 sc->aac_max_msix = msi_count;
/* FIB budget per interrupt vector. */
1903 sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1905 fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1906 sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
/*
 * Walk the PCI capability list and return the config-space offset of the
 * capability with ID 'cap' (0 if absent or no capability list).
 * NOTE(review): the loop body and returns are partly elided in this view.
 */
1910 aac_find_pci_capability(struct aac_softc *sc, int cap)
1918 status = pci_read_config(dev, PCIR_STATUS, 2);
1919 if (!(status & PCIM_STATUS_CAPPRESENT))
/* Capability-pointer register location depends on the header type. */
1922 status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1923 switch (status & PCIM_HDRTYPE) {
1929 ptr = PCIR_CAP_PTR_2;
1935 ptr = pci_read_config(dev, ptr, 1);
1939 next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1940 val = pci_read_config(dev, ptr + PCICAP_ID, 1);
/*
 * Allocate and wire one IRQ resource + handler per interrupt vector
 * (aac_max_msix vectors; RID starts at 1 for MSI/MSI-X, 0 for INTx).
 * Each handler is aacraid_new_intr_type1 with a per-vector aac_msix slot.
 */
1950 aac_setup_intr(struct aac_softc *sc)
1952 int i, msi_count, rid;
1953 struct resource *res;
1956 msi_count = sc->aac_max_msix;
1957 rid = (sc->msi_enabled ? 1:0);
1959 for (i = 0; i < msi_count; i++, rid++) {
1960 if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1961 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1962 device_printf(sc->aac_dev,"can't allocate interrupt\n");
1965 sc->aac_irq_rid[i] = rid;
1966 sc->aac_irq[i] = res;
1967 if (aac_bus_setup_intr(sc->aac_dev, res,
1968 INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1969 aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1970 device_printf(sc->aac_dev, "can't set up interrupt\n");
1973 sc->aac_msix[i].vector_no = i;
1974 sc->aac_msix[i].sc = sc;
1975 sc->aac_intr[i] = tag;
/*
 * Query the controller's configuration status (CT_GET_CONFIG_STATUS via a
 * sync FIB) and, when the reported action is safe (<= CFACT_PAUSE),
 * auto-commit the configuration with CT_COMMIT_CONFIG.  Takes and drops
 * aac_io_lock around the sync-FIB usage.
 * NOTE(review): returns/braces are partly elided in this view.
 */
1982 aac_check_config(struct aac_softc *sc)
1984 struct aac_fib *fib;
1985 struct aac_cnt_config *ccfg;
1986 struct aac_cf_status_hdr *cf_shdr;
1989 mtx_lock(&sc->aac_io_lock);
1990 aac_alloc_sync_fib(sc, &fib);
1992 ccfg = (struct aac_cnt_config *)&fib->data[0];
1993 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1994 ccfg->Command = VM_ContainerConfig;
1995 ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
1996 ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
1998 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
1999 sizeof (struct aac_cnt_config));
2000 cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2001 if (rval == 0 && ccfg->Command == ST_OK &&
2002 ccfg->CTCommand.param[0] == CT_OK) {
2003 if (cf_shdr->action <= CFACT_PAUSE) {
/* Reuse the same FIB buffer for the commit request. */
2004 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2005 ccfg->Command = VM_ContainerConfig;
2006 ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2008 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2009 sizeof (struct aac_cnt_config));
2010 if (rval == 0 && ccfg->Command == ST_OK &&
2011 ccfg->CTCommand.param[0] == CT_OK) {
2012 /* successful completion */
2015 /* auto commit aborted due to error(s) */
2019 /* auto commit aborted due to adapter indicating
2020 config. issues too dangerous to auto commit */
2028 aac_release_sync_fib(sc);
2029 mtx_unlock(&sc->aac_io_lock);
2034 * Send a synchronous command to the controller and wait for a result.
2035 * Indicate if the controller completed the command with an error status.
/*
 * Issue a synchronous (mailbox) command: load the mailbox, ring the sync
 * doorbell, and — unless this is a fire-and-forget SYNCFIB (sp != NULL
 * with *sp == 0) — spin until the sync-command doorbell bit sets or
 * AAC_SYNC_TIMEOUT elapses, then return the mailbox-0 status and
 * optionally mailbox 1 via *r1.
 */
2038 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2039 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2040 u_int32_t *sp, u_int32_t *r1)
2045 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2047 /* populate the mailbox */
2048 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2050 /* ensure the sync command doorbell flag is cleared */
2051 if (!sc->msi_enabled)
2052 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2054 /* then set it to signal the adapter */
2055 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2057 if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2058 /* spin waiting for the command to complete */
2061 if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2062 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2065 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2067 /* clear the completion flag */
2068 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2070 /* get the command status */
2071 status = AAC_GET_MAILBOX(sc, 0);
2075 /* return parameter */
2077 *r1 = AAC_GET_MAILBOX(sc, 1);
2079 if (status != AAC_SRB_STS_SUCCESS)
/*
 * Send a FIB synchronously through the adapter's dedicated sync-FIB slot
 * in the common area: fill in the FIB header (XferState, command, size,
 * receiver address) and submit it with AAC_MONKER_SYNCFIB, blocking until
 * completion.  Requires aac_io_lock held; datasize must fit the FIB.
 */
2086 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2087 struct aac_fib *fib, u_int16_t datasize)
2089 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2090 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2092 if (datasize > AAC_FIB_DATASIZE)
2096 * Set up the sync FIB
2098 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2099 AAC_FIBSTATE_INITIALISED |
2101 fib->Header.XferState |= xferstate;
2102 fib->Header.Command = command;
2103 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2104 fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2105 fib->Header.SenderSize = sizeof(struct aac_fib);
2106 fib->Header.SenderFibAddress = 0; /* Not needed */
2107 fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2108 offsetof(struct aac_common, ac_sync_fib);
2111 * Give the FIB to the controller, wait for a response.
2113 if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2114 fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2115 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2123 * Check for commands that have been outstanding for a suspiciously long time,
2124 * and complain about them.
/*
 * Watchdog: scan the busy-command list for commands older than
 * AAC_CMD_TIMEOUT seconds; report each late command and its FIB, then
 * (per the visible tail) reset the adapter and dump queue state.
 */
2127 aac_timeout(struct aac_softc *sc)
2129 struct aac_command *cm;
2133 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2135 * Traverse the busy command list, bitch about late commands once
2139 deadline = time_uptime - AAC_CMD_TIMEOUT;
2140 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2141 if (cm->cm_timestamp < deadline) {
2142 device_printf(sc->aac_dev,
2143 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2144 cm, (int)(time_uptime-cm->cm_timestamp));
2145 AAC_PRINT_FIB(sc, cm->cm_fib);
2151 aac_reset_adapter(sc);
2152 aacraid_print_queues(sc);
2156 * Interface Function Vectors
2160 * Read the current firmware status word.
/* SRC interface: read the firmware status word from the OMR register. */
2163 aac_src_get_fwstatus(struct aac_softc *sc)
2165 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2167 return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2171 * Notify the controller of a change in a given queue
/* SRC interface: notify the controller of a queue change by writing the
 * (shifted) queue bit to the inbound doorbell register. */
2174 aac_src_qnotify(struct aac_softc *sc, int qbit)
2176 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2178 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2182 * Get the interrupt reason bits
/*
 * SRC interface: read the interrupt reason bits.  In MSI mode the MSI
 * outbound doorbell is read and a sync-status bit is mapped to
 * AAC_DB_SYNC_COMMAND; otherwise the regular outbound doorbell is read
 * and shifted into place.
 */
2185 aac_src_get_istatus(struct aac_softc *sc)
2189 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2191 if (sc->msi_enabled) {
2192 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2193 if (val & AAC_MSI_SYNC_STATUS)
2194 val = AAC_DB_SYNC_COMMAND;
2198 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2204 * Clear some interrupt reason bits
/*
 * SRC interface: acknowledge interrupt reason bits.  In MSI mode a sync
 * command completion is cleared through the device-register helper;
 * otherwise the mask is written to the outbound doorbell clear register.
 */
2207 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2209 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2211 if (sc->msi_enabled) {
2212 if (mask == AAC_DB_SYNC_COMMAND)
2213 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2215 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2220 * Populate the mailbox and set the command word
/* SRC interface: write the command word and four arguments into the
 * five consecutive 32-bit mailbox registers. */
2223 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2224 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2226 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2228 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2229 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2230 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2231 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2232 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
/* SRCv interface: same as aac_src_set_mailbox() but at the SRCv mailbox
 * register offset. */
2236 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2237 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2239 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2241 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2242 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2243 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2244 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2245 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2249 * Fetch the immediate command status word
/* SRC interface: read 32-bit mailbox register 'mb' (0..n). */
2252 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2254 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2256 return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
/* SRCv interface: read 32-bit mailbox register 'mb' at the SRCv offset. */
2260 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2262 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2264 return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2268 * Set/clear interrupt masks
/*
 * SRC interface: mode-dispatched device-register manipulation — enable or
 * disable interrupts/MSI-X/INTx and clear AIF/sync doorbell bits.  Writes
 * to the inbound doorbell are read back (AAC_SRC_IDBR) to flush posted
 * writes.
 * NOTE(review): the bit values written between the IDBR read and write
 * are elided in this view; comments describe only the visible code.
 */
2271 aac_src_access_devreg(struct aac_softc *sc, int mode)
2275 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2278 case AAC_ENABLE_INTERRUPT:
2279 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2280 (sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2281 AAC_INT_ENABLE_TYPE1_INTX));
2284 case AAC_DISABLE_INTERRUPT:
2285 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2288 case AAC_ENABLE_MSIX:
2290 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2292 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2293 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
/* Unmask all interrupt sources, then clear the global mask bits. */
2295 val = PMC_ALL_INTERRUPT_BITS;
2296 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2297 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2298 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2299 val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2302 case AAC_DISABLE_MSIX:
2304 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2306 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2307 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2310 case AAC_CLEAR_AIF_BIT:
2312 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2314 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2315 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2318 case AAC_CLEAR_SYNC_BIT:
2320 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2322 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2323 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2326 case AAC_ENABLE_INTX:
2328 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2330 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2331 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2333 val = PMC_ALL_INTERRUPT_BITS;
2334 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2335 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2336 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2337 val & (~(PMC_GLOBAL_INT_BIT2)));
2346 * New comm. interface: Send command functions
/*
 * SRC interface: submit a command via the new-comm inbound queue.  In
 * MSI-X mode, pick a delivery vector by round-robin load-balancing over
 * aac_rrq_outstanding and encode it into the FIB handle.  Type-2 comm
 * writes the FIB address directly (TFIB2/TFIB2_64 depending on whether
 * the physical address exceeds 32 bits); type-1 prepends an xport header.
 * The encoded fibsize (128-byte units minus one) is OR-ed into the low
 * address bits written to the inbound queue register(s).
 * NOTE(review): some lines are elided in this view; comments describe
 * only the visible code.
 */
2349 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2351 struct aac_fib_xporthdr *pFibX;
2352 u_int32_t fibsize, high_addr;
2355 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2357 if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2358 sc->aac_max_msix > 1) {
2359 u_int16_t vector_no, first_choice = 0xffff;
/* Start from the next round-robin slot and look for a vector with
 * spare capacity; fall back to the first one tried. */
2361 vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2364 if (vector_no == sc->aac_max_msix)
2366 if (sc->aac_rrq_outstanding[vector_no] <
2369 if (0xffff == first_choice)
2370 first_choice = vector_no;
2371 else if (vector_no == first_choice)
2374 if (vector_no == first_choice)
2376 sc->aac_rrq_outstanding[vector_no]++;
/* Avoid wrapping the push counter past 32 bits. */
2377 if (sc->aac_fibs_pushed_no == 0xffffffff)
2378 sc->aac_fibs_pushed_no = 0;
2380 sc->aac_fibs_pushed_no++;
/* Encode the chosen vector in the upper half of the handle. */
2382 cm->cm_fib->Header.Handle += (vector_no << 16);
2385 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2386 /* Calculate the amount to the fibsize bits */
2387 fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2388 /* Fill new FIB header */
2389 address = cm->cm_fibphys;
2390 high_addr = (u_int32_t)(address >> 32);
2391 if (high_addr == 0L) {
2392 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2393 cm->cm_fib->Header.u.TimeStamp = 0L;
2395 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2396 cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2398 cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2400 /* Calculate the amount to the fibsize bits */
2401 fibsize = (sizeof(struct aac_fib_xporthdr) +
2402 cm->cm_fib->Header.Size + 127) / 128 - 1;
2403 /* Fill XPORT header */
/* The xport header lives immediately before the FIB (see the aligned
 * layout set up in aac_alloc_commands). */
2404 pFibX = (struct aac_fib_xporthdr *)
2405 ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2406 pFibX->Handle = cm->cm_fib->Header.Handle;
2407 pFibX->HostAddress = cm->cm_fibphys;
2408 pFibX->Size = cm->cm_fib->Header.Size;
2409 address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2410 high_addr = (u_int32_t)(address >> 32);
2415 aac_enqueue_busy(cm);
2417 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2418 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2420 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2426 * New comm. interface: get, set outbound queue index
/* SRC interface: read the outbound queue index.  NOTE(review): the body
 * (return value) is elided in this view. */
2429 aac_src_get_outb_queue(struct aac_softc *sc)
2431 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* SRC interface: set the outbound queue index.  NOTE(review): the body
 * (register write) is elided in this view. */
2437 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2439 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2443 * Debugging and Diagnostics
2447 * Print some information about the controller.
/*
 * Print adapter identification at attach time: fetch (optional)
 * supplemental adapter info for the marketing name, feature bits and
 * option-2 word, then RequestAdapterInfo for CPU/memory/battery details,
 * kernel revision, serial number, and the supported-options bit names.
 * Uses a sync FIB under aac_io_lock.
 */
2450 aac_describe_controller(struct aac_softc *sc)
2452 struct aac_fib *fib;
2453 struct aac_adapter_info *info;
/* Fallback name if supplemental adapter info is unavailable. */
2454 char *adapter_type = "Adaptec RAID controller";
2456 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2458 mtx_lock(&sc->aac_io_lock);
2459 aac_alloc_sync_fib(sc, &fib);
2461 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2463 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2464 device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2466 struct aac_supplement_adapter_info *supp_info;
2468 supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2469 adapter_type = (char *)supp_info->AdapterTypeText;
2470 sc->aac_feature_bits = supp_info->FeatureBits;
2471 sc->aac_support_opt2 = supp_info->SupportedOptions2;
2474 device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2476 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2477 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2480 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2481 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2482 aac_release_sync_fib(sc);
2483 mtx_unlock(&sc->aac_io_lock);
2487 /* save the kernel revision structure for later use */
2488 info = (struct aac_adapter_info *)&fib->data[0];
2489 sc->aac_revision = info->KernelRevision;
2492 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2493 "(%dMB cache, %dMB execution), %s\n",
2494 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2495 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2496 info->BufferMem / (1024 * 1024),
2497 info->ExecutionMem / (1024 * 1024),
2498 aac_describe_code(aac_battery_platform,
2499 info->batteryPlatform));
2501 device_printf(sc->aac_dev,
2502 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2503 info->KernelRevision.external.comp.major,
2504 info->KernelRevision.external.comp.minor,
2505 info->KernelRevision.external.comp.dash,
2506 info->KernelRevision.buildNumber,
2507 (u_int32_t)(info->SerialNumber & 0xffffff));
2509 device_printf(sc->aac_dev, "Supported Options=%b\n",
2510 sc->supported_options,
2533 aac_release_sync_fib(sc);
2534 mtx_unlock(&sc->aac_io_lock);
2538 * Look up a text description of a numeric error code and return a pointer to
/*
 * Linear-search 'table' (NULL-string terminated) for 'code' and return
 * the matching description. On a miss, falls off the end of the loop
 * and returns table[i + 1].string — the entry one past the NULL
 * sentinel, which by convention holds the "unknown" string.
 */
2542 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2546 	for (i = 0; table[i].string != NULL; i++)
2547 		if (table[i].code == code)
2548 			return(table[i].string);
2549 	return(table[i + 1].string);
2553 * Management Interface
/*
 * Character-device open entry point for the management interface.
 * Marks the device busy and registers a per-open cdevpriv destructor
 * so the busy count is dropped on last close (FreeBSD >= 7.2).
 */
2557 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2559 	struct aac_softc *sc;
2562 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2563 #if __FreeBSD_version >= 702000
2564 	device_busy(sc->aac_dev);
2565 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
/*
 * Management-interface ioctl dispatcher. Decodes FSACTL_* commands
 * (both the native and the Linux-compatible FSACTL_LNX_* variants)
 * and hands off to the matching helper.
 *
 * NOTE(review): for the native variants, 'arg' holds a pointer to the
 * user pointer, so it is dereferenced (arg = *(caddr_t*)arg) and then
 * control appears to fall through into the LNX case — break statements
 * are elided in this listing; confirm against full source.
 */
2571 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2573 	union aac_statrequest *as;
2574 	struct aac_softc *sc;
2577 	as = (union aac_statrequest *)arg;
2579 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/* Queue-statistics request: copy the selected counters out. */
2583 		switch (as->as_item) {
2587 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2588 			      sizeof(struct aac_qstat));
2596 	case FSACTL_SENDFIB:
2597 	case FSACTL_SEND_LARGE_FIB:
2598 		arg = *(caddr_t*)arg;
2599 	case FSACTL_LNX_SENDFIB:
2600 	case FSACTL_LNX_SEND_LARGE_FIB:
2601 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2602 		error = aac_ioctl_sendfib(sc, arg);
2604 	case FSACTL_SEND_RAW_SRB:
2605 		arg = *(caddr_t*)arg;
2606 	case FSACTL_LNX_SEND_RAW_SRB:
2607 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2608 		error = aac_ioctl_send_raw_srb(sc, arg);
2610 	case FSACTL_AIF_THREAD:
2611 	case FSACTL_LNX_AIF_THREAD:
2612 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2615 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2616 		arg = *(caddr_t*)arg;
2617 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2618 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2619 		error = aac_open_aif(sc, arg);
2621 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2622 		arg = *(caddr_t*)arg;
2623 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2624 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2625 		error = aac_getnext_aif(sc, arg);
2627 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2628 		arg = *(caddr_t*)arg;
2629 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2630 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2631 		error = aac_close_aif(sc, arg);
2633 	case FSACTL_MINIPORT_REV_CHECK:
2634 		arg = *(caddr_t*)arg;
2635 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2636 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2637 		error = aac_rev_check(sc, arg);
2639 	case FSACTL_QUERY_DISK:
2640 		arg = *(caddr_t*)arg;
2641 	case FSACTL_LNX_QUERY_DISK:
2642 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2643 		error = aac_query_disk(sc, arg);
	/* Deletion is driven by AIFs from the adapter, not by userland. */
2645 	case FSACTL_DELETE_DISK:
2646 	case FSACTL_LNX_DELETE_DISK:
2648 		 * We don't trust the underland to tell us when to delete a
2649 		 * container, rather we rely on an AIF coming from the
2654 	case FSACTL_GET_PCI_INFO:
2655 		arg = *(caddr_t*)arg;
2656 	case FSACTL_LNX_GET_PCI_INFO:
2657 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2658 		error = aac_get_pci_info(sc, arg);
2660 	case FSACTL_GET_FEATURES:
2661 		arg = *(caddr_t*)arg;
2662 	case FSACTL_LNX_GET_FEATURES:
2663 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2664 		error = aac_supported_features(sc, arg);
	/* Unknown command: log it (default case label elided in listing). */
2667 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
/*
 * poll(2) entry point for the management device. Reports the device
 * readable when any open AIF context has unread entries (its read
 * index trails the queue write index, or it has wrapped); otherwise
 * records the thread in rcv_select for a later selwakeup.
 */
2675 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2677 	struct aac_softc *sc;
2678 	struct aac_fib_context *ctx;
2684 	mtx_lock(&sc->aac_io_lock);
2685 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2686 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			/* ctx_idx != aifq_idx => entries pending; ctx_wrap
			 * => queue lapped this reader, also pending. */
2687 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2688 				revents |= poll_events & (POLLIN | POLLRDNORM);
2693 	mtx_unlock(&sc->aac_io_lock);
	/* Nothing ready: register for notification from aac_handle_aif(). */
2696 		if (poll_events & (POLLIN | POLLRDNORM))
2697 			selrecord(td, &sc->rcv_select);
/*
 * Command-resource event callback used by the ioctl paths: when a
 * command slot frees up, try to allocate one into *arg. If allocation
 * still fails, re-queue the event and try again on the next free;
 * otherwise the event is consumed and freed. Wakeup of the sleeping
 * ioctl thread is elided in this listing.
 */
2704 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2707 	switch (event->ev_type) {
2708 	case AAC_EVENT_CMFREE:
2709 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2710 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2711 			aacraid_add_event(sc, event);
2714 		free(event, M_AACRAIDBUF);
2723 * Send a FIB supplied from userspace
/*
 * FSACTL_SENDFIB handler: copy a FIB in from userspace at 'ufib',
 * submit it to the adapter, wait for completion, and copy the result
 * back out. Oversized FIBs (in either direction) are clamped to
 * aac_max_fib_size with a warning. Blocks on AAC_EVENT_CMFREE when no
 * command slot is available.
 */
2726 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2728 	struct aac_command *cm;
2731 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2738 	mtx_lock(&sc->aac_io_lock);
2739 	if (aacraid_alloc_command(sc, &cm)) {
2740 		struct aac_event *event;
		/* No free command: queue a CMFREE event and sleep until
		 * aac_ioctl_event() hands us one. */
2742 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2744 		if (event == NULL) {
2746 			mtx_unlock(&sc->aac_io_lock);
2749 		event->ev_type = AAC_EVENT_CMFREE;
2750 		event->ev_callback = aac_ioctl_event;
2751 		event->ev_arg = &cm;
2752 		aacraid_add_event(sc, event);
2753 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2755 	mtx_unlock(&sc->aac_io_lock);
2758 	 * Fetch the FIB header, then re-copy to get data as well.
	/* First copyin gets just the header so we can learn the size... */
2760 	if ((error = copyin(ufib, cm->cm_fib,
2761 			   sizeof(struct aac_fib_header))) != 0)
2763 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2764 	if (size > sc->aac_max_fib_size) {
2765 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2766 			      size, sc->aac_max_fib_size);
2767 		size = sc->aac_max_fib_size;
	/* ...then the full (possibly clamped) FIB including payload. */
2769 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2771 	cm->cm_fib->Header.Size = size;
2772 	cm->cm_timestamp = time_uptime;
2776 	 * Pass the FIB to the controller, wait for it to complete.
2778 	mtx_lock(&sc->aac_io_lock);
2779 	error = aacraid_wait_command(cm);
2780 	mtx_unlock(&sc->aac_io_lock);
2782 		device_printf(sc->aac_dev,
2783 			      "aacraid_wait_command return %d\n", error);
2788 	 * Copy the FIB and data back out to the caller.
2790 	size = cm->cm_fib->Header.Size;
2791 	if (size > sc->aac_max_fib_size) {
2792 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2793 			      size, sc->aac_max_fib_size);
2794 		size = sc->aac_max_fib_size;
2796 	error = copyout(cm->cm_fib, ufib, size);
	/* Common exit: return the command to the free pool. */
2800 	mtx_lock(&sc->aac_io_lock);
2801 	aacraid_release_command(cm);
2802 	mtx_unlock(&sc->aac_io_lock);
2808 * Send a passthrough FIB supplied from userspace
/*
 * FSACTL_SEND_RAW_SRB handler: copy a SCSI passthrough request (SRB)
 * in from userspace, stage its single data buffer through a private
 * DMA allocation, run the command, and copy data + SRB response back.
 * Only one user scatter/gather element is supported; the entry may be
 * 32-bit (aac_sg_entry) or 64-bit (aac_sg_entry64), distinguished by
 * the total fibsize the caller passed.
 */
2811 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2813 	struct aac_command *cm;
2814 	struct aac_fib *fib;
2815 	struct aac_srb *srbcmd;
2816 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2818 	int error, transfer_data = 0;
2819 	bus_dmamap_t orig_map = 0;
2820 	u_int32_t fibsize = 0;
2821 	u_int64_t srb_sg_address;
2822 	u_int32_t srb_sg_bytecount;
2824 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2828 	mtx_lock(&sc->aac_io_lock);
2829 	if (aacraid_alloc_command(sc, &cm)) {
2830 		struct aac_event *event;
		/* No free command: wait for one via the CMFREE event,
		 * same pattern as aac_ioctl_sendfib(). */
2832 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2834 		if (event == NULL) {
2836 			mtx_unlock(&sc->aac_io_lock);
2839 		event->ev_type = AAC_EVENT_CMFREE;
2840 		event->ev_callback = aac_ioctl_event;
2841 		event->ev_arg = &cm;
2842 		aacraid_add_event(sc, event);
2843 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2845 	mtx_unlock(&sc->aac_io_lock);
2848 	/* save original dma map */
2849 	orig_map = cm->cm_datamap;
2852 	srbcmd = (struct aac_srb *)fib->data;
	/* Peek at data_len first to learn how much SRB to copy in. */
2853 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2854 		sizeof (u_int32_t)) != 0))
	/* Reject SRBs that would not fit in a FIB payload. */
2856 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2860 	if ((error = copyin((void *)user_srb, srbcmd, fibsize) != 0))
2863 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2864 	srbcmd->retry_limit = 0;	/* obsolete */
2866 	/* only one sg element from userspace supported */
2867 	if (srbcmd->sg_map.SgCount > 1) {
	/* fibsize tells us whether the embedded SG entry is 32- or
	 * 64-bit; copy it in and extract address/length accordingly. */
2872 	if (fibsize == (sizeof(struct aac_srb) +
2873 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2874 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2875 		struct aac_sg_entry sg;
2877 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2880 		srb_sg_bytecount = sg.SgByteCount;
2881 		srb_sg_address = (u_int64_t)sg.SgAddress;
2882 	} else if (fibsize == (sizeof(struct aac_srb) +
2883 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2885 		struct aac_sg_entry64 *sgp =
2886 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2887 		struct aac_sg_entry64 sg;
2889 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2892 		srb_sg_bytecount = sg.SgByteCount;
2893 		srb_sg_address = sg.SgAddress;
		/* A >4GB user address needs 64-bit SG support in the HW. */
2894 		if (srb_sg_address > 0xffffffffull &&
2895 		    !(sc->flags & AAC_FLAGS_SG_64BIT))
	/* Response area in userspace follows the SRB proper. */
2905 	user_reply = (char *)arg + fibsize;
2906 	srbcmd->data_len = srb_sg_bytecount;
2907 	if (srbcmd->sg_map.SgCount == 1)
2910 	if (transfer_data) {
2912 		 * Create DMA tag for the passthr. data buffer and allocate it.
2914 		if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
2915 			1, 0,			/* algnmnt, boundary */
2916 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2917 			BUS_SPACE_MAXADDR_32BIT :
2918 			0x7fffffff,		/* lowaddr */
2919 			BUS_SPACE_MAXADDR, 	/* highaddr */
2920 			NULL, NULL, 		/* filter, filterarg */
2921 			srb_sg_bytecount, 	/* size */
2922 			sc->aac_sg_tablesize,	/* nsegments */
2923 			srb_sg_bytecount, 	/* maxsegsize */
2925 			NULL, NULL,		/* No locking needed */
2926 			&cm->cm_passthr_dmat)) {
2930 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2931 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2935 		/* fill some cm variables */
2936 		cm->cm_datalen = srb_sg_bytecount;
2937 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2938 			cm->cm_flags |= AAC_CMD_DATAIN;
2939 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2940 			cm->cm_flags |= AAC_CMD_DATAOUT;
		/* Host-to-device transfer: stage the user data into the
		 * DMA buffer before the command runs. */
2942 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2943 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2944 				cm->cm_data, cm->cm_datalen)) != 0)
2946 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2947 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2948 				BUS_DMASYNC_PREWRITE);
	/* Build the FIB around the SRB and dispatch it. */
2953 	fib->Header.Size = sizeof(struct aac_fib_header) +
2954 		sizeof(struct aac_srb);
2955 	fib->Header.XferState =
2956 		AAC_FIBSTATE_HOSTOWNED   |
2957 		AAC_FIBSTATE_INITIALISED |
2958 		AAC_FIBSTATE_EMPTY	 |
2959 		AAC_FIBSTATE_FROMHOST	 |
2960 		AAC_FIBSTATE_REXPECTED   |
2964 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2965 		ScsiPortCommandU64 : ScsiPortCommand;
2966 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2969 	if (transfer_data) {
2970 		bus_dmamap_load(cm->cm_passthr_dmat,
2971 			cm->cm_datamap, cm->cm_data,
2973 			aacraid_map_command_sg, cm, 0);
2975 		aacraid_map_command_sg(cm, NULL, 0, 0);
2978 	/* wait for completion */
2979 	mtx_lock(&sc->aac_io_lock);
2980 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2981 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2982 	mtx_unlock(&sc->aac_io_lock);
	/* Device-to-host transfer: hand the DMA buffer back to userland. */
2985 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2986 		if ((error = copyout(cm->cm_data,
2987 			(void *)(uintptr_t)srb_sg_address,
2988 			cm->cm_datalen)) != 0)
2990 		/* sync required for bus_dmamem_alloc() allocated mem.? */
2991 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2992 				BUS_DMASYNC_POSTREAD);
2996 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
	/* Cleanup: release the private DMA buffer/tag and restore the
	 * command's original map before returning it to the pool. */
2999 	if (cm && cm->cm_data) {
3001 		bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3002 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3003 		cm->cm_datamap = orig_map;
3005 	if (cm && cm->cm_passthr_dmat)
3006 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3008 	mtx_lock(&sc->aac_io_lock);
3009 	aacraid_release_command(cm);
3010 	mtx_unlock(&sc->aac_io_lock);
3016 * Request an AIF from the controller (new comm. type1)
/*
 * Post an AIF request FIB to the adapter so it has somewhere to
 * deliver the next asynchronous event. If no command is free, set
 * aif_pending so the request is retried later.
 */
3019 aac_request_aif(struct aac_softc *sc)
3021 	struct aac_command *cm;
3022 	struct aac_fib *fib;
3024 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3026 	if (aacraid_alloc_command(sc, &cm)) {
3027 		sc->aif_pending = 1;
3030 	sc->aif_pending = 0;
	/* Build the AIF request FIB. */
3034 	fib->Header.Size = sizeof(struct aac_fib);
3035 	fib->Header.XferState =
3036 		AAC_FIBSTATE_HOSTOWNED   |
3037 		AAC_FIBSTATE_INITIALISED |
3038 		AAC_FIBSTATE_EMPTY	 |
3039 		AAC_FIBSTATE_FROMHOST	 |
3040 		AAC_FIBSTATE_REXPECTED   |
3043 	/* set AIF marker */
3044 	fib->Header.Handle = 0x00800000;
3045 	fib->Header.Command = AifRequest;
3046 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3048 	aacraid_map_command_sg(cm, NULL, 0, 0);
3052 #if __FreeBSD_version >= 702000
3054 * cdevpriv interface private destructor.
/*
 * Runs when the per-open cdevpriv registered in aac_open() is torn
 * down; undoes the device_busy() taken at open time.
 */
3057 aac_cdevpriv_dtor(void *arg)
3059 	struct aac_softc *sc;
3062 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3064 	device_unbusy(sc->aac_dev);
/*
 * Character-device close entry point; only the debug trace is visible
 * here (pre-7.2 cleanup, if any, is elided in this listing).
 */
3069 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3071 	struct aac_softc *sc;
3074 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3080 * Handle an AIF sent to us by the controller; queue it for later reference.
3081 * If the queue fills up, then drop the older entries.
/*
 * Process one Adapter Initiated FIB: react to container add/delete
 * and enclosure/JBOD events (re-enumerating containers and triggering
 * CAM rescans as needed), then append the raw FIB to the in-kernel
 * AIF ring for retrieval by management ioctls and wake any waiters.
 */
3084 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3086 	struct aac_aif_command *aif;
3087 	struct aac_container *co, *co_next;
3088 	struct aac_fib_context *ctx;
3089 	struct aac_fib *sync_fib;
3090 	struct aac_mntinforesp mir;
3091 	int next, current, found;
3092 	int count = 0, changed = 0, i = 0;
3093 	u_int32_t channel, uid;
3095 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3097 	aif = (struct aac_aif_command*)&fib->data[0];
3098 	aacraid_print_aif(sc, aif);
3100 	/* Is it an event that we should care about? */
3101 	switch (aif->command) {
3102 	case AifCmdEventNotify:
3103 		switch (aif->data.EN.type) {
3104 		case AifEnAddContainer:
3105 		case AifEnDeleteContainer:
3107 			 * A container was added or deleted, but the message
3108 			 * doesn't tell us anything else!  Re-enumerate the
3109 			 * containers and sort things out.
3111 			aac_alloc_sync_fib(sc, &sync_fib);
3114 				 * Ask the controller for its containers one at
3116 				 * XXX What if the controller's list changes
3117 				 * midway through this enumaration?
3118 				 * XXX This should be done async.
3120 				if (aac_get_container_info(sc, sync_fib, i,
3124 				count = mir.MntRespCount;
3126 				 * Check the container against our list.
3127 				 * co->co_found was already set to 0 in a
				/* Valid container: see if we already know it. */
3130 				if ((mir.Status == ST_OK) &&
3131 				    (mir.MntTable[0].VolType != CT_NONE)) {
3134 						      &sc->aac_container_tqh,
3136 						if (co->co_mntobj.ObjectId ==
3137 						    mir.MntTable[0].ObjectId) {
3144 					 * If the container matched, continue
3153 					 * This is a new container.  Do all the
3154 					 * appropriate things to set it up.
3156 					aac_add_container(sc, &mir, 1, uid);
3160 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3161 			aac_release_sync_fib(sc);
3164 			 * Go through our list of containers and see which ones
3165 			 * were not marked 'found'.  Since the controller didn't
3166 			 * list them they must have been deleted.  Do the
3167 			 * appropriate steps to destroy the device.  Also reset
3168 			 * the co->co_found field.
3170 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3171 			while (co != NULL) {
3172 				if (co->co_found == 0) {
					/* Grab the successor before unlinking. */
3173 					co_next = TAILQ_NEXT(co, co_link);
3174 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3176 					free(co, M_AACRAIDBUF);
3181 					co = TAILQ_NEXT(co, co_link);
3185 			/* Attach the newly created containers */
3187 				if (sc->cam_rescan_cb != NULL)
3188 					sc->cam_rescan_cb(sc, 0,
3189 					    AAC_CAM_TARGET_WILDCARD);
3194 		case AifEnEnclosureManagement:
3195 			switch (aif->data.EN.data.EEE.eventType) {
3196 			case AIF_EM_DRIVE_INSERTION:
3197 			case AIF_EM_DRIVE_REMOVAL:
				/* unitID encodes the bus in its top nibble-ish
				 * bits; rescan that passthrough bus/target. */
3198 				channel = aif->data.EN.data.EEE.unitID;
3199 				if (sc->cam_rescan_cb != NULL)
3200 					sc->cam_rescan_cb(sc,
3201 					    ((channel>>24) & 0xF) + 1,
3202 					    (channel & 0xFFFF));
3208 		case AifEnDeleteJBOD:
3209 		case AifRawDeviceRemove:
3210 			channel = aif->data.EN.data.ECE.container;
3211 			if (sc->cam_rescan_cb != NULL)
3212 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3213 				    AAC_CAM_TARGET_WILDCARD);
3224 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3225 	current = sc->aifq_idx;
3226 	next = (current + 1) % AAC_AIFQ_LENGTH;
3228 		sc->aifq_filled = 1;
3229 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3230 	/* modify AIF contexts */
3231 	if (sc->aifq_filled) {
3232 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			/* Readers the ring just lapped get pushed forward so
			 * they lose the oldest entry, not their position. */
3233 			if (next == ctx->ctx_idx)
3235 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3236 				ctx->ctx_idx = next;
3239 	sc->aifq_idx = next;
3240 	/* On the off chance that someone is sleeping for an aif... */
3241 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3242 		wakeup(sc->aac_aifq);
3243 	/* Wakeup any poll()ers */
3244 	selwakeuppri(&sc->rcv_select, PRIBIO);
3250 * Return the Revision of the driver to userspace and check to see if the
3251 * userspace app is possibly compatible.  This is extremely bogus since
3252 * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3253 * returning what the card reported.
/*
 * FSACTL_MINIPORT_REV_CHECK handler: copy the caller's revision in,
 * reply with the driver's version numbers and possiblyCompatible = 1.
 */
3256 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3258 	struct aac_rev_check rev_check;
3259 	struct aac_rev_check_resp rev_check_resp;
3262 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3265 	 * Copyin the revision struct from userspace
3267 	if ((error = copyin(udata, (caddr_t)&rev_check,
3268 			sizeof(struct aac_rev_check))) != 0) {
3272 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3273 	      rev_check.callingRevision.buildNumber);
3276 	 * Doctor up the response struct.
3278 	rev_check_resp.possiblyCompatible = 1;
3279 	rev_check_resp.adapterSWRevision.external.comp.major =
3280 	    AAC_DRIVER_MAJOR_VERSION;
3281 	rev_check_resp.adapterSWRevision.external.comp.minor =
3282 	    AAC_DRIVER_MINOR_VERSION;
3283 	rev_check_resp.adapterSWRevision.external.comp.type =
3285 	rev_check_resp.adapterSWRevision.external.comp.dash =
3286 	    AAC_DRIVER_BUGFIX_LEVEL;
3287 	rev_check_resp.adapterSWRevision.buildNumber =
3290 	return(copyout((caddr_t)&rev_check_resp, udata,
3291 		       sizeof(struct aac_rev_check_resp)));
3295 * Pass the fib context to the caller
/*
 * FSACTL_OPEN_GET_ADAPTER_FIB handler: allocate a new AIF reader
 * context, link it onto sc->fibctx, derive a unique handle for it
 * (checked against existing contexts), and copy the handle out to
 * userspace. On copyout failure the context is closed again.
 */
3298 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3300 	struct aac_fib_context *fibctx, *ctx;
3303 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3305 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3309 	mtx_lock(&sc->aac_io_lock);
3310 	/* all elements are already 0, add to queue */
3311 	if (sc->fibctx == NULL)
3312 		sc->fibctx = fibctx;
		/* Walk to the tail of the context list to append. */
3314 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3320 	/* evaluate unique value */
	/* NOTE(review): uses the context's own address bits as the handle;
	 * collision handling below re-derives on a match (lines elided). */
3321 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3323 	while (ctx != fibctx) {
3324 		if (ctx->unique == fibctx->unique) {
3332 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3333 	mtx_unlock(&sc->aac_io_lock);
3335 		aac_close_aif(sc, (caddr_t)ctx);
3340 * Close the caller's fib context
/*
 * FSACTL_CLOSE_GET_ADAPTER_FIB handler: find the context whose unique
 * handle matches 'arg', unlink it from the sc->fibctx list (handling
 * the head case and patching prev/next), and free it.
 */
3343 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3345 	struct aac_fib_context *ctx;
3347 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3349 	mtx_lock(&sc->aac_io_lock);
3350 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3351 		if (ctx->unique == *(uint32_t *)&arg) {
3352 			if (ctx == sc->fibctx)
3355 				ctx->prev->next = ctx->next;
3357 					ctx->next->prev = ctx->prev;
3363 			free(ctx, M_AACRAIDBUF);
3365 	mtx_unlock(&sc->aac_io_lock);
3370 * Pass the caller the next AIF in their queue
/*
 * FSACTL_GET_NEXT_ADAPTER_FIB handler: look up the caller's AIF
 * context by handle and return the next queued AIF via
 * aac_return_aif(). If the queue is empty (EAGAIN) and the caller
 * asked to wait, sleep interruptibly on aac_aifq until
 * aac_handle_aif() wakes us or a signal arrives.
 */
3373 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3375 	struct get_adapter_fib_ioctl agf;
3376 	struct aac_fib_context *ctx;
3379 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3381 	mtx_lock(&sc->aac_io_lock);
3382 	if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3383 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3384 			if (agf.AdapterFibContext == ctx->unique)
3388 			mtx_unlock(&sc->aac_io_lock);
3392 		error = aac_return_aif(sc, ctx, agf.AifFib);
3393 		if (error == EAGAIN && agf.Wait) {
3394 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3395 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3396 			while (error == EAGAIN) {
				/* Drop the lock across tsleep; PCATCH lets a
				 * signal break the wait. */
3397 				mtx_unlock(&sc->aac_io_lock);
3398 				error = tsleep(sc->aac_aifq, PRIBIO |
3399 					       PCATCH, "aacaif", 0);
3400 				mtx_lock(&sc->aac_io_lock);
3402 					error = aac_return_aif(sc, ctx, agf.AifFib);
3404 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3407 	mtx_unlock(&sc->aac_io_lock);
3412 * Hand the next AIF off the top of the queue out to userspace.
/*
 * Copy the AIF at the context's read index out to 'uptr' and advance
 * the index (mod AAC_AIFQ_LENGTH). Returns EAGAIN (elided here) when
 * the reader has caught up with the queue and has not wrapped.
 */
3415 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3419 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3421 	current = ctx->ctx_idx;
3422 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3427 	copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3429 		device_printf(sc->aac_dev,
3430 		    "aac_return_aif: copyout returned %d\n", error);
3433 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
/*
 * FSACTL_GET_PCI_INFO handler: report the adapter's PCI bus and slot
 * numbers to userspace in a small locally-declared struct.
 */
3439 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3441 	struct aac_pci_info {
3447 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3449 	pciinf.bus = pci_get_bus(sc->aac_dev);
3450 	pciinf.slot = pci_get_slot(sc->aac_dev);
3452 	error = copyout((caddr_t)&pciinf, uptr,
3453 			sizeof(struct aac_pci_info));
/*
 * FSACTL_GET_FEATURES handler: report driver feature state.
 * With featuresState all-zero, return the state of every supported
 * feature (currently largeLBA and JBODSupport); with a specific bit
 * set, return only that feature's state.
 */
3459 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3461 	struct aac_features f;
3464 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3466 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3470 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3471 	 * ALL zero in the featuresState, the driver will return the current
3472 	 * state of all the supported features, the data field will not be
3474 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3475 	 * a specific bit set in the featuresState, the driver will return the
3476 	 * current state of this specific feature and whatever data that are
3477 	 * associated with the feature in the data field or perform whatever
3478 	 * action needed indicates in the data field.
3480 	if (f.feat.fValue == 0) {
3481 		f.feat.fBits.largeLBA =
3482 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3483 		f.feat.fBits.JBODSupport = 1;
3484 		/* TODO: In the future, add other features state here as well */
3486 		if (f.feat.fBits.largeLBA)
3487 			f.feat.fBits.largeLBA =
3488 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3489 		/* TODO: Add other features state and data in the future */
3492 	error = copyout(&f, uptr, sizeof (f));
3497 * Give the userland some information about the container.  The AAC arch
3498 * expects the driver to be a SCSI passthrough type driver, so it expects
3499 * the containers to have b:t:l numbers.  Fake it.
/*
 * FSACTL_QUERY_DISK handler: look the requested container up by
 * ObjectId and fill in a synthetic bus/target mapping (Valid/Locked
 * set when found, Deleted when not). Runs under aac_io_lock.
 */
3502 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3504 	struct aac_query_disk query_disk;
3505 	struct aac_container *co;
3508 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3510 	mtx_lock(&sc->aac_io_lock);
3511 	error = copyin(uptr, (caddr_t)&query_disk,
3512 		       sizeof(struct aac_query_disk));
3514 		mtx_unlock(&sc->aac_io_lock);
3518 	id = query_disk.ContainerNumber;
3520 		mtx_unlock(&sc->aac_io_lock);
3524 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3525 		if (co->co_mntobj.ObjectId == id)
	/* Not found: report it as deleted rather than failing. */
3530 		query_disk.Valid = 0;
3531 		query_disk.Locked = 0;
3532 		query_disk.Deleted = 1;		/* XXX is this right? */
3534 		query_disk.Valid = 1;
3535 		query_disk.Locked = 1;
3536 		query_disk.Deleted = 0;
3537 		query_disk.Bus = device_get_unit(sc->aac_dev);
3538 		query_disk.Target = 0;
3540 		query_disk.UnMapped = 0;
3543 	error = copyout((caddr_t)&query_disk, uptr,
3544 			sizeof(struct aac_query_disk));
3546 	mtx_unlock(&sc->aac_io_lock);
/*
 * Create the virtual "container bus": allocate an aac_sim, add an
 * aacraidp child device for it, fill in the bus parameters, and queue
 * it on aac_sim_tqh. Allocation failures panic (the driver cannot
 * operate without the container bus).
 */
3551 aac_container_bus(struct aac_softc *sc)
3553 	struct aac_sim *sim;
3556 	sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3557 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3559 		device_printf(sc->aac_dev,
3560 		"No memory to add container bus\n");
3561 		panic("Out of memory?!");
3563 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3564 	if (child == NULL) {
3565 		device_printf(sc->aac_dev,
3566 		"device_add_child failed for container bus\n");
3567 		free(sim, M_AACRAIDBUF);
3568 		panic("Out of memory?!");
3571 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3573 	sim->BusType = CONTAINER_BUS;
3574 	sim->InitiatorBusId = -1;
3576 	sim->sim_dev = child;
3577 	sim->aac_cam = NULL;
3579 	device_set_ivars(child, sim);
3580 	device_set_desc(child, "Container Bus");
3581 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
	/* NOTE(review): 'mir' is not declared in this function; the two
	 * lines below likely belong to elided neighboring code — confirm
	 * against full source. */
3583 		device_set_desc(child, aac_describe_code(aac_container_types,
3584 				mir->MntTable[0].VolType));
3586 	bus_generic_attach(sc->aac_dev);
/*
 * Discover the adapter's physical SCSI passthrough buses: ask the
 * firmware for its SCSI method id (CT_GET_SCSI_METHOD), then issue a
 * GetBusInfo VM ioctl, and for every valid bus create an aacraidp
 * child device with its aac_sim description. Sync-FIB work runs under
 * aac_io_lock; device creation happens after the lock is dropped.
 */
3590 aac_get_bus_info(struct aac_softc *sc)
3592 	struct aac_fib *fib;
3593 	struct aac_ctcfg *c_cmd;
3594 	struct aac_ctcfg_resp *c_resp;
3595 	struct aac_vmioctl *vmi;
3596 	struct aac_vmi_businf_resp *vmi_resp;
3597 	struct aac_getbusinf businfo;
3598 	struct aac_sim *caminf;
3602 	mtx_lock(&sc->aac_io_lock);
3603 	aac_alloc_sync_fib(sc, &fib);
3604 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3605 	bzero(c_cmd, sizeof(struct aac_ctcfg));
	/* Step 1: learn the SCSI method id needed for VM_Ioctl calls. */
3607 	c_cmd->Command = VM_ContainerConfig;
3608 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3611 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3612 	    sizeof(struct aac_ctcfg));
3614 		device_printf(sc->aac_dev, "Error %d sending "
3615 		    "VM_ContainerConfig command\n", error);
3616 		aac_release_sync_fib(sc);
3617 		mtx_unlock(&sc->aac_io_lock);
3621 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3622 	if (c_resp->Status != ST_OK) {
3623 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3625 		aac_release_sync_fib(sc);
3626 		mtx_unlock(&sc->aac_io_lock);
3630 	sc->scsi_method_id = c_resp->param;
	/* Step 2: fetch the bus inventory via a GetBusInfo VM ioctl. */
3632 	vmi = (struct aac_vmioctl *)&fib->data[0];
3633 	bzero(vmi, sizeof(struct aac_vmioctl));
3635 	vmi->Command = VM_Ioctl;
3636 	vmi->ObjType = FT_DRIVE;
3637 	vmi->MethId = sc->scsi_method_id;
3639 	vmi->IoctlCmd = GetBusInfo;
3641 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3642 	    sizeof(struct aac_vmi_businf_resp));
3644 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3646 		aac_release_sync_fib(sc);
3647 		mtx_unlock(&sc->aac_io_lock);
3651 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3652 	if (vmi_resp->Status != ST_OK) {
3653 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3655 		aac_release_sync_fib(sc);
3656 		mtx_unlock(&sc->aac_io_lock);
	/* Copy the response out before releasing the FIB it lives in. */
3660 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3661 	aac_release_sync_fib(sc);
3662 	mtx_unlock(&sc->aac_io_lock);
	/* Step 3: create an aacraidp child per valid physical bus. */
3664 	for (i = 0; i < businfo.BusCount; i++) {
3665 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3668 		caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3669 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3670 		if (caminf == NULL) {
3671 			device_printf(sc->aac_dev,
3672 			    "No memory to add passthrough bus %d\n", i);
3676 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3677 		if (child == NULL) {
3678 			device_printf(sc->aac_dev,
3679 			    "device_add_child failed for passthrough bus %d\n",
3681 			free(caminf, M_AACRAIDBUF);
		/* Bus numbers start at 1; bus 0 is the container bus. */
3685 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3686 		caminf->BusNumber = i+1;
3687 		caminf->BusType = PASSTHROUGH_BUS;
3688 		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3689 		caminf->aac_sc = sc;
3690 		caminf->sim_dev = child;
3691 		caminf->aac_cam = NULL;
3693 		device_set_ivars(child, caminf);
3694 		device_set_desc(child, "SCSI Passthrough Bus");
3695 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3700 * Check to see if the kernel is up and running. If we are in a
3701 * BlinkLED state, return the BlinkLED code.
/*
 * Read the firmware status register. If the adapter reports a kernel
 * panic and the caller supplied 'bled', extract the BlinkLED code
 * from bits 16-23. Return value handling is elided in this listing.
 */
3704 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3708 	ret = AAC_GET_FWSTATUS(sc);
3710 	if (ret & AAC_UP_AND_RUNNING)
3712 	else if (ret & AAC_KERNEL_PANIC && bled)
3713 		*bled = (ret >> 16) & 0xff;
3719 * Once do an IOP reset, basically have to re-initialize the card as
3720 * if coming up from a cold boot, and the driver is responsible for
3721 * any IO that was outstanding to the adapter at the time of the IOP
3722 * RESET. And prepare the driver for IOP RESET by making the init code
3723 * modular with the ability to call it from multiple places.
3726 aac_reset_adapter(struct aac_softc *sc)
3728 struct aac_command *cm;
3729 struct aac_fib *fib;
3730 struct aac_pause_command *pc;
3731 u_int32_t status, reset_mask, waitCount, max_msix_orig;
3732 int msi_enabled_orig;
3734 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3735 mtx_assert(&sc->aac_io_lock, MA_OWNED);
3737 if (sc->aac_state & AAC_STATE_RESET) {
3738 device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3741 sc->aac_state |= AAC_STATE_RESET;
3743 /* disable interrupt */
3744 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3747 * Abort all pending commands:
3748 * a) on the controller
3750 while ((cm = aac_dequeue_busy(sc)) != NULL) {
3751 cm->cm_flags |= AAC_CMD_RESET;
3753 /* is there a completion handler? */
3754 if (cm->cm_complete != NULL) {
3755 cm->cm_complete(cm);
3757 /* assume that someone is sleeping on this
3764 /* b) in the waiting queues */
3765 while ((cm = aac_dequeue_ready(sc)) != NULL) {
3766 cm->cm_flags |= AAC_CMD_RESET;
3768 /* is there a completion handler? */
3769 if (cm->cm_complete != NULL) {
3770 cm->cm_complete(cm);
3772 /* assume that someone is sleeping on this
3780 if (aac_check_adapter_health(sc, NULL) == 0) {
3781 mtx_unlock(&sc->aac_io_lock);
3782 (void) aacraid_shutdown(sc->aac_dev);
3783 mtx_lock(&sc->aac_io_lock);
3786 /* execute IOP reset */
3787 if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3788 AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3790 /* We need to wait for 5 seconds before accessing the MU again
3791 * 10000 * 100us = 1000,000us = 1000ms = 1s
3793 waitCount = 5 * 10000;
3795 DELAY(100); /* delay 100 microseconds */
3798 } else if ((aacraid_sync_command(sc,
3799 AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
3800 /* call IOP_RESET for older firmware */
3801 if ((aacraid_sync_command(sc,
3802 AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {
3804 if (status == AAC_SRB_STS_INVALID_REQUEST)
3805 device_printf(sc->aac_dev, "IOP_RESET not supported\n");
3807 /* probably timeout */
3808 device_printf(sc->aac_dev, "IOP_RESET failed\n");
3810 /* unwind aac_shutdown() */
3811 aac_alloc_sync_fib(sc, &fib);
3812 pc = (struct aac_pause_command *)&fib->data[0];
3813 pc->Command = VM_ContainerConfig;
3814 pc->Type = CT_PAUSE_IO;
3819 (void) aac_sync_fib(sc, ContainerCommand, 0, fib,
3820 sizeof (struct aac_pause_command));
3821 aac_release_sync_fib(sc);
3825 } else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
3826 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3828 * We need to wait for 5 seconds before accessing the doorbell
3829 * again, 10000 * 100us = 1000,000us = 1000ms = 1s
3831 waitCount = 5 * 10000;
3833 DELAY(100); /* delay 100 microseconds */
3839 * Initialize the adapter.
3841 max_msix_orig = sc->aac_max_msix;
3842 msi_enabled_orig = sc->msi_enabled;
3843 sc->msi_enabled = FALSE;
3844 if (aac_check_firmware(sc) != 0)
3846 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3847 sc->aac_max_msix = max_msix_orig;
3848 if (msi_enabled_orig) {
3849 sc->msi_enabled = msi_enabled_orig;
3850 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3852 mtx_unlock(&sc->aac_io_lock);
3854 mtx_lock(&sc->aac_io_lock);
3858 sc->aac_state &= ~AAC_STATE_RESET;
3859 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3860 aacraid_startio(sc);