2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2001 Scott Long
4 * Copyright (c) 2000 BSDi
5 * Copyright (c) 2001-2010 Adaptec, Inc.
6 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
37 #define AAC_DRIVERNAME "aacraid"
39 #include "opt_aacraid.h"
41 /* #include <stddef.h> */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/sysctl.h>
49 #include <sys/ioccom.h>
53 #include <sys/signalvar.h>
55 #include <sys/eventhandler.h>
58 #include <machine/bus.h>
59 #include <sys/bus_dma.h>
60 #include <machine/resource.h>
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
65 #include <dev/aacraid/aacraid_reg.h>
66 #include <sys/aac_ioctl.h>
67 #include <dev/aacraid/aacraid_debug.h>
68 #include <dev/aacraid/aacraid_var.h>
70 #ifndef FILTER_HANDLED
71 #define FILTER_HANDLED 0x02
74 static void aac_add_container(struct aac_softc *sc,
75 struct aac_mntinforesp *mir, int f,
77 static void aac_get_bus_info(struct aac_softc *sc);
78 static void aac_container_bus(struct aac_softc *sc);
79 static void aac_daemon(void *arg);
80 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
81 int pages, int nseg, int nseg_new);
83 /* Command Processing */
84 static void aac_timeout(struct aac_softc *sc);
85 static void aac_command_thread(struct aac_softc *sc);
86 static int aac_sync_fib(struct aac_softc *sc, u_int32_t command,
87 u_int32_t xferstate, struct aac_fib *fib,
89 /* Command Buffer Management */
90 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
92 static int aac_alloc_commands(struct aac_softc *sc);
93 static void aac_free_commands(struct aac_softc *sc);
94 static void aac_unmap_command(struct aac_command *cm);
96 /* Hardware Interface */
97 static int aac_alloc(struct aac_softc *sc);
98 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
100 static int aac_check_firmware(struct aac_softc *sc);
101 static void aac_define_int_mode(struct aac_softc *sc);
102 static int aac_init(struct aac_softc *sc);
103 static int aac_find_pci_capability(struct aac_softc *sc, int cap);
104 static int aac_setup_intr(struct aac_softc *sc);
105 static int aac_check_config(struct aac_softc *sc);
107 /* PMC SRC interface */
108 static int aac_src_get_fwstatus(struct aac_softc *sc);
109 static void aac_src_qnotify(struct aac_softc *sc, int qbit);
110 static int aac_src_get_istatus(struct aac_softc *sc);
111 static void aac_src_clear_istatus(struct aac_softc *sc, int mask);
112 static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
113 u_int32_t arg0, u_int32_t arg1,
114 u_int32_t arg2, u_int32_t arg3);
115 static int aac_src_get_mailbox(struct aac_softc *sc, int mb);
116 static void aac_src_access_devreg(struct aac_softc *sc, int mode);
117 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
118 static int aac_src_get_outb_queue(struct aac_softc *sc);
119 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
121 struct aac_interface aacraid_src_interface = {
122 aac_src_get_fwstatus,
125 aac_src_clear_istatus,
128 aac_src_access_devreg,
129 aac_src_send_command,
130 aac_src_get_outb_queue,
131 aac_src_set_outb_queue
134 /* PMC SRCv interface */
135 static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
136 u_int32_t arg0, u_int32_t arg1,
137 u_int32_t arg2, u_int32_t arg3);
138 static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
140 struct aac_interface aacraid_srcv_interface = {
141 aac_src_get_fwstatus,
144 aac_src_clear_istatus,
145 aac_srcv_set_mailbox,
146 aac_srcv_get_mailbox,
147 aac_src_access_devreg,
148 aac_src_send_command,
149 aac_src_get_outb_queue,
150 aac_src_set_outb_queue
153 /* Debugging and Diagnostics */
154 static struct aac_code_lookup aac_cpu_variant[] = {
155 {"i960JX", CPUI960_JX},
156 {"i960CX", CPUI960_CX},
157 {"i960HX", CPUI960_HX},
158 {"i960RX", CPUI960_RX},
159 {"i960 80303", CPUI960_80303},
160 {"StrongARM SA110", CPUARM_SA110},
161 {"PPC603e", CPUPPC_603e},
162 {"XScale 80321", CPU_XSCALE_80321},
163 {"MIPS 4KC", CPU_MIPS_4KC},
164 {"MIPS 5KC", CPU_MIPS_5KC},
165 {"Unknown StrongARM", CPUARM_xxx},
166 {"Unknown PowerPC", CPUPPC_xxx},
168 {"Unknown processor", 0}
171 static struct aac_code_lookup aac_battery_platform[] = {
172 {"required battery present", PLATFORM_BAT_REQ_PRESENT},
173 {"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT},
174 {"optional battery present", PLATFORM_BAT_OPT_PRESENT},
175 {"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT},
176 {"no battery support", PLATFORM_BAT_NOT_SUPPORTED},
178 {"unknown battery platform", 0}
180 static void aac_describe_controller(struct aac_softc *sc);
181 static char *aac_describe_code(struct aac_code_lookup *table,
184 /* Management Interface */
185 static d_open_t aac_open;
186 static d_ioctl_t aac_ioctl;
187 static d_poll_t aac_poll;
188 #if __FreeBSD_version >= 702000
189 static void aac_cdevpriv_dtor(void *arg);
191 static d_close_t aac_close;
193 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
194 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
195 static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
196 static void aac_request_aif(struct aac_softc *sc);
197 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
198 static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
199 static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
200 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
201 static int aac_return_aif(struct aac_softc *sc,
202 struct aac_fib_context *ctx, caddr_t uptr);
203 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
204 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
205 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
206 static void aac_ioctl_event(struct aac_softc *sc,
207 struct aac_event *event, void *arg);
208 static int aac_reset_adapter(struct aac_softc *sc);
209 static int aac_get_container_info(struct aac_softc *sc,
210 struct aac_fib *fib, int cid,
211 struct aac_mntinforesp *mir,
214 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
216 static struct cdevsw aacraid_cdevsw = {
217 .d_version = D_VERSION,
218 .d_flags = D_NEEDGIANT,
220 #if __FreeBSD_version < 702000
221 .d_close = aac_close,
223 .d_ioctl = aac_ioctl,
228 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
231 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
238 * Initialize the controller and softc
241 aacraid_attach(struct aac_softc *sc)
245 struct aac_mntinforesp mir;
246 int count = 0, i = 0;
249 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
250 sc->hint_flags = device_get_flags(sc->aac_dev);
252 * Initialize per-controller queues.
258 /* mark controller as suspended until we get ourselves organised */
259 sc->aac_state |= AAC_STATE_SUSPEND;
262 * Check that the firmware on the card is supported.
264 sc->msi_enabled = FALSE;
265 if ((error = aac_check_firmware(sc)) != 0)
271 mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
272 TAILQ_INIT(&sc->aac_container_tqh);
273 TAILQ_INIT(&sc->aac_ev_cmfree);
275 #if __FreeBSD_version >= 800000
276 /* Initialize the clock daemon callout. */
277 callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
280 * Initialize the adapter.
282 if ((error = aac_alloc(sc)) != 0)
284 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
285 aac_define_int_mode(sc);
286 if ((error = aac_init(sc)) != 0)
291 * Allocate and connect our interrupt.
293 if ((error = aac_setup_intr(sc)) != 0)
297 * Print a little information about the controller.
299 aac_describe_controller(sc);
302 * Make the control device.
304 unit = device_get_unit(sc->aac_dev);
305 sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
306 0640, "aacraid%d", unit);
307 sc->aac_dev_t->si_drv1 = sc;
309 /* Create the AIF thread */
310 if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
311 &sc->aifthread, 0, 0, "aacraid%daif", unit))
312 panic("Could not create AIF thread");
314 /* Register the shutdown method to only be called post-dump */
315 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
316 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
317 device_printf(sc->aac_dev,
318 "shutdown event registration failed\n");
320 /* Find containers */
321 mtx_lock(&sc->aac_io_lock);
322 aac_alloc_sync_fib(sc, &fib);
323 /* loop over possible containers */
325 if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
328 count = mir.MntRespCount;
329 aac_add_container(sc, &mir, 0, uid);
331 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
332 aac_release_sync_fib(sc);
333 mtx_unlock(&sc->aac_io_lock);
335 /* Register with CAM for the containers */
336 TAILQ_INIT(&sc->aac_sim_tqh);
337 aac_container_bus(sc);
338 /* Register with CAM for the non-DASD devices */
339 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
340 aac_get_bus_info(sc);
342 /* poke the bus to actually attach the child devices */
343 bus_generic_attach(sc->aac_dev);
345 /* mark the controller up */
346 sc->aac_state &= ~AAC_STATE_SUSPEND;
348 /* enable interrupts now */
349 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
351 #if __FreeBSD_version >= 800000
352 mtx_lock(&sc->aac_io_lock);
353 callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
354 mtx_unlock(&sc->aac_io_lock);
360 sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
368 aac_daemon(void *arg)
370 struct aac_softc *sc;
372 struct aac_command *cm;
376 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
378 #if __FreeBSD_version >= 800000
379 mtx_assert(&sc->aac_io_lock, MA_OWNED);
380 if (callout_pending(&sc->aac_daemontime) ||
381 callout_active(&sc->aac_daemontime) == 0)
384 mtx_lock(&sc->aac_io_lock);
388 if (!aacraid_alloc_command(sc, &cm)) {
390 cm->cm_timestamp = time_uptime;
392 cm->cm_flags |= AAC_CMD_WAIT;
395 sizeof(struct aac_fib_header) + sizeof(u_int32_t);
396 fib->Header.XferState =
397 AAC_FIBSTATE_HOSTOWNED |
398 AAC_FIBSTATE_INITIALISED |
400 AAC_FIBSTATE_FROMHOST |
401 AAC_FIBSTATE_REXPECTED |
404 AAC_FIBSTATE_FAST_RESPONSE;
405 fib->Header.Command = SendHostTime;
406 *(uint32_t *)fib->data = tv.tv_sec;
408 aacraid_map_command_sg(cm, NULL, 0, 0);
409 aacraid_release_command(cm);
412 #if __FreeBSD_version >= 800000
413 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
415 mtx_unlock(&sc->aac_io_lock);
418 sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
423 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
426 switch (event->ev_type & AAC_EVENT_MASK) {
427 case AAC_EVENT_CMFREE:
428 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
431 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
440 * Request information of container #cid
443 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
444 struct aac_mntinforesp *mir, u_int32_t *uid)
446 struct aac_command *cm;
448 struct aac_mntinfo *mi;
449 struct aac_cnt_config *ccfg;
452 if (sync_fib == NULL) {
453 if (aacraid_alloc_command(sc, &cm)) {
454 device_printf(sc->aac_dev,
455 "Warning, no free command available\n");
463 mi = (struct aac_mntinfo *)&fib->data[0];
464 /* 4KB support?, 64-bit LBA? */
465 if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
466 mi->Command = VM_NameServeAllBlk;
467 else if (sc->flags & AAC_FLAGS_LBA_64BIT)
468 mi->Command = VM_NameServe64;
470 mi->Command = VM_NameServe;
471 mi->MntType = FT_FILESYS;
475 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
476 sizeof(struct aac_mntinfo))) {
477 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
481 cm->cm_timestamp = time_uptime;
485 sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
486 fib->Header.XferState =
487 AAC_FIBSTATE_HOSTOWNED |
488 AAC_FIBSTATE_INITIALISED |
490 AAC_FIBSTATE_FROMHOST |
491 AAC_FIBSTATE_REXPECTED |
494 AAC_FIBSTATE_FAST_RESPONSE;
495 fib->Header.Command = ContainerCommand;
496 if (aacraid_wait_command(cm) != 0) {
497 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
498 aacraid_release_command(cm);
502 bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
506 if (mir->MntTable[0].VolType != CT_NONE &&
507 !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
508 if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
509 mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
510 mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
512 ccfg = (struct aac_cnt_config *)&fib->data[0];
513 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
514 ccfg->Command = VM_ContainerConfig;
515 ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
516 ccfg->CTCommand.param[0] = cid;
519 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
520 sizeof(struct aac_cnt_config));
521 if (rval == 0 && ccfg->Command == ST_OK &&
522 ccfg->CTCommand.param[0] == CT_OK &&
523 mir->MntTable[0].VolType != CT_PASSTHRU)
524 *uid = ccfg->CTCommand.param[1];
527 sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
528 fib->Header.XferState =
529 AAC_FIBSTATE_HOSTOWNED |
530 AAC_FIBSTATE_INITIALISED |
532 AAC_FIBSTATE_FROMHOST |
533 AAC_FIBSTATE_REXPECTED |
536 AAC_FIBSTATE_FAST_RESPONSE;
537 fib->Header.Command = ContainerCommand;
538 rval = aacraid_wait_command(cm);
539 if (rval == 0 && ccfg->Command == ST_OK &&
540 ccfg->CTCommand.param[0] == CT_OK &&
541 mir->MntTable[0].VolType != CT_PASSTHRU)
542 *uid = ccfg->CTCommand.param[1];
543 aacraid_release_command(cm);
551 * Create a device to represent a new container
554 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
557 struct aac_container *co;
559 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
562 * Check container volume type for validity. Note that many of
563 * the possible types may never show up.
565 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
566 co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
569 panic("Out of memory?!");
573 bcopy(&mir->MntTable[0], &co->co_mntobj,
574 sizeof(struct aac_mntobj));
576 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
581 * Allocate resources associated with (sc)
584 aac_alloc(struct aac_softc *sc)
588 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
591 * Create DMA tag for mapping buffers into controller-addressable space.
593 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
594 1, 0, /* algnmnt, boundary */
595 (sc->flags & AAC_FLAGS_SG_64BIT) ?
597 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
598 BUS_SPACE_MAXADDR, /* highaddr */
599 NULL, NULL, /* filter, filterarg */
600 MAXBSIZE, /* maxsize */
601 sc->aac_sg_tablesize, /* nsegments */
602 MAXBSIZE, /* maxsegsize */
603 BUS_DMA_ALLOCNOW, /* flags */
604 busdma_lock_mutex, /* lockfunc */
605 &sc->aac_io_lock, /* lockfuncarg */
606 &sc->aac_buffer_dmat)) {
607 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
612 * Create DMA tag for mapping FIBs into controller-addressable space..
614 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
615 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
616 sizeof(struct aac_fib_xporthdr) + 31);
618 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
619 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
620 1, 0, /* algnmnt, boundary */
621 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
622 BUS_SPACE_MAXADDR_32BIT :
623 0x7fffffff, /* lowaddr */
624 BUS_SPACE_MAXADDR, /* highaddr */
625 NULL, NULL, /* filter, filterarg */
626 maxsize, /* maxsize */
628 maxsize, /* maxsize */
630 NULL, NULL, /* No locking needed */
631 &sc->aac_fib_dmat)) {
632 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
637 * Create DMA tag for the common structure and allocate it.
639 maxsize = sizeof(struct aac_common);
640 maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
641 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
642 1, 0, /* algnmnt, boundary */
643 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
644 BUS_SPACE_MAXADDR_32BIT :
645 0x7fffffff, /* lowaddr */
646 BUS_SPACE_MAXADDR, /* highaddr */
647 NULL, NULL, /* filter, filterarg */
648 maxsize, /* maxsize */
650 maxsize, /* maxsegsize */
652 NULL, NULL, /* No locking needed */
653 &sc->aac_common_dmat)) {
654 device_printf(sc->aac_dev,
655 "can't allocate common structure DMA tag\n");
658 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
659 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
660 device_printf(sc->aac_dev, "can't allocate common structure\n");
664 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
665 sc->aac_common, maxsize,
666 aac_common_map, sc, 0);
667 bzero(sc->aac_common, maxsize);
669 /* Allocate some FIBs and associated command structs */
670 TAILQ_INIT(&sc->aac_fibmap_tqh);
671 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
672 M_AACRAIDBUF, M_WAITOK|M_ZERO);
673 mtx_lock(&sc->aac_io_lock);
674 while (sc->total_fibs < sc->aac_max_fibs) {
675 if (aac_alloc_commands(sc) != 0)
678 mtx_unlock(&sc->aac_io_lock);
679 if (sc->total_fibs == 0)
686 * Free all of the resources associated with (sc)
688 * Should not be called if the controller is active.
691 aacraid_free(struct aac_softc *sc)
695 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
697 /* remove the control device */
698 if (sc->aac_dev_t != NULL)
699 destroy_dev(sc->aac_dev_t);
701 /* throw away any FIB buffers, discard the FIB DMA tag */
702 aac_free_commands(sc);
703 if (sc->aac_fib_dmat)
704 bus_dma_tag_destroy(sc->aac_fib_dmat);
706 free(sc->aac_commands, M_AACRAIDBUF);
708 /* destroy the common area */
709 if (sc->aac_common) {
710 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
711 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
712 sc->aac_common_dmamap);
714 if (sc->aac_common_dmat)
715 bus_dma_tag_destroy(sc->aac_common_dmat);
717 /* disconnect the interrupt handler */
718 for (i = 0; i < AAC_MAX_MSIX; ++i) {
720 bus_teardown_intr(sc->aac_dev,
721 sc->aac_irq[i], sc->aac_intr[i]);
723 bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
724 sc->aac_irq_rid[i], sc->aac_irq[i]);
729 pci_release_msi(sc->aac_dev);
731 /* destroy data-transfer DMA tag */
732 if (sc->aac_buffer_dmat)
733 bus_dma_tag_destroy(sc->aac_buffer_dmat);
735 /* destroy the parent DMA tag */
736 if (sc->aac_parent_dmat)
737 bus_dma_tag_destroy(sc->aac_parent_dmat);
739 /* release the register window mapping */
740 if (sc->aac_regs_res0 != NULL)
741 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
742 sc->aac_regs_rid0, sc->aac_regs_res0);
743 if (sc->aac_regs_res1 != NULL)
744 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
745 sc->aac_regs_rid1, sc->aac_regs_res1);
749 * Disconnect from the controller completely, in preparation for unload.
752 aacraid_detach(device_t dev)
754 struct aac_softc *sc;
755 struct aac_container *co;
759 sc = device_get_softc(dev);
760 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
762 #if __FreeBSD_version >= 800000
763 callout_drain(&sc->aac_daemontime);
765 untimeout(aac_daemon, (void *)sc, sc->timeout_id);
767 /* Remove the child containers */
768 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
769 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
770 free(co, M_AACRAIDBUF);
773 /* Remove the CAM SIMs */
774 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
775 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
776 error = device_delete_child(dev, sim->sim_dev);
779 free(sim, M_AACRAIDBUF);
782 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
783 sc->aifflags |= AAC_AIFFLAGS_EXIT;
784 wakeup(sc->aifthread);
785 tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
788 if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
789 panic("Cannot shutdown AIF thread");
791 if ((error = aacraid_shutdown(dev)))
794 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
798 mtx_destroy(&sc->aac_io_lock);
804 * Bring the controller down to a dormant state and detach all child devices.
806 * This function is called before detach or system shutdown.
808 * Note that we can assume that the bioq on the controller is empty, as we won't
809 * allow shutdown if any device is open.
812 aacraid_shutdown(device_t dev)
814 struct aac_softc *sc;
816 struct aac_close_command *cc;
818 sc = device_get_softc(dev);
819 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
821 sc->aac_state |= AAC_STATE_SUSPEND;
824 * Send a Container shutdown followed by a HostShutdown FIB to the
825 * controller to convince it that we don't want to talk to it anymore.
826 * We've been closed and all I/O completed already
828 device_printf(sc->aac_dev, "shutting down controller...");
830 mtx_lock(&sc->aac_io_lock);
831 aac_alloc_sync_fib(sc, &fib);
832 cc = (struct aac_close_command *)&fib->data[0];
834 bzero(cc, sizeof(struct aac_close_command));
835 cc->Command = VM_CloseAll;
836 cc->ContainerId = 0xfffffffe;
837 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
838 sizeof(struct aac_close_command)))
843 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
844 aac_release_sync_fib(sc);
845 mtx_unlock(&sc->aac_io_lock);
851 * Bring the controller to a quiescent state, ready for system suspend.
854 aacraid_suspend(device_t dev)
856 struct aac_softc *sc;
858 sc = device_get_softc(dev);
860 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
861 sc->aac_state |= AAC_STATE_SUSPEND;
863 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
868 * Bring the controller back to a state ready for operation.
871 aacraid_resume(device_t dev)
873 struct aac_softc *sc;
875 sc = device_get_softc(dev);
877 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
878 sc->aac_state &= ~AAC_STATE_SUSPEND;
879 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
884 * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
887 aacraid_new_intr_type1(void *arg)
889 struct aac_msix_ctx *ctx;
890 struct aac_softc *sc;
892 struct aac_command *cm;
894 u_int32_t bellbits, bellbits_shifted, index, handle;
895 int isFastResponse, isAif, noMoreAif, mode;
897 ctx = (struct aac_msix_ctx *)arg;
899 vector_no = ctx->vector_no;
901 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
902 mtx_lock(&sc->aac_io_lock);
904 if (sc->msi_enabled) {
905 mode = AAC_INT_MODE_MSI;
906 if (vector_no == 0) {
907 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
908 if (bellbits & 0x40000)
909 mode |= AAC_INT_MODE_AIF;
910 else if (bellbits & 0x1000)
911 mode |= AAC_INT_MODE_SYNC;
914 mode = AAC_INT_MODE_INTX;
915 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
916 if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
917 bellbits = AAC_DB_RESPONSE_SENT_NS;
918 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
920 bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
921 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
922 if (bellbits_shifted & AAC_DB_AIF_PENDING)
923 mode |= AAC_INT_MODE_AIF;
924 else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
925 mode |= AAC_INT_MODE_SYNC;
927 /* ODR readback, Prep #238630 */
928 AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
931 if (mode & AAC_INT_MODE_SYNC) {
932 if (sc->aac_sync_cm) {
933 cm = sc->aac_sync_cm;
934 cm->cm_flags |= AAC_CMD_COMPLETED;
935 /* is there a completion handler? */
936 if (cm->cm_complete != NULL) {
939 /* assume that someone is sleeping on this command */
942 sc->flags &= ~AAC_QUEUE_FRZN;
943 sc->aac_sync_cm = NULL;
948 if (mode & AAC_INT_MODE_AIF) {
949 if (mode & AAC_INT_MODE_INTX) {
956 /* handle async. status */
957 index = sc->aac_host_rrq_idx[vector_no];
959 isFastResponse = isAif = noMoreAif = 0;
960 /* remove toggle bit (31) */
961 handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
962 /* check fast response bit (30) */
963 if (handle & 0x40000000)
965 /* check AIF bit (23) */
966 else if (handle & 0x00800000)
968 handle &= 0x0000ffff;
972 cm = sc->aac_commands + (handle - 1);
974 sc->aac_rrq_outstanding[vector_no]--;
976 noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
978 aac_handle_aif(sc, fib);
980 aacraid_release_command(cm);
982 if (isFastResponse) {
983 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
984 *((u_int32_t *)(fib->data)) = ST_OK;
985 cm->cm_flags |= AAC_CMD_FASTRESP;
988 aac_unmap_command(cm);
989 cm->cm_flags |= AAC_CMD_COMPLETED;
991 /* is there a completion handler? */
992 if (cm->cm_complete != NULL) {
995 /* assume that someone is sleeping on this command */
998 sc->flags &= ~AAC_QUEUE_FRZN;
1001 sc->aac_common->ac_host_rrq[index++] = 0;
1002 if (index == (vector_no + 1) * sc->aac_vector_cap)
1003 index = vector_no * sc->aac_vector_cap;
1004 sc->aac_host_rrq_idx[vector_no] = index;
1006 if ((isAif && !noMoreAif) || sc->aif_pending)
1007 aac_request_aif(sc);
1011 if (mode & AAC_INT_MODE_AIF) {
1012 aac_request_aif(sc);
1013 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1017 /* see if we can start some more I/O */
1018 if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1019 aacraid_startio(sc);
1020 mtx_unlock(&sc->aac_io_lock);
1024 * Handle notification of one or more FIBs coming from the controller.
1027 aac_command_thread(struct aac_softc *sc)
1031 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1033 mtx_lock(&sc->aac_io_lock);
1034 sc->aifflags = AAC_AIFFLAGS_RUNNING;
1036 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1039 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1040 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1041 "aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1044 * First see if any FIBs need to be allocated. This needs
1045 * to be called without the driver lock because contigmalloc
1046 * will grab Giant, and would result in an LOR.
1048 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1049 aac_alloc_commands(sc);
1050 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1051 aacraid_startio(sc);
1055 * While we're here, check to see if any commands are stuck.
1056 * This is pretty low-priority, so it's ok if it doesn't
1059 if (retval == EWOULDBLOCK)
1062 /* Check the hardware printf message buffer */
1063 if (sc->aac_common->ac_printf[0] != 0)
1064 aac_print_printf(sc);
1066 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1067 mtx_unlock(&sc->aac_io_lock);
1068 wakeup(sc->aac_dev);
1070 aac_kthread_exit(0);
1074 * Submit a command to the controller, return when it completes.
1075 * XXX This is very dangerous! If the card has gone out to lunch, we could
1076 * be stuck here forever. At the same time, signals are not caught
1077 * because there is a risk that a signal could wakeup the sleep before
1078 * the card has a chance to complete the command. Since there is no way
1079 * to cancel a command that is in progress, we can't protect against the
1080 * card completing a command late and spamming the command and data
1081 * memory. So, we are held hostage until the command completes.
1084 aacraid_wait_command(struct aac_command *cm)
1086 struct aac_softc *sc;
1090 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1091 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1093 /* Put the command on the ready queue and get things going */
1094 aac_enqueue_ready(cm);
1095 aacraid_startio(sc);
1096 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1101 *Command Buffer Management
1105 * Allocate a command.
1108 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1110 struct aac_command *cm;
1112 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1114 if ((cm = aac_dequeue_free(sc)) == NULL) {
1115 if (sc->total_fibs < sc->aac_max_fibs) {
1116 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1117 wakeup(sc->aifthread);
1127 * Release a command back to the freelist.
1130 aacraid_release_command(struct aac_command *cm)
1132 struct aac_event *event;
1133 struct aac_softc *sc;
1136 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1137 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1139 /* (re)initialize the command/FIB */
1140 cm->cm_sgtable = NULL;
1142 cm->cm_complete = NULL;
1144 cm->cm_passthr_dmat = 0;
1145 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1146 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1147 cm->cm_fib->Header.Unused = 0;
1148 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1151 * These are duplicated in aac_start to cover the case where an
1152 * intermediate stage may have destroyed them. They're left
1153 * initialized here for debugging purposes only.
1155 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1156 cm->cm_fib->Header.Handle = 0;
1158 aac_enqueue_free(cm);
1161 * Dequeue all events so that there's no risk of events getting
1164 while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1165 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1166 event->ev_callback(sc, event, event->ev_arg);
1171 * Map helper for command/FIB allocation.
1174 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1178 fibphys = (uint64_t *)arg;
1180 *fibphys = segs[0].ds_addr;
1184 * Allocate and initialize commands/FIBs for this adapter.
/*
 * Allocate and initialize one batch of commands/FIBs for this adapter.
 * Called with aac_io_lock held (asserted below); the lock is dropped around
 * the bus_dmamem_alloc() call and retaken afterwards.  A struct aac_fibmap
 * tracks the DMA-able chunk holding sc->aac_max_fibs_alloc FIBs; each FIB is
 * aligned up to a 32-byte boundary, with extra room for the transport header
 * on NEW_COMM_TYPE1 adapters.  The new fibmap is queued on aac_fibmap_tqh.
 * NOTE(review): decimated listing — error paths/braces between the numbered
 * lines are not shown.
 */
1187 aac_alloc_commands(struct aac_softc *sc)
1189 struct aac_command *cm;
1190 struct aac_fibmap *fm;
1195 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1196 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* refuse to grow past the adapter's FIB limit */
1198 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1201 fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1205 mtx_unlock(&sc->aac_io_lock);
1206 /* allocate the FIBs in DMAable memory and load them */
1207 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1208 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1209 device_printf(sc->aac_dev,
1210 "Not enough contiguous memory available.\n");
1211 free(fm, M_AACRAIDBUF);
1212 mtx_lock(&sc->aac_io_lock);
/* per-FIB stride: FIB size padded for 32-byte alignment (plus xport hdr) */
1216 maxsize = sc->aac_max_fib_size + 31;
1217 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1218 maxsize += sizeof(struct aac_fib_xporthdr);
1219 /* Ignore errors since this doesn't bounce */
1220 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1221 sc->aac_max_fibs_alloc * maxsize,
1222 aac_map_command_helper, &fibphys, 0);
1223 mtx_lock(&sc->aac_io_lock);
1225 /* initialize constant fields in the command structure */
1226 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1227 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1228 cm = sc->aac_commands + sc->total_fibs;
1229 fm->aac_commands = cm;
1231 cm->cm_fib = (struct aac_fib *)
1232 ((u_int8_t *)fm->aac_fibs + i * maxsize);
1233 cm->cm_fibphys = fibphys + i * maxsize;
/* align the FIB's bus address to 32 bytes, leaving room for the
 * transport header in front of it on TYPE1 adapters */
1234 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1235 u_int64_t fibphys_aligned;
1237 (cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1238 cm->cm_fib = (struct aac_fib *)
1239 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1240 cm->cm_fibphys = fibphys_aligned;
1242 u_int64_t fibphys_aligned;
1243 fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1244 cm->cm_fib = (struct aac_fib *)
1245 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1246 cm->cm_fibphys = fibphys_aligned;
1248 cm->cm_index = sc->total_fibs;
1250 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1251 &cm->cm_datamap)) != 0)
/* presumably keeps one FIB in reserve — TODO confirm against full source */
1253 if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1254 aacraid_release_command(cm);
1259 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1260 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
/* failure path: undo the DMA load/alloc and free the fibmap */
1264 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1265 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1266 free(fm, M_AACRAIDBUF);
1271 * Free FIBs owned by this adapter.
/*
 * Free all FIBs owned by this adapter: drain the fibmap queue, destroying
 * each command's data DMA map (bounded by total_fibs to cope with partially
 * populated maps), then unload/free the fibmap's FIB memory and the fibmap
 * itself.  NOTE(review): decimated listing — braces between numbered lines
 * are not shown.
 */
1274 aac_free_commands(struct aac_softc *sc)
1276 struct aac_fibmap *fm;
1277 struct aac_command *cm;
1280 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1282 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1284 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1286 * We check against total_fibs to handle partially
1289 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1290 cm = fm->aac_commands + i;
1291 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1293 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1294 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1295 free(fm, M_AACRAIDBUF);
1300 * Command-mapping helper function - populate this command's s/g table.
/*
 * bus_dmamap_load() callback: populate this command's scatter/gather table
 * in the FIB in one of four formats selected by FIB command / adapter flags
 * (RawIo2 IEEE-1212 SGEs, RawIo raw entries, classic 32-bit or 64-bit
 * entries), fix up the FIB addressing fields, pre-sync the data DMA map,
 * and submit the command (sync-FIB path or AAC_SEND_COMMAND).
 * NOTE(review): decimated listing — braces and some statements between the
 * numbered lines are not shown.
 */
1303 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1305 struct aac_softc *sc;
1306 struct aac_command *cm;
1307 struct aac_fib *fib;
1310 cm = (struct aac_command *)arg;
1313 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1314 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1316 /* copy into the FIB */
1317 if (cm->cm_sgtable != NULL) {
1318 if (fib->Header.Command == RawIo2) {
1319 struct aac_raw_io2 *raw;
1320 struct aac_sge_ieee1212 *sg;
1321 u_int32_t min_size = PAGE_SIZE, cur_size;
1322 int conformable = TRUE;
1324 raw = (struct aac_raw_io2 *)&fib->data[0];
1325 sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
/* copy segments while tracking whether all middle elements share the
 * nominal size ("conformable" list) and the smallest size seen */
1328 for (i = 0; i < nseg; i++) {
1329 cur_size = segs[i].ds_len;
1331 *(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1332 sg[i].length = cur_size;
1335 raw->sgeFirstSize = cur_size;
1336 } else if (i == 1) {
1337 raw->sgeNominalSize = cur_size;
1338 min_size = cur_size;
1339 } else if ((i+1) < nseg &&
1340 cur_size != raw->sgeNominalSize) {
1341 conformable = FALSE;
1342 if (cur_size < min_size)
1343 min_size = cur_size;
1347 /* not conformable: evaluate required sg elements */
1349 int j, err_found, nseg_new = nseg;
1350 for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1353 for (j = 1; j < nseg - 1; ++j) {
1354 if (sg[j].length % (i*PAGE_SIZE)) {
1358 nseg_new += (sg[j].length / (i*PAGE_SIZE));
/* rebuild the list with uniform element size if it fits the
 * s/g table and the hint flag (bit 2) doesn't forbid it */
1363 if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1364 !(sc->hint_flags & 4))
1365 nseg = aac_convert_sgraw2(sc,
1366 raw, i, nseg, nseg_new);
1368 raw->flags |= RIO2_SGL_CONFORMANT;
1371 /* update the FIB size for the s/g count */
1372 fib->Header.Size += nseg *
1373 sizeof(struct aac_sge_ieee1212);
1375 } else if (fib->Header.Command == RawIo) {
1376 struct aac_sg_tableraw *sg;
1377 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1379 for (i = 0; i < nseg; i++) {
1380 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1381 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1382 sg->SgEntryRaw[i].Next = 0;
1383 sg->SgEntryRaw[i].Prev = 0;
1384 sg->SgEntryRaw[i].Flags = 0;
1386 /* update the FIB size for the s/g count */
1387 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1388 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1389 struct aac_sg_table *sg;
1390 sg = cm->cm_sgtable;
1392 for (i = 0; i < nseg; i++) {
1393 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1394 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1396 /* update the FIB size for the s/g count */
1397 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1399 struct aac_sg_table64 *sg;
1400 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1402 for (i = 0; i < nseg; i++) {
1403 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1404 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1406 /* update the FIB size for the s/g count */
1407 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1411 /* Fix up the address values in the FIB. Use the command array index
1412 * instead of a pointer since these fields are only 32 bits. Shift
1413 * the SenderFibAddress over to make room for the fast response bit
1414 * and for the AIF bit
1416 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1417 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1419 /* save a pointer to the command for speedy reverse-lookup */
1420 cm->cm_fib->Header.Handle += cm->cm_index + 1;
/* pre-I/O sync of the data buffer (skipped for pass-through DMA tags) */
1422 if (cm->cm_passthr_dmat == 0) {
1423 if (cm->cm_flags & AAC_CMD_DATAIN)
1424 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1425 BUS_DMASYNC_PREREAD);
1426 if (cm->cm_flags & AAC_CMD_DATAOUT)
1427 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1428 BUS_DMASYNC_PREWRITE);
1431 cm->cm_flags |= AAC_CMD_MAPPED;
/* submit: forced sync mode, explicit-wait sync FIB, or async send with
 * retry/requeue on a full adapter queue */
1433 if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1435 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1436 } else if (cm->cm_flags & AAC_CMD_WAIT) {
1437 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1439 int count = 10000000L;
1440 while (AAC_SEND_COMMAND(sc, cm) != 0) {
1442 aac_unmap_command(cm);
1443 sc->flags |= AAC_QUEUE_FRZN;
1444 aac_requeue_ready(cm);
1446 DELAY(5); /* wait 5 usec. */
/*
 * Rebuild a non-conformable RawIo2 s/g list so that every middle element is
 * exactly 'pages' pages long: each oversized middle element is split into
 * pages*PAGE_SIZE chunks (with 32-bit address-low overflow carried into
 * addrHigh), the last original element is kept, and the result is copied
 * back into raw->sge.  Marks the list RIO2_SGL_CONFORMANT and records the
 * new nominal size and count (nseg_new).
 * NOTE(review): decimated listing — the visible lines do not show the
 * malloc-failure path or the return statement; presumably returns the new
 * element count (the caller assigns it to nseg) — confirm against full source.
 */
1453 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1454 int pages, int nseg, int nseg_new)
1456 struct aac_sge_ieee1212 *sge;
1460 sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1461 M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1465 for (i = 1, pos = 1; i < nseg - 1; ++i) {
1466 for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1467 addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1468 sge[pos].addrLow = addr_low;
1469 sge[pos].addrHigh = raw->sge[i].addrHigh;
/* 32-bit wrap of addrLow means we crossed a 4GB boundary */
1470 if (addr_low < raw->sge[i].addrLow)
1471 sge[pos].addrHigh++;
1472 sge[pos].length = pages * PAGE_SIZE;
1477 sge[pos] = raw->sge[nseg-1];
1478 for (i = 1; i < nseg_new; ++i)
1479 raw->sge[i] = sge[i];
1481 free(sge, M_AACRAIDBUF);
1482 raw->sgeCnt = nseg_new;
1483 raw->flags |= RIO2_SGL_CONFORMANT;
1484 raw->sgeNominalSize = pages * PAGE_SIZE;
1490 * Unmap a command from controller-visible space.
/*
 * Unmap a command from controller-visible space: no-op unless the command
 * is marked AAC_CMD_MAPPED; performs post-I/O DMA syncs (POSTREAD/POSTWRITE)
 * and unloads the data map when there is data and no pass-through DMA tag,
 * then clears the MAPPED flag.
 */
1493 aac_unmap_command(struct aac_command *cm)
1495 struct aac_softc *sc;
1498 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1500 if (!(cm->cm_flags & AAC_CMD_MAPPED))
1503 if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1504 if (cm->cm_flags & AAC_CMD_DATAIN)
1505 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1506 BUS_DMASYNC_POSTREAD);
1507 if (cm->cm_flags & AAC_CMD_DATAOUT)
1508 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1509 BUS_DMASYNC_POSTWRITE);
1511 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1513 cm->cm_flags &= ~AAC_CMD_MAPPED;
1517 * Hardware Interface
1521 * Initialize the adapter.
/*
 * bus_dmamap_load() callback for the adapter 'common' area: record the bus
 * address of the first DMA segment in sc->aac_common_busaddr.  'nseg' and
 * 'error' are unused on the visible lines.
 */
1524 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1526 struct aac_softc *sc;
1528 sc = (struct aac_softc *)arg;
1529 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1531 sc->aac_common_busaddr = segs[0].ds_addr;
/*
 * Probe firmware state and capabilities before bringing the adapter up:
 * wait out any pending flash update and for AAC_UP_AND_RUNNING, reject
 * unsupported PERC2/QC 1.x firmware, read the supported-options word and
 * derive driver flags (4GB window, CAM, 64-bit s/g, new-comm type 1/2/34,
 * 64-bit arrays, forced sync mode), remap the register window if the ATU
 * size requires it, and read the adapter's preferred FIB/sector/s-g/MSI-X
 * settings (AAC_MONKER_GETCOMMPREF), clamping the per-FIB size to a page.
 * NOTE(review): decimated listing — loop bodies, braces and return paths
 * between the numbered lines are not shown.
 */
1535 aac_check_firmware(struct aac_softc *sc)
1537 u_int32_t code, major, minor, maxsize;
1538 u_int32_t options = 0, atu_size = 0, status, waitCount;
1541 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1543 /* check if flash update is running */
1544 if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1547 code = AAC_GET_FWSTATUS(sc);
1548 if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1549 device_printf(sc->aac_dev,
1550 "FATAL: controller not coming ready, "
1551 "status %x\n", code);
1554 } while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1556 * Delay 10 seconds. Because right now FW is doing a soft reset,
1557 * do not read scratch pad register at this time
1559 waitCount = 10 * 10000;
1561 DELAY(100); /* delay 100 microseconds */
1567 * Wait for the adapter to come ready.
1571 code = AAC_GET_FWSTATUS(sc);
1572 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1573 device_printf(sc->aac_dev,
1574 "FATAL: controller not coming ready, "
1575 "status %x\n", code);
1578 } while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1581 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1582 * firmware version 1.x are not compatible with this driver.
1584 if (sc->flags & AAC_FLAGS_PERC2QC) {
1585 if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1587 device_printf(sc->aac_dev,
1588 "Error reading firmware version\n");
1592 /* These numbers are stored as ASCII! */
1593 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1594 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1596 device_printf(sc->aac_dev,
1597 "Firmware version %d.%d is not supported.\n",
1603 * Retrieve the capabilities/supported options word so we know what
1604 * work-arounds to enable. Some firmware revs don't support this
1607 if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1608 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1609 device_printf(sc->aac_dev,
1610 "RequestAdapterInfo failed\n");
1614 options = AAC_GET_MAILBOX(sc, 1);
1615 atu_size = AAC_GET_MAILBOX(sc, 2);
1616 sc->supported_options = options;
/* derive driver feature flags from the supported-options word */
1618 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1619 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1620 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1621 if (options & AAC_SUPPORTED_NONDASD)
1622 sc->flags |= AAC_FLAGS_ENABLE_CAM;
1623 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1624 && (sizeof(bus_addr_t) > 4)
1625 && (sc->hint_flags & 0x1)) {
1626 device_printf(sc->aac_dev,
1627 "Enabling 64-bit address support\n");
1628 sc->flags |= AAC_FLAGS_SG_64BIT;
1630 if (sc->aac_if.aif_send_command) {
1631 if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1632 (options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1633 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1634 else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1635 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1636 else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1637 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1639 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1640 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1643 if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1644 device_printf(sc->aac_dev, "Communication interface not supported!\n");
/* hint_flags bit 1 forces synchronous mode; TYPE34 adapters also fall
 * back to sync mode in this driver */
1648 if (sc->hint_flags & 2) {
1649 device_printf(sc->aac_dev,
1650 "Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1651 sc->flags |= AAC_FLAGS_SYNC_MODE;
1652 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1653 device_printf(sc->aac_dev,
1654 "Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1655 sc->flags |= AAC_FLAGS_SYNC_MODE;
1658 /* Check for broken hardware that does a lower number of commands */
1659 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1661 /* Remap mem. resource, if required */
1662 if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1663 bus_release_resource(
1664 sc->aac_dev, SYS_RES_MEMORY,
1665 sc->aac_regs_rid0, sc->aac_regs_res0);
1666 sc->aac_regs_res0 = bus_alloc_resource(
1667 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1668 0ul, ~0ul, atu_size, RF_ACTIVE);
1669 if (sc->aac_regs_res0 == NULL) {
/* fall back to whatever-sized window the bus will give us */
1670 sc->aac_regs_res0 = bus_alloc_resource_any(
1671 sc->aac_dev, SYS_RES_MEMORY,
1672 &sc->aac_regs_rid0, RF_ACTIVE);
1673 if (sc->aac_regs_res0 == NULL) {
1674 device_printf(sc->aac_dev,
1675 "couldn't allocate register window\n");
1679 sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1680 sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1683 /* Read preferred settings */
1684 sc->aac_max_fib_size = sizeof(struct aac_fib);
1685 sc->aac_max_sectors = 128; /* 64KB */
1686 sc->aac_max_aif = 1;
1687 if (sc->flags & AAC_FLAGS_SG_64BIT)
1688 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1689 - sizeof(struct aac_blockwrite64))
1690 / sizeof(struct aac_sg_entry64);
1692 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1693 - sizeof(struct aac_blockwrite))
1694 / sizeof(struct aac_sg_entry);
/* override the defaults with the adapter's preferred settings */
1696 if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1697 options = AAC_GET_MAILBOX(sc, 1);
1698 sc->aac_max_fib_size = (options & 0xFFFF);
1699 sc->aac_max_sectors = (options >> 16) << 1;
1700 options = AAC_GET_MAILBOX(sc, 2);
1701 sc->aac_sg_tablesize = (options >> 16);
1702 options = AAC_GET_MAILBOX(sc, 3);
1703 sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1704 if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1705 sc->aac_max_fibs = (options & 0xFFFF);
1706 options = AAC_GET_MAILBOX(sc, 4);
1707 sc->aac_max_aif = (options & 0xFFFF);
1708 options = AAC_GET_MAILBOX(sc, 5);
1709 sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
/* cap per-FIB footprint (incl. alignment/xport header) to one page */
1712 maxsize = sc->aac_max_fib_size + 31;
1713 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1714 maxsize += sizeof(struct aac_fib_xporthdr);
1715 if (maxsize > PAGE_SIZE) {
1716 sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1717 maxsize = PAGE_SIZE;
1719 sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1721 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1722 sc->flags |= AAC_FLAGS_RAW_IO;
1723 device_printf(sc->aac_dev, "Enable Raw I/O\n");
1725 if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1726 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1727 sc->flags |= AAC_FLAGS_LBA_64BIT;
1728 device_printf(sc->aac_dev, "Enable 64-bit array\n");
1731 #ifdef AACRAID_DEBUG
1732 aacraid_get_fw_debug_buffer(sc);
/*
 * Initialize the adapter: reset the host RRQ indices, fill in the
 * aac_adapter_init structure (revision, adapter-FIB/printf-buffer/host-RRQ
 * bus addresses, host memory size, I/O limits, new-comm/PM init flags),
 * clear the SRC outbound doorbell, hand the init structure to the firmware
 * via AAC_MONKER_INITSTRUCT, and finally run aac_check_config().
 * NOTE(review): decimated listing — braces and return paths between the
 * numbered lines are not shown.
 */
1738 aac_init(struct aac_softc *sc)
1740 struct aac_adapter_init *ip;
1743 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1745 /* reset rrq index */
1746 sc->aac_fibs_pushed_no = 0;
1747 for (i = 0; i < sc->aac_max_msix; i++)
1748 sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1751 * Fill in the init structure. This tells the adapter about the
1752 * physical location of various important shared data structures.
1754 ip = &sc->aac_common->ac_init;
1755 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1756 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1757 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1758 sc->flags |= AAC_FLAGS_RAW_IO;
1760 ip->NoOfMSIXVectors = sc->aac_max_msix;
1762 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1763 offsetof(struct aac_common, ac_fibs);
1764 ip->AdapterFibsVirtualAddress = 0;
1765 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1766 ip->AdapterFibAlign = sizeof(struct aac_fib);
1768 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1769 offsetof(struct aac_common, ac_printf);
1770 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1773 * The adapter assumes that pages are 4K in size, except on some
1774 * broken firmware versions that do the page->byte conversion twice,
1775 * therefore 'assuming' that this value is in 16MB units (2^24).
1776 * Round up since the granularity is so high.
1778 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1779 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1780 ip->HostPhysMemPages =
1781 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1783 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */
1785 ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1786 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1787 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1788 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1789 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1790 device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1791 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1792 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1793 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1794 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1795 device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1797 ip->MaxNumAif = sc->aac_max_aif;
1798 ip->HostRRQ_AddrLow =
1799 sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1800 /* always 32-bit address */
1801 ip->HostRRQ_AddrHigh = 0;
1803 if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1804 ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1805 ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1806 device_printf(sc->aac_dev, "Power Management enabled\n");
1809 ip->MaxIoCommands = sc->aac_max_fibs;
1810 ip->MaxIoSize = sc->aac_max_sectors << 9;
1811 ip->MaxFibSize = sc->aac_max_fib_size;
1814 * Do controller-type-specific initialisation
1816 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1819 * Give the init structure to the controller.
1821 if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1822 sc->aac_common_busaddr +
1823 offsetof(struct aac_common, ac_init), 0, 0, 0,
1825 device_printf(sc->aac_dev,
1826 "error establishing init structure\n");
1832 * Check configuration issues
1834 if ((error = aac_check_config(sc)) != 0)
/*
 * Choose the interrupt mode: try MSI-X (capped by AAC_MAX_MSIX and the
 * firmware's aac_max_msix from GETCOMMPREF), fall back to MSI, then to
 * legacy INTx; verify the adapter's MSI enable bit via its PCI MSI-X
 * capability, and derive the per-vector FIB capacity (aac_vector_cap).
 * NOTE(review): decimated listing — braces/early returns between the
 * numbered lines are not shown.
 */
1843 aac_define_int_mode(struct aac_softc *sc)
1846 int cap, msi_count, error = 0;
1851 /* max. vectors from AAC_MONKER_GETCOMMPREF */
1852 if (sc->aac_max_msix == 0) {
1853 sc->aac_max_msix = 1;
1854 sc->aac_vector_cap = sc->aac_max_fibs;
/* MSI-X attempt */
1859 msi_count = pci_msix_count(dev);
1860 if (msi_count > AAC_MAX_MSIX)
1861 msi_count = AAC_MAX_MSIX;
1862 if (msi_count > sc->aac_max_msix)
1863 msi_count = sc->aac_max_msix;
1864 if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1865 device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1866 "will try MSI\n", msi_count, error);
1867 pci_release_msi(dev);
1869 sc->msi_enabled = TRUE;
1870 device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
/* MSI fallback */
1874 if (!sc->msi_enabled) {
1876 if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1877 device_printf(dev, "alloc msi failed - err=%d; "
1878 "will use INTx\n", error);
1879 pci_release_msi(dev);
1881 sc->msi_enabled = TRUE;
1882 device_printf(dev, "using MSI interrupts\n");
1886 if (sc->msi_enabled) {
1887 /* now read controller capability from PCI config. space */
1888 cap = aac_find_pci_capability(sc, PCIY_MSIX);
1889 val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1890 if (!(val & AAC_PCI_MSI_ENABLE)) {
1891 pci_release_msi(dev);
1892 sc->msi_enabled = FALSE;
1896 if (!sc->msi_enabled) {
1897 device_printf(dev, "using legacy interrupts\n");
1898 sc->aac_max_msix = 1;
1900 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1901 if (sc->aac_max_msix > msi_count)
1902 sc->aac_max_msix = msi_count;
1904 sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1906 fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1907 sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
/*
 * Walk the device's PCI capability list looking for capability id 'cap':
 * bail if PCIM_STATUS_CAPPRESENT is clear, pick the capability-pointer
 * register by header type, then follow the NEXTPTR chain comparing IDs.
 * NOTE(review): decimated listing — the loop structure, match/return and
 * switch cases between the numbered lines are not shown; presumably returns
 * the capability offset or 0 — confirm against full source.
 */
1911 aac_find_pci_capability(struct aac_softc *sc, int cap)
1919 status = pci_read_config(dev, PCIR_STATUS, 2);
1920 if (!(status & PCIM_STATUS_CAPPRESENT))
1923 status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1924 switch (status & PCIM_HDRTYPE) {
1930 ptr = PCIR_CAP_PTR_2;
1936 ptr = pci_read_config(dev, ptr, 1);
1940 next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1941 val = pci_read_config(dev, ptr + PCICAP_ID, 1);
/*
 * Allocate and wire up one IRQ resource per configured vector (rid starts
 * at 1 for MSI/MSI-X, 0 for legacy INTx) and attach aacraid_new_intr_type1
 * as the handler, recording rid/resource/cookie and the per-vector softc
 * back-pointers in sc->aac_msix[].
 * NOTE(review): decimated listing — error-path returns between the numbered
 * lines are not shown.
 */
1951 aac_setup_intr(struct aac_softc *sc)
1953 int i, msi_count, rid;
1954 struct resource *res;
1957 msi_count = sc->aac_max_msix;
1958 rid = (sc->msi_enabled ? 1:0);
1960 for (i = 0; i < msi_count; i++, rid++) {
1961 if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1962 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1963 device_printf(sc->aac_dev,"can't allocate interrupt\n");
1966 sc->aac_irq_rid[i] = rid;
1967 sc->aac_irq[i] = res;
1968 if (aac_bus_setup_intr(sc->aac_dev, res,
1969 INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1970 aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1971 device_printf(sc->aac_dev, "can't set up interrupt\n");
1974 sc->aac_msix[i].vector_no = i;
1975 sc->aac_msix[i].sc = sc;
1976 sc->aac_intr[i] = tag;
/*
 * Query the adapter's container configuration status (CT_GET_CONFIG_STATUS
 * via a sync FIB) and, when the adapter reports an action no worse than
 * CFACT_PAUSE, auto-commit the configuration with CT_COMMIT_CONFIG.
 * Takes and releases aac_io_lock around the sync-FIB usage.
 * NOTE(review): decimated listing — return values for the commented
 * outcome branches are not shown.
 */
1983 aac_check_config(struct aac_softc *sc)
1985 struct aac_fib *fib;
1986 struct aac_cnt_config *ccfg;
1987 struct aac_cf_status_hdr *cf_shdr;
1990 mtx_lock(&sc->aac_io_lock);
1991 aac_alloc_sync_fib(sc, &fib);
1993 ccfg = (struct aac_cnt_config *)&fib->data[0];
1994 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1995 ccfg->Command = VM_ContainerConfig;
1996 ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
1997 ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
1999 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2000 sizeof (struct aac_cnt_config));
2001 cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2002 if (rval == 0 && ccfg->Command == ST_OK &&
2003 ccfg->CTCommand.param[0] == CT_OK) {
2004 if (cf_shdr->action <= CFACT_PAUSE) {
2005 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2006 ccfg->Command = VM_ContainerConfig;
2007 ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2009 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2010 sizeof (struct aac_cnt_config));
2011 if (rval == 0 && ccfg->Command == ST_OK &&
2012 ccfg->CTCommand.param[0] == CT_OK) {
2013 /* successful completion */
2016 /* auto commit aborted due to error(s) */
2020 /* auto commit aborted due to adapter indicating
2021 config. issues too dangerous to auto commit */
2029 aac_release_sync_fib(sc);
2030 mtx_unlock(&sc->aac_io_lock);
2035 * Send a synchronous command to the controller and wait for a result.
2036 * Indicate if the controller completed the command with an error status.
/*
 * Send a synchronous (mailbox) command to the controller and wait for the
 * result: fill the mailbox, ring the sync-command doorbell, spin (with
 * AAC_SYNC_TIMEOUT) on the completion flag unless this is a fire-and-forget
 * SYNCFIB (sp != NULL, *sp == 0), then read the status from mailbox 0 and
 * optionally mailbox 1 into *r1.  Non-zero is returned on error per the
 * visible status check; confirm exact return values against full source.
 */
2039 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2040 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2041 u_int32_t *sp, u_int32_t *r1)
2046 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2048 /* populate the mailbox */
2049 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2051 /* ensure the sync command doorbell flag is cleared */
2052 if (!sc->msi_enabled)
2053 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2055 /* then set it to signal the adapter */
2056 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2058 if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2059 /* spin waiting for the command to complete */
2062 if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2063 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2066 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2068 /* clear the completion flag */
2069 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2071 /* get the command status */
2072 status = AAC_GET_MAILBOX(sc, 0);
2076 /* return parameter */
2078 *r1 = AAC_GET_MAILBOX(sc, 1);
2080 if (status != AAC_SRB_STS_SUCCESS)
/*
 * Send a FIB synchronously through the adapter's sync-FIB slot: validate
 * 'datasize' against AAC_FIB_DATASIZE, fill in the FIB header (transfer
 * state, command, type, sizes, receiver bus address of ac_sync_fib in the
 * common area), then issue AAC_MONKER_SYNCFIB and report I/O errors.
 * Called with aac_io_lock held (asserted below).
 */
2087 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2088 struct aac_fib *fib, u_int16_t datasize)
2090 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2091 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2093 if (datasize > AAC_FIB_DATASIZE)
2097 * Set up the sync FIB
2099 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2100 AAC_FIBSTATE_INITIALISED |
2102 fib->Header.XferState |= xferstate;
2103 fib->Header.Command = command;
2104 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2105 fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2106 fib->Header.SenderSize = sizeof(struct aac_fib);
2107 fib->Header.SenderFibAddress = 0; /* Not needed */
2108 fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2109 offsetof(struct aac_common, ac_sync_fib);
2112 * Give the FIB to the controller, wait for a response.
2114 if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2115 fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2116 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2124 * Check for commands that have been outstanding for a suspiciously long time,
2125 * and complain about them.
/*
 * Scan the busy-command list for commands outstanding longer than
 * AAC_CMD_TIMEOUT, log each late command (with its FIB), and — on the lines
 * visible here — reset the adapter and dump the queues.  NOTE(review):
 * decimated listing; the conditions guarding aac_reset_adapter() are not
 * shown, so whether reset is unconditional cannot be confirmed from here.
 */
2128 aac_timeout(struct aac_softc *sc)
2130 struct aac_command *cm;
2134 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2136 * Traverse the busy command list, bitch about late commands once
2140 deadline = time_uptime - AAC_CMD_TIMEOUT;
2141 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2142 if (cm->cm_timestamp < deadline) {
2143 device_printf(sc->aac_dev,
2144 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2145 cm, (int)(time_uptime-cm->cm_timestamp));
2146 AAC_PRINT_FIB(sc, cm->cm_fib);
2152 aac_reset_adapter(sc);
2153 aacraid_print_queues(sc);
2157 * Interface Function Vectors
2161 * Read the current firmware status word.
/* Read the current firmware status word from the SRC outbound message reg. */
2164 aac_src_get_fwstatus(struct aac_softc *sc)
2166 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2168 return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2172 * Notify the controller of a change in a given queue
/* Notify the controller of a queue change by writing the shifted queue bit
 * to the SRC inbound doorbell register. */
2175 aac_src_qnotify(struct aac_softc *sc, int qbit)
2177 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2179 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2183 * Get the interrupt reason bits
/*
 * Get the interrupt reason bits: in MSI mode, read the MSI outbound doorbell
 * and translate the sync-status bit to AAC_DB_SYNC_COMMAND; otherwise read
 * and shift the regular outbound doorbell register.
 */
2186 aac_src_get_istatus(struct aac_softc *sc)
2190 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2192 if (sc->msi_enabled) {
2193 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2194 if (val & AAC_MSI_SYNC_STATUS)
2195 val = AAC_DB_SYNC_COMMAND;
2199 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2205 * Clear some interrupt reason bits
/*
 * Clear interrupt reason bits: in MSI mode a sync-command completion is
 * cleared via the device-register helper (AAC_CLEAR_SYNC_BIT); otherwise the
 * mask is written, shifted, to the outbound doorbell clear register.
 */
2208 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2210 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2212 if (sc->msi_enabled) {
2213 if (mask == AAC_DB_SYNC_COMMAND)
2214 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2216 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2221 * Populate the mailbox and set the command word
/* Populate the SRC mailbox: command word followed by four argument words at
 * consecutive 4-byte offsets. */
2224 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2225 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2227 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2229 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2230 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2231 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2232 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2233 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
/* SRCv variant of aac_src_set_mailbox: same layout at the SRCv mailbox
 * register offset. */
2237 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2238 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2240 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2242 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2243 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2244 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2245 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2246 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2250 * Fetch the immediate command status word
/* Fetch SRC mailbox word 'mb' (4-byte stride). */
2253 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2255 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2257 return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
/* SRCv variant: fetch SRCv mailbox word 'mb' (4-byte stride). */
2261 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2263 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2265 return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2269 * Set/clear interrupt masks
/*
 * Set/clear interrupt masks and doorbell control bits on SRC hardware,
 * dispatched on 'mode' (enable/disable interrupts, enable/disable MSI-X,
 * clear AIF/sync doorbell bits, enable INTx).  The read-back of AAC_SRC_IDBR
 * after each write flushes the posted write.  NOTE(review): decimated
 * listing — the val bit-set statements between GETREG4 and SETREG4 in each
 * case are not shown, so the exact bits written per case cannot be
 * confirmed from here.
 */
2272 aac_src_access_devreg(struct aac_softc *sc, int mode)
2276 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2279 case AAC_ENABLE_INTERRUPT:
2280 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2281 (sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2282 AAC_INT_ENABLE_TYPE1_INTX));
2285 case AAC_DISABLE_INTERRUPT:
2286 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2289 case AAC_ENABLE_MSIX:
2291 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2293 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2294 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2296 val = PMC_ALL_INTERRUPT_BITS;
2297 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2298 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2299 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2300 val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2303 case AAC_DISABLE_MSIX:
2305 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2307 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2308 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2311 case AAC_CLEAR_AIF_BIT:
2313 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2315 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2316 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2319 case AAC_CLEAR_SYNC_BIT:
2321 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2323 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2324 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2327 case AAC_ENABLE_INTX:
2329 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2331 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2332 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2334 val = PMC_ALL_INTERRUPT_BITS;
2335 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2336 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2337 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2338 val & (~(PMC_GLOBAL_INT_BIT2)));
2347 * New comm. interface: Send command functions
/*
 * New-comm send path for SRC hardware: optionally pick an MSI-X vector by
 * round-robin (least-loaded fallback; AIF requests stay on vector 0 path)
 * and encode it into the FIB handle; then build either a TFIB2/TFIB2_64
 * header (NEW_COMM_TYPE2) or an aac_fib_xporthdr in front of the FIB
 * (TYPE1), compute the size-in-128-byte-units-minus-1 'fibsize' field,
 * queue the command busy, and write the address+fibsize into the inbound
 * queue registers (64-bit pair or 32-bit).
 * NOTE(review): decimated listing — loop/brace structure of the vector
 * search and the return statement are not shown.
 */
2350 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2352 struct aac_fib_xporthdr *pFibX;
2353 u_int32_t fibsize, high_addr;
2356 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2358 if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2359 sc->aac_max_msix > 1) {
2360 u_int16_t vector_no, first_choice = 0xffff;
2362 vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2365 if (vector_no == sc->aac_max_msix)
2367 if (sc->aac_rrq_outstanding[vector_no] <
2370 if (0xffff == first_choice)
2371 first_choice = vector_no;
2372 else if (vector_no == first_choice)
2375 if (vector_no == first_choice)
2377 sc->aac_rrq_outstanding[vector_no]++;
/* wrap the push counter to avoid overflow */
2378 if (sc->aac_fibs_pushed_no == 0xffffffff)
2379 sc->aac_fibs_pushed_no = 0;
2381 sc->aac_fibs_pushed_no++;
/* encode the chosen vector in the upper 16 bits of the handle */
2383 cm->cm_fib->Header.Handle += (vector_no << 16);
2386 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2387 /* Calculate the amount to the fibsize bits */
2388 fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2389 /* Fill new FIB header */
2390 address = cm->cm_fibphys;
2391 high_addr = (u_int32_t)(address >> 32);
2392 if (high_addr == 0L) {
2393 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2394 cm->cm_fib->Header.u.TimeStamp = 0L;
2396 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2397 cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2399 cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2401 /* Calculate the amount to the fibsize bits */
2402 fibsize = (sizeof(struct aac_fib_xporthdr) +
2403 cm->cm_fib->Header.Size + 127) / 128 - 1;
2404 /* Fill XPORT header */
2405 pFibX = (struct aac_fib_xporthdr *)
2406 ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2407 pFibX->Handle = cm->cm_fib->Header.Handle;
2408 pFibX->HostAddress = cm->cm_fibphys;
2409 pFibX->Size = cm->cm_fib->Header.Size;
2410 address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2411 high_addr = (u_int32_t)(address >> 32);
2416 aac_enqueue_busy(cm);
/* low address word carries the encoded fibsize in its low bits */
2418 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2419 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2421 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2427 * New comm. interface: get, set outbound queue index
/* New comm. interface: read the outbound queue index.  NOTE(review):
 * decimated listing — the body/return is not shown here. */
2430 aac_src_get_outb_queue(struct aac_softc *sc)
2432 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* New comm. interface: set the outbound queue index.  NOTE(review):
 * decimated listing — the body is not shown here. */
2438 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2440 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2444 * Debugging and Diagnostics
2448 * Print some information about the controller.
/*
 * Print controller identification at attach time: optionally fetch the
 * supplemental adapter info (adapter type text, feature bits, options2),
 * print the driver version, then fetch RequestAdapterInfo to save the
 * kernel revision and print CPU/memory/battery details, firmware build,
 * serial number, and the supported-options bit string.  Uses a sync FIB
 * under aac_io_lock; releases both on the error path.
 */
2451 aac_describe_controller(struct aac_softc *sc)
2453 struct aac_fib *fib;
2454 struct aac_adapter_info *info;
2455 char *adapter_type = "Adaptec RAID controller";
2457 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2459 mtx_lock(&sc->aac_io_lock);
2460 aac_alloc_sync_fib(sc, &fib);
2462 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2464 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2465 device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2467 struct aac_supplement_adapter_info *supp_info;
2469 supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2470 adapter_type = (char *)supp_info->AdapterTypeText;
2471 sc->aac_feature_bits = supp_info->FeatureBits;
2472 sc->aac_support_opt2 = supp_info->SupportedOptions2;
2475 device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2477 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2478 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2481 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2482 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2483 aac_release_sync_fib(sc);
2484 mtx_unlock(&sc->aac_io_lock);
2488 /* save the kernel revision structure for later use */
2489 info = (struct aac_adapter_info *)&fib->data[0];
2490 sc->aac_revision = info->KernelRevision;
2493 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2494 "(%dMB cache, %dMB execution), %s\n",
2495 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2496 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2497 info->BufferMem / (1024 * 1024),
2498 info->ExecutionMem / (1024 * 1024),
2499 aac_describe_code(aac_battery_platform,
2500 info->batteryPlatform));
2502 device_printf(sc->aac_dev,
2503 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2504 info->KernelRevision.external.comp.major,
2505 info->KernelRevision.external.comp.minor,
2506 info->KernelRevision.external.comp.dash,
2507 info->KernelRevision.buildNumber,
2508 (u_int32_t)(info->SerialNumber & 0xffffff));
2510 device_printf(sc->aac_dev, "Supported Options=%b\n",
2511 sc->supported_options,
2534 aac_release_sync_fib(sc);
2535 mtx_unlock(&sc->aac_io_lock);
2539 * Look up a text description of a numeric error code and return a pointer to
2539a * it; unknown codes map to the table's fallback entry.
/*
 * Linear-scan a {code, string} lookup table for 'code'.
 * The table is terminated by an entry whose string is NULL; the entry
 * AFTER the terminator holds the "unknown code" fallback string, which is
 * returned when no code matches (hence table[i + 1] below).
 */
2543 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2547 for (i = 0; table[i].string != NULL; i++)
2548 if (table[i].code == code)
2549 return(table[i].string);
/* loop fell off the end: i indexes the NULL terminator, i+1 the fallback */
2550 return(table[i + 1].string);
2554 * Management Interface
/*
 * Character-device open entry point for the management interface
 * (/dev/aacraidN).  Marks the device busy and registers a per-open
 * destructor so the busy count is dropped on last close.
 * NOTE(review): numbering gap after 2566 — the return statement and the
 * pre-702000 #else path are not visible in this extraction.
 */
2558 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2560 struct aac_softc *sc;
2563 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2564 #if __FreeBSD_version >= 702000
/* keep the device from detaching while a management client holds it open */
2565 device_busy(sc->aac_dev);
/* aac_cdevpriv_dtor() undoes device_busy() when this open goes away */
2566 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
/*
 * Management-interface ioctl dispatcher.  Supports both the native FSACTL_*
 * commands and their Linux-compatibility FSACTL_LNX_* twins.  For native
 * commands the argument is a pointer-to-pointer, so 'arg' is dereferenced
 * once (arg = *(caddr_t*)arg) and control intentionally FALLS THROUGH into
 * the matching LNX case, which does the real work.
 * NOTE(review): embedded numbering has gaps — 'sc' assignment, 'error'
 * declaration, break statements and the default label are not visible in
 * this extraction.
 */
2572 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2574 union aac_statrequest *as;
2575 struct aac_softc *sc;
2578 as = (union aac_statrequest *)arg;
2580 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* AACQ_* statistics request: copy the per-queue counters out */
2584 switch (as->as_item) {
2588 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2589 sizeof(struct aac_qstat));
2597 case FSACTL_SENDFIB:
2598 case FSACTL_SEND_LARGE_FIB:
2599 arg = *(caddr_t*)arg;
/* fallthrough: native cases share the LNX handler below */
2600 case FSACTL_LNX_SENDFIB:
2601 case FSACTL_LNX_SEND_LARGE_FIB:
2602 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2603 error = aac_ioctl_sendfib(sc, arg);
2605 case FSACTL_SEND_RAW_SRB:
2606 arg = *(caddr_t*)arg;
/* fallthrough */
2607 case FSACTL_LNX_SEND_RAW_SRB:
2608 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2609 error = aac_ioctl_send_raw_srb(sc, arg);
2611 case FSACTL_AIF_THREAD:
2612 case FSACTL_LNX_AIF_THREAD:
2613 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2616 case FSACTL_OPEN_GET_ADAPTER_FIB:
2617 arg = *(caddr_t*)arg;
/* fallthrough */
2618 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2619 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2620 error = aac_open_aif(sc, arg);
2622 case FSACTL_GET_NEXT_ADAPTER_FIB:
2623 arg = *(caddr_t*)arg;
/* fallthrough */
2624 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2625 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2626 error = aac_getnext_aif(sc, arg);
2628 case FSACTL_CLOSE_GET_ADAPTER_FIB:
2629 arg = *(caddr_t*)arg;
/* fallthrough */
2630 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2631 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2632 error = aac_close_aif(sc, arg);
2634 case FSACTL_MINIPORT_REV_CHECK:
2635 arg = *(caddr_t*)arg;
/* fallthrough */
2636 case FSACTL_LNX_MINIPORT_REV_CHECK:
2637 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2638 error = aac_rev_check(sc, arg);
2640 case FSACTL_QUERY_DISK:
2641 arg = *(caddr_t*)arg;
/* fallthrough */
2642 case FSACTL_LNX_QUERY_DISK:
2643 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2644 error = aac_query_disk(sc, arg);
2646 case FSACTL_DELETE_DISK:
2647 case FSACTL_LNX_DELETE_DISK:
2649 * We don't trust the underland to tell us when to delete a
2650 * container, rather we rely on an AIF coming from the
2655 case FSACTL_GET_PCI_INFO:
2656 arg = *(caddr_t*)arg;
/* fallthrough */
2657 case FSACTL_LNX_GET_PCI_INFO:
2658 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2659 error = aac_get_pci_info(sc, arg);
2661 case FSACTL_GET_FEATURES:
2662 arg = *(caddr_t*)arg;
/* fallthrough */
2663 case FSACTL_LNX_GET_FEATURES:
2664 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2665 error = aac_supported_features(sc, arg);
/* unrecognized command — logged; error value set in a line not visible here */
2668 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
/*
 * poll(2) entry point for the management device.  Readable when any open
 * AIF context has unread entries in the AIF queue (its read index differs
 * from the producer index, or it has wrapped); otherwise register the
 * caller with the receive selinfo for a later selwakeup.
 * NOTE(review): 'sc' assignment, 'revents' init and the return are in
 * numbering gaps and not visible here.
 */
2676 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2678 struct aac_softc *sc;
2679 struct aac_fib_context *ctx;
2685 mtx_lock(&sc->aac_io_lock);
2686 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
/* any context with pending AIFs makes the device readable */
2687 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2688 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2689 revents |= poll_events & (POLLIN | POLLRDNORM);
2694 mtx_unlock(&sc->aac_io_lock);
/* nothing ready: arm select/poll notification */
2697 if (poll_events & (POLLIN | POLLRDNORM))
2698 selrecord(td, &sc->rcv_select);
/*
 * Event callback used by the ioctl paths when no command slot was free.
 * On AAC_EVENT_CMFREE, retry the command allocation into *arg; if it still
 * fails, re-queue the event to try again later, otherwise free the event.
 * A wakeup of the sleeping ioctl thread occurs in a line not visible here.
 */
2705 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2708 switch (event->ev_type) {
2709 case AAC_EVENT_CMFREE:
2710 mtx_assert(&sc->aac_io_lock, MA_OWNED);
2711 if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
/* still no command available — keep waiting on the event list */
2712 aacraid_add_event(sc, event);
2715 free(event, M_AACRAIDBUF);
2724 * Send a FIB supplied from userspace
/*
 * FSACTL_SENDFIB handler: copy a FIB in from userspace, submit it to the
 * controller, wait for completion, and copy the result back out.
 * Oversized FIBs (in either direction) are clamped to aac_max_fib_size
 * with a console warning.  If no command slot is free, sleeps on an
 * AAC_EVENT_CMFREE event until aac_ioctl_event() retries the allocation.
 * NOTE(review): numbering gaps hide several lines (error returns, goto
 * labels, the actual command-allocation retry loop structure).
 */
2727 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2729 struct aac_command *cm;
2732 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2739 mtx_lock(&sc->aac_io_lock);
2740 if (aacraid_alloc_command(sc, &cm)) {
2741 struct aac_event *event;
/* no free command: queue a CMFREE event and sleep until one is released */
2743 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2745 if (event == NULL) {
2747 mtx_unlock(&sc->aac_io_lock);
2750 event->ev_type = AAC_EVENT_CMFREE;
2751 event->ev_callback = aac_ioctl_event;
2752 event->ev_arg = &cm;
2753 aacraid_add_event(sc, event);
2754 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2756 mtx_unlock(&sc->aac_io_lock);
2759 * Fetch the FIB header, then re-copy to get data as well.
/* first copyin: header only, to learn the declared size */
2761 if ((error = copyin(ufib, cm->cm_fib,
2762 sizeof(struct aac_fib_header))) != 0)
2764 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
/* clamp untrusted user-supplied size to the controller maximum */
2765 if (size > sc->aac_max_fib_size) {
2766 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2767 size, sc->aac_max_fib_size);
2768 size = sc->aac_max_fib_size;
/* second copyin: header + payload, now with a sane size */
2770 if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2772 cm->cm_fib->Header.Size = size;
2773 cm->cm_timestamp = time_uptime;
2777 * Pass the FIB to the controller, wait for it to complete.
2779 mtx_lock(&sc->aac_io_lock);
2780 error = aacraid_wait_command(cm);
2781 mtx_unlock(&sc->aac_io_lock);
2783 device_printf(sc->aac_dev,
2784 "aacraid_wait_command return %d\n", error);
2789 * Copy the FIB and data back out to the caller.
2791 size = cm->cm_fib->Header.Size;
/* clamp the controller-reported size as well before copyout */
2792 if (size > sc->aac_max_fib_size) {
2793 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2794 size, sc->aac_max_fib_size);
2795 size = sc->aac_max_fib_size;
2797 error = copyout(cm->cm_fib, ufib, size);
/* common exit: return the command slot */
2801 mtx_lock(&sc->aac_io_lock);
2802 aacraid_release_command(cm);
2803 mtx_unlock(&sc->aac_io_lock);
2809 * Send a passthrough FIB supplied from userspace
/*
 * FSACTL_SEND_RAW_SRB handler: execute a userspace-supplied SCSI
 * passthrough SRB.  Copies the SRB in, validates its size and single
 * scatter/gather entry (32- or 64-bit form), allocates a dedicated DMA
 * buffer for the data phase, shuttles user data in/out around the command,
 * and copies the SRB response back to the caller.
 * NOTE(review): numbering gaps hide error returns, goto labels, the fib
 * assignment from cm, and parts of the cleanup path.
 */
2812 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2814 struct aac_command *cm;
2815 struct aac_fib *fib;
2816 struct aac_srb *srbcmd;
2817 struct aac_srb *user_srb = (struct aac_srb *)arg;
2819 int error, transfer_data = 0;
2820 bus_dmamap_t orig_map = 0;
2821 u_int32_t fibsize = 0;
2822 u_int64_t srb_sg_address;
2823 u_int32_t srb_sg_bytecount;
2825 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2829 mtx_lock(&sc->aac_io_lock);
2830 if (aacraid_alloc_command(sc, &cm)) {
2831 struct aac_event *event;
/* no free command: queue a CMFREE event and sleep (same pattern as sendfib) */
2833 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2835 if (event == NULL) {
2837 mtx_unlock(&sc->aac_io_lock);
2840 event->ev_type = AAC_EVENT_CMFREE;
2841 event->ev_callback = aac_ioctl_event;
2842 event->ev_arg = &cm;
2843 aacraid_add_event(sc, event);
2844 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2846 mtx_unlock(&sc->aac_io_lock);
2849 /* save original dma map */
2850 orig_map = cm->cm_datamap;
2853 srbcmd = (struct aac_srb *)fib->data;
/* learn the user's SRB size before copying the whole structure */
2854 if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2855 sizeof (u_int32_t)) != 0))
/* reject SRBs larger than the FIB payload area */
2857 if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2861 if ((error = copyin((void *)user_srb, srbcmd, fibsize) != 0))
2864 srbcmd->function = 0; /* SRBF_ExecuteScsi */
2865 srbcmd->retry_limit = 0; /* obsolete */
2867 /* only one sg element from userspace supported */
2868 if (srbcmd->sg_map.SgCount > 1) {
/* fibsize discriminates 32-bit vs 64-bit SG entry layouts */
2873 if (fibsize == (sizeof(struct aac_srb) +
2874 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2875 struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2876 srb_sg_bytecount = sgp->SgByteCount;
2877 srb_sg_address = (u_int64_t)sgp->SgAddress;
2878 } else if (fibsize == (sizeof(struct aac_srb) +
2879 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2881 struct aac_sg_entry64 *sgp =
2882 (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2883 srb_sg_bytecount = sgp->SgByteCount;
2884 srb_sg_address = sgp->SgAddress;
/* 64-bit user address on a 32-bit-SG controller cannot be honored */
2885 if (srb_sg_address > 0xffffffffull &&
2886 !(sc->flags & AAC_FLAGS_SG_64BIT))
/* the SRB response area follows the SRB in the user buffer */
2896 user_reply = (char *)arg + fibsize;
2897 srbcmd->data_len = srb_sg_bytecount;
2898 if (srbcmd->sg_map.SgCount == 1)
2901 if (transfer_data) {
2903 * Create DMA tag for the passthr. data buffer and allocate it.
2905 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
2906 1, 0, /* algnmnt, boundary */
2907 (sc->flags & AAC_FLAGS_SG_64BIT) ?
2908 BUS_SPACE_MAXADDR_32BIT :
2909 0x7fffffff, /* lowaddr */
2910 BUS_SPACE_MAXADDR, /* highaddr */
2911 NULL, NULL, /* filter, filterarg */
2912 srb_sg_bytecount, /* size */
2913 sc->aac_sg_tablesize, /* nsegments */
2914 srb_sg_bytecount, /* maxsegsize */
2916 NULL, NULL, /* No locking needed */
2917 &cm->cm_passthr_dmat)) {
/* cm_datamap is replaced here; orig_map restores it during cleanup */
2921 if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2922 BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2926 /* fill some cm variables */
2927 cm->cm_datalen = srb_sg_bytecount;
2928 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2929 cm->cm_flags |= AAC_CMD_DATAIN;
2930 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2931 cm->cm_flags |= AAC_CMD_DATAOUT;
/* host-to-device transfer: stage the user data into the DMA buffer */
2933 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2934 if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2935 cm->cm_data, cm->cm_datalen)) != 0)
2937 /* sync required for bus_dmamem_alloc() alloc. mem.? */
2938 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2939 BUS_DMASYNC_PREWRITE);
/* build the FIB around the SRB */
2944 fib->Header.Size = sizeof(struct aac_fib_header) +
2945 sizeof(struct aac_srb);
2946 fib->Header.XferState =
2947 AAC_FIBSTATE_HOSTOWNED |
2948 AAC_FIBSTATE_INITIALISED |
2949 AAC_FIBSTATE_EMPTY |
2950 AAC_FIBSTATE_FROMHOST |
2951 AAC_FIBSTATE_REXPECTED |
2955 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2956 ScsiPortCommandU64 : ScsiPortCommand;
2957 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
/* with data: let busdma build the SG list and submit via the callback */
2960 if (transfer_data) {
2961 bus_dmamap_load(cm->cm_passthr_dmat,
2962 cm->cm_datamap, cm->cm_data,
2964 aacraid_map_command_sg, cm, 0);
2966 aacraid_map_command_sg(cm, NULL, 0, 0);
2969 /* wait for completion */
2970 mtx_lock(&sc->aac_io_lock);
2971 while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2972 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2973 mtx_unlock(&sc->aac_io_lock);
/* device-to-host transfer: hand the DMA buffer contents back to the user */
2976 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2977 if ((error = copyout(cm->cm_data,
2978 (void *)(uintptr_t)srb_sg_address,
2979 cm->cm_datalen)) != 0)
2981 /* sync required for bus_dmamem_alloc() allocated mem.? */
2982 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2983 BUS_DMASYNC_POSTREAD);
/* return the SRB status/response block */
2987 error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
/* cleanup: tear down passthrough DMA resources, restore the original map */
2990 if (cm && cm->cm_data) {
2992 bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
2993 bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
2994 cm->cm_datamap = orig_map;
2996 if (cm && cm->cm_passthr_dmat)
2997 bus_dma_tag_destroy(cm->cm_passthr_dmat);
2999 mtx_lock(&sc->aac_io_lock);
3000 aacraid_release_command(cm);
3001 mtx_unlock(&sc->aac_io_lock);
3007 * Request an AIF from the controller (new comm. type1)
/*
 * Post an asynchronous AIF (Adapter Initiated FIB) request to the
 * controller (new comm. type1).  If no command slot is available, set
 * aif_pending so the request is retried when a command is freed.
 * NOTE(review): numbering gaps hide the return statements and the
 * fib = cm->cm_fib assignment.
 */
3010 aac_request_aif(struct aac_softc *sc)
3012 struct aac_command *cm;
3013 struct aac_fib *fib;
3015 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3017 if (aacraid_alloc_command(sc, &cm)) {
/* no slot now — remember to re-issue the AIF request later */
3018 sc->aif_pending = 1;
3021 sc->aif_pending = 0;
/* build the AIF request FIB */
3025 fib->Header.Size = sizeof(struct aac_fib);
3026 fib->Header.XferState =
3027 AAC_FIBSTATE_HOSTOWNED |
3028 AAC_FIBSTATE_INITIALISED |
3029 AAC_FIBSTATE_EMPTY |
3030 AAC_FIBSTATE_FROMHOST |
3031 AAC_FIBSTATE_REXPECTED |
3034 /* set AIF marker */
3035 fib->Header.Handle = 0x00800000;
3036 fib->Header.Command = AifRequest;
3037 ((struct aac_aif_command *)fib->data)->command = AifReqEvent;
/* no data phase: submit directly through the SG mapping callback */
3039 aacraid_map_command_sg(cm, NULL, 0, 0);
3043 #if __FreeBSD_version >= 702000
3045 * cdevpriv interface private destructor.
/*
 * cdevpriv destructor registered by aac_open(): drops the device_busy()
 * reference taken at open time.  'arg' is the softc passed to
 * devfs_set_cdevpriv().  The sc = arg assignment sits in a numbering gap.
 */
3048 aac_cdevpriv_dtor(void *arg)
3050 struct aac_softc *sc;
3053 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3055 device_unbusy(sc->aac_dev);
/*
 * Character-device close entry point.  Only traces entry in the visible
 * lines; the 'sc' assignment and return are in numbering gaps.
 */
3060 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3062 struct aac_softc *sc;
3065 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3071 * Handle an AIF sent to us by the controller; queue it for later reference.
3072 * If the queue fills up, then drop the older entries.
/*
 * Process an AIF delivered by the controller: react to container
 * add/delete and enclosure/JBOD events (re-enumerating containers and
 * triggering CAM rescans as needed), then append the raw FIB to the
 * circular AIF queue for userspace retrieval, adjusting any reader
 * contexts that the producer index has overrun.
 * NOTE(review): numbering gaps hide many lines (loop openings, 'found'
 * bookkeeping, break statements, case labels such as AifEnAddJBOD).
 */
3075 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3077 struct aac_aif_command *aif;
3078 struct aac_container *co, *co_next;
3079 struct aac_fib_context *ctx;
3080 struct aac_fib *sync_fib;
3081 struct aac_mntinforesp mir;
3082 int next, current, found;
3083 int count = 0, changed = 0, i = 0;
3084 u_int32_t channel, uid;
3086 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3088 aif = (struct aac_aif_command*)&fib->data[0];
3089 aacraid_print_aif(sc, aif);
3091 /* Is it an event that we should care about? */
3092 switch (aif->command) {
3093 case AifCmdEventNotify:
3094 switch (aif->data.EN.type) {
3095 case AifEnAddContainer:
3096 case AifEnDeleteContainer:
3098 * A container was added or deleted, but the message
3099 * doesn't tell us anything else! Re-enumerate the
3100 * containers and sort things out.
3102 aac_alloc_sync_fib(sc, &sync_fib);
3105 * Ask the controller for its containers one at
3107 * XXX What if the controller's list changes
3108 * midway through this enumeration?
3109 * XXX This should be done async.
3111 if (aac_get_container_info(sc, sync_fib, i,
3115 count = mir.MntRespCount;
3117 * Check the container against our list.
3118 * co->co_found was already set to 0 in a
/* only real (non-CT_NONE) containers participate in matching */
3121 if ((mir.Status == ST_OK) &&
3122 (mir.MntTable[0].VolType != CT_NONE)) {
3125 &sc->aac_container_tqh,
3127 if (co->co_mntobj.ObjectId ==
3128 mir.MntTable[0].ObjectId) {
3135 * If the container matched, continue
3144 * This is a new container. Do all the
3145 * appropriate things to set it up.
3147 aac_add_container(sc, &mir, 1, uid);
3151 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3152 aac_release_sync_fib(sc);
3155 * Go through our list of containers and see which ones
3156 * were not marked 'found'. Since the controller didn't
3157 * list them they must have been deleted. Do the
3158 * appropriate steps to destroy the device. Also reset
3159 * the co->co_found field.
3161 co = TAILQ_FIRST(&sc->aac_container_tqh);
3162 while (co != NULL) {
3163 if (co->co_found == 0) {
/* grab the successor before unlinking the node */
3164 co_next = TAILQ_NEXT(co, co_link);
3165 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3167 free(co, M_AACRAIDBUF);
3172 co = TAILQ_NEXT(co, co_link);
3176 /* Attach the newly created containers */
3178 if (sc->cam_rescan_cb != NULL)
3179 sc->cam_rescan_cb(sc, 0,
3180 AAC_CAM_TARGET_WILDCARD);
3185 case AifEnEnclosureManagement:
3186 switch (aif->data.EN.data.EEE.eventType) {
3187 case AIF_EM_DRIVE_INSERTION:
3188 case AIF_EM_DRIVE_REMOVAL:
/* unitID encodes the bus in its high nibble-of-interest */
3189 channel = aif->data.EN.data.EEE.unitID;
3190 if (sc->cam_rescan_cb != NULL)
3191 sc->cam_rescan_cb(sc,
3192 ((channel>>24) & 0xF) + 1,
3193 (channel & 0xFFFF));
3199 case AifEnDeleteJBOD:
3200 case AifRawDeviceRemove:
3201 channel = aif->data.EN.data.ECE.container;
3202 if (sc->cam_rescan_cb != NULL)
3203 sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3204 AAC_CAM_TARGET_WILDCARD);
3215 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3216 current = sc->aifq_idx;
3217 next = (current + 1) % AAC_AIFQ_LENGTH;
3219 sc->aifq_filled = 1;
3220 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3221 /* modify AIF contexts */
3222 if (sc->aifq_filled) {
/* producer overran a reader: mark wrap or push its index forward */
3223 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3224 if (next == ctx->ctx_idx)
3226 else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3227 ctx->ctx_idx = next;
3230 sc->aifq_idx = next;
3231 /* On the off chance that someone is sleeping for an aif... */
3232 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3233 wakeup(sc->aac_aifq);
3234 /* Wakeup any poll()ers */
3235 selwakeuppri(&sc->rcv_select, PRIBIO);
3241 * Return the Revision of the driver to userspace and check to see if the
3242 * userspace app is possibly compatible. This is extremely bogus since
3243 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3244 * returning what the card reported.
/*
 * FSACTL_MINIPORT_REV_CHECK handler: copy in the caller's revision struct,
 * report the driver's own version back, and always claim compatibility
 * (possiblyCompatible = 1) — see the comment above about Adaptec's
 * versioning scheme not matching this driver's.
 */
3247 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3249 struct aac_rev_check rev_check;
3250 struct aac_rev_check_resp rev_check_resp;
3253 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3256 * Copyin the revision struct from userspace
3258 if ((error = copyin(udata, (caddr_t)&rev_check,
3259 sizeof(struct aac_rev_check))) != 0) {
3263 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3264 rev_check.callingRevision.buildNumber);
3267 * Doctor up the response struct.
3269 rev_check_resp.possiblyCompatible = 1;
3270 rev_check_resp.adapterSWRevision.external.comp.major =
3271 AAC_DRIVER_MAJOR_VERSION;
3272 rev_check_resp.adapterSWRevision.external.comp.minor =
3273 AAC_DRIVER_MINOR_VERSION;
3274 rev_check_resp.adapterSWRevision.external.comp.type =
/* the 'type' value and AAC_DRIVER_BUILD assignment sit in numbering gaps */
3276 rev_check_resp.adapterSWRevision.external.comp.dash =
3277 AAC_DRIVER_BUGFIX_LEVEL;
3278 rev_check_resp.adapterSWRevision.buildNumber =
3281 return(copyout((caddr_t)&rev_check_resp, udata,
3282 sizeof(struct aac_rev_check_resp)));
3286 * Pass the fib context to the caller
/*
 * FSACTL_OPEN_GET_ADAPTER_FIB handler: allocate a new AIF reader context,
 * link it onto the softc's fibctx list, assign it a unique id, and copy
 * that id out to the caller.  On copyout failure the context is torn down
 * again via aac_close_aif().
 * NOTE(review): the uniqueness-retry logic around line 3312-3315 is only
 * partially visible; the id is derived by reinterpreting the bytes of the
 * local pointer variable (*(u_int32_t *)&fibctx) — endianness-dependent,
 * presumably the low 32 bits of the context address.  TODO confirm against
 * the full source.
 */
3289 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3291 struct aac_fib_context *fibctx, *ctx;
3294 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3296 fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3300 mtx_lock(&sc->aac_io_lock);
3301 /* all elements are already 0, add to queue */
3302 if (sc->fibctx == NULL)
3303 sc->fibctx = fibctx;
/* otherwise append at the tail of the singly-walked list */
3305 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3311 /* evaluate unique value */
3312 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
/* scan existing contexts to ensure the id is not already in use */
3314 while (ctx != fibctx) {
3315 if (ctx->unique == fibctx->unique) {
3323 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3324 mtx_unlock(&sc->aac_io_lock);
/* couldn't hand the id to userspace — undo the registration */
3326 aac_close_aif(sc, (caddr_t)ctx);
3331 * Close the caller's fib context
/*
 * FSACTL_CLOSE_GET_ADAPTER_FIB handler: find the AIF reader context whose
 * unique id matches the caller's argument (the id is read by
 * reinterpreting the caddr_t argument's bytes), unlink it from the
 * doubly-linked fibctx list and free it.
 * NOTE(review): the head-removal branch body and loop break are in
 * numbering gaps.
 */
3334 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3336 struct aac_fib_context *ctx;
3338 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3340 mtx_lock(&sc->aac_io_lock);
3341 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3342 if (ctx->unique == *(uint32_t *)&arg) {
3343 if (ctx == sc->fibctx)
/* interior node: splice prev/next around it */
3346 ctx->prev->next = ctx->next;
3348 ctx->next->prev = ctx->prev;
3354 free(ctx, M_AACRAIDBUF);
3356 mtx_unlock(&sc->aac_io_lock);
3361 * Pass the caller the next AIF in their queue
/*
 * FSACTL_GET_NEXT_ADAPTER_FIB handler: locate the caller's AIF context by
 * unique id and return the next queued AIF via aac_return_aif().  If the
 * queue is empty (EAGAIN) and the caller asked to Wait, sleep
 * interruptibly on the AIF queue until aac_handle_aif() wakes us.
 * NOTE(review): the context-not-found EFAULT path and tsleep-error break
 * are in numbering gaps.
 */
3364 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3366 struct get_adapter_fib_ioctl agf;
3367 struct aac_fib_context *ctx;
3370 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3372 mtx_lock(&sc->aac_io_lock);
3373 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3374 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3375 if (agf.AdapterFibContext == ctx->unique)
3379 mtx_unlock(&sc->aac_io_lock);
3383 error = aac_return_aif(sc, ctx, agf.AifFib);
3384 if (error == EAGAIN && agf.Wait) {
3385 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3386 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3387 while (error == EAGAIN) {
/* drop the lock across the sleep; aac_handle_aif() does the wakeup */
3388 mtx_unlock(&sc->aac_io_lock);
3389 error = tsleep(sc->aac_aifq, PRIBIO |
3390 PCATCH, "aacaif", 0);
3391 mtx_lock(&sc->aac_io_lock);
3393 error = aac_return_aif(sc, ctx, agf.AifFib);
3395 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3398 mtx_unlock(&sc->aac_io_lock);
3403 * Hand the next AIF off the top of the queue out to userspace.
/*
 * Copy the next AIF for this reader context out to userspace and advance
 * the context's read index (with wraparound).  Returns EAGAIN when the
 * context has caught up with the producer index and has not wrapped.
 * NOTE(review): the EAGAIN return, the 'error' capture of copyout's result
 * and the ctx_wrap reset sit in numbering gaps — the printf below reports
 * an 'error' whose assignment is not visible here.
 */
3406 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3410 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3412 current = ctx->ctx_idx;
/* queue empty for this reader: index caught up and no wrap pending */
3413 if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3418 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3420 device_printf(sc->aac_dev,
3421 "aac_return_aif: copyout returned %d\n", error);
3424 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
/*
 * FSACTL_GET_PCI_INFO handler: report the adapter's PCI bus and slot
 * numbers to userspace in a small local struct.
 */
3430 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3432 struct aac_pci_info {
3438 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3440 pciinf.bus = pci_get_bus(sc->aac_dev);
3441 pciinf.slot = pci_get_slot(sc->aac_dev);
3443 error = copyout((caddr_t)&pciinf, uptr,
3444 sizeof(struct aac_pci_info));
/*
 * FSACTL_GET_FEATURES handler.  Protocol (from the comment below): an
 * all-zero featuresState means "report the state of every supported
 * feature"; a specific bit set means "report/act on that one feature".
 * Currently reports largeLBA (64-bit LBA) and JBOD support.
 */
3450 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3452 struct aac_features f;
3455 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3457 if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3461 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3462 * ALL zero in the featuresState, the driver will return the current
3463 * state of all the supported features, the data field will not be
3465 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3466 * a specific bit set in the featuresState, the driver will return the
3467 * current state of this specific feature and whatever data that are
3468 * associated with the feature in the data field or perform whatever
3469 * action needed indicates in the data field.
3471 if (f.feat.fValue == 0) {
3472 f.feat.fBits.largeLBA =
3473 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3474 f.feat.fBits.JBODSupport = 1;
3475 /* TODO: In the future, add other features state here as well */
/* specific-feature query: answer only the bit the caller set */
3477 if (f.feat.fBits.largeLBA)
3478 f.feat.fBits.largeLBA =
3479 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3480 /* TODO: Add other features state and data in the future */
3483 error = copyout(&f, uptr, sizeof (f));
3488 * Give the userland some information about the container. The AAC arch
3489 * expects the driver to be a SCSI passthrough type driver, so it expects
3490 * the containers to have b:t:l numbers. Fake it.
/*
 * FSACTL_QUERY_DISK handler: fake SCSI b:t:l coordinates for a container,
 * since the AAC management interface expects a SCSI-passthrough view.
 * Looks the container up by ObjectId; a missing container is reported as
 * Deleted, a present one as Valid/Locked with Bus = device unit.
 * NOTE(review): early-return bodies, the Lun assignment and the 'found'
 * branch structure sit in numbering gaps.
 */
3493 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3495 struct aac_query_disk query_disk;
3496 struct aac_container *co;
3499 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3501 mtx_lock(&sc->aac_io_lock);
3502 error = copyin(uptr, (caddr_t)&query_disk,
3503 sizeof(struct aac_query_disk));
3505 mtx_unlock(&sc->aac_io_lock);
3509 id = query_disk.ContainerNumber;
3511 mtx_unlock(&sc->aac_io_lock);
/* match the requested container number against our container list */
3515 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3516 if (co->co_mntobj.ObjectId == id)
/* not found: report it as deleted */
3521 query_disk.Valid = 0;
3522 query_disk.Locked = 0;
3523 query_disk.Deleted = 1; /* XXX is this right? */
3525 query_disk.Valid = 1;
3526 query_disk.Locked = 1;
3527 query_disk.Deleted = 0;
/* fabricate b:t:l — bus is the device unit, target fixed at 0 */
3528 query_disk.Bus = device_get_unit(sc->aac_dev);
3529 query_disk.Target = 0;
3531 query_disk.UnMapped = 0;
3534 error = copyout((caddr_t)&query_disk, uptr,
3535 sizeof(struct aac_query_disk));
3537 mtx_unlock(&sc->aac_io_lock);
/*
 * Create the logical "Container Bus": allocate an aac_sim, add an
 * "aacraidp" child device for it, fill in bus parameters and hook it onto
 * the softc's SIM list, then attach children.  Allocation or child-add
 * failure panics — containers cannot be exposed without this bus.
 * NOTE(review): the lines referencing mir->MntTable (3574-3575) do not
 * match this function's visible locals; numbering gaps suggest they belong
 * to an adjacent/conditional block — verify against the full source.
 */
3542 aac_container_bus(struct aac_softc *sc)
3544 struct aac_sim *sim;
3547 sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3548 M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3550 device_printf(sc->aac_dev,
3551 "No memory to add container bus\n");
3552 panic("Out of memory?!");
3554 child = device_add_child(sc->aac_dev, "aacraidp", -1);
3555 if (child == NULL) {
3556 device_printf(sc->aac_dev,
3557 "device_add_child failed for container bus\n");
3558 free(sim, M_AACRAIDBUF);
3559 panic("Out of memory?!");
/* one target slot per possible container */
3562 sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3564 sim->BusType = CONTAINER_BUS;
3565 sim->InitiatorBusId = -1;
3567 sim->sim_dev = child;
3568 sim->aac_cam = NULL;
3570 device_set_ivars(child, sim);
3571 device_set_desc(child, "Container Bus");
3572 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3574 device_set_desc(child, aac_describe_code(aac_container_types,
3575 mir->MntTable[0].VolType));
3577 bus_generic_attach(sc->aac_dev);
/*
 * Discover physical SCSI passthrough buses behind the adapter.  Uses two
 * synchronous container commands: CT_GET_SCSI_METHOD to learn the method
 * id, then a VM_Ioctl/GetBusInfo to fetch per-bus validity, target counts
 * and initiator ids.  For each valid bus an aac_sim plus "aacraidp" child
 * device is created and appended to the SIM list.
 * NOTE(review): numbering gaps hide the 'child'/'i' declarations, returns,
 * param fields of the requests, and 'continue' statements in the loop.
 */
3581 aac_get_bus_info(struct aac_softc *sc)
3583 struct aac_fib *fib;
3584 struct aac_ctcfg *c_cmd;
3585 struct aac_ctcfg_resp *c_resp;
3586 struct aac_vmioctl *vmi;
3587 struct aac_vmi_businf_resp *vmi_resp;
3588 struct aac_getbusinf businfo;
3589 struct aac_sim *caminf;
3593 mtx_lock(&sc->aac_io_lock);
3594 aac_alloc_sync_fib(sc, &fib);
3595 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3596 bzero(c_cmd, sizeof(struct aac_ctcfg));
/* step 1: ask the firmware which SCSI method id to use for VM_Ioctl */
3598 c_cmd->Command = VM_ContainerConfig;
3599 c_cmd->cmd = CT_GET_SCSI_METHOD;
3602 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3603 sizeof(struct aac_ctcfg));
3605 device_printf(sc->aac_dev, "Error %d sending "
3606 "VM_ContainerConfig command\n", error);
3607 aac_release_sync_fib(sc);
3608 mtx_unlock(&sc->aac_io_lock);
3612 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3613 if (c_resp->Status != ST_OK) {
3614 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3616 aac_release_sync_fib(sc);
3617 mtx_unlock(&sc->aac_io_lock);
3621 sc->scsi_method_id = c_resp->param;
/* step 2: GetBusInfo via VM_Ioctl using the method id just learned */
3623 vmi = (struct aac_vmioctl *)&fib->data[0];
3624 bzero(vmi, sizeof(struct aac_vmioctl));
3626 vmi->Command = VM_Ioctl;
3627 vmi->ObjType = FT_DRIVE;
3628 vmi->MethId = sc->scsi_method_id;
3630 vmi->IoctlCmd = GetBusInfo;
3632 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3633 sizeof(struct aac_vmi_businf_resp));
3635 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3637 aac_release_sync_fib(sc);
3638 mtx_unlock(&sc->aac_io_lock);
3642 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3643 if (vmi_resp->Status != ST_OK) {
3644 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3646 aac_release_sync_fib(sc);
3647 mtx_unlock(&sc->aac_io_lock);
/* keep a private copy so the sync FIB can be released before device work */
3651 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3652 aac_release_sync_fib(sc);
3653 mtx_unlock(&sc->aac_io_lock);
/* create one passthrough SIM per valid physical bus */
3655 for (i = 0; i < businfo.BusCount; i++) {
3656 if (businfo.BusValid[i] != AAC_BUS_VALID)
3659 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3660 M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3661 if (caminf == NULL) {
3662 device_printf(sc->aac_dev,
3663 "No memory to add passthrough bus %d\n", i);
3667 child = device_add_child(sc->aac_dev, "aacraidp", -1);
3668 if (child == NULL) {
3669 device_printf(sc->aac_dev,
3670 "device_add_child failed for passthrough bus %d\n",
3672 free(caminf, M_AACRAIDBUF);
/* bus numbers start at 1; bus 0 is the container bus */
3676 caminf->TargetsPerBus = businfo.TargetsPerBus;
3677 caminf->BusNumber = i+1;
3678 caminf->BusType = PASSTHROUGH_BUS;
3679 caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3680 caminf->aac_sc = sc;
3681 caminf->sim_dev = child;
3682 caminf->aac_cam = NULL;
3684 device_set_ivars(child, caminf);
3685 device_set_desc(child, "SCSI Passthrough Bus");
3686 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3691 * Check to see if the kernel is up and running. If we are in a
3692 * BlinkLED state, return the BlinkLED code.
/*
 * Read the firmware status register to judge adapter health.  If the
 * kernel has panicked (AAC_KERNEL_PANIC) and the caller supplied 'bled',
 * extract the BlinkLED code from bits 16-23.  Return values (healthy /
 * panic code) are assigned in lines hidden by numbering gaps.
 */
3695 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3699 ret = AAC_GET_FWSTATUS(sc);
3701 if (ret & AAC_UP_AND_RUNNING)
3703 else if (ret & AAC_KERNEL_PANIC && bled)
3704 *bled = (ret >> 16) & 0xff;
3710 * Once do an IOP reset, basically have to re-initialize the card as
3711 * if coming up from a cold boot, and the driver is responsible for
3712 * any IO that was outstanding to the adapter at the time of the IOP
3713 * RESET. And prepare the driver for IOP RESET by making the init code
3714 * modular with the ability to call it from multiple places.
3717 aac_reset_adapter(struct aac_softc *sc)
3719 struct aac_command *cm;
3720 struct aac_fib *fib;
3721 struct aac_pause_command *pc;
3722 u_int32_t status, reset_mask, waitCount, max_msix_orig;
3723 int msi_enabled_orig;
3725 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3726 mtx_assert(&sc->aac_io_lock, MA_OWNED);
3728 if (sc->aac_state & AAC_STATE_RESET) {
3729 device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3732 sc->aac_state |= AAC_STATE_RESET;
3734 /* disable interrupt */
3735 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3738 * Abort all pending commands:
3739 * a) on the controller
3741 while ((cm = aac_dequeue_busy(sc)) != NULL) {
3742 cm->cm_flags |= AAC_CMD_RESET;
3744 /* is there a completion handler? */
3745 if (cm->cm_complete != NULL) {
3746 cm->cm_complete(cm);
3748 /* assume that someone is sleeping on this
3755 /* b) in the waiting queues */
3756 while ((cm = aac_dequeue_ready(sc)) != NULL) {
3757 cm->cm_flags |= AAC_CMD_RESET;
3759 /* is there a completion handler? */
3760 if (cm->cm_complete != NULL) {
3761 cm->cm_complete(cm);
3763 /* assume that someone is sleeping on this
3771 if (aac_check_adapter_health(sc, NULL) == 0) {
3772 mtx_unlock(&sc->aac_io_lock);
3773 (void) aacraid_shutdown(sc->aac_dev);
3774 mtx_lock(&sc->aac_io_lock);
3777 /* execute IOP reset */
3778 if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3779 AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3781 /* We need to wait for 5 seconds before accessing the MU again
3782 * 10000 * 100us = 1000,000us = 1000ms = 1s
3784 waitCount = 5 * 10000;
3786 DELAY(100); /* delay 100 microseconds */
3789 } else if ((aacraid_sync_command(sc,
3790 AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
3791 /* call IOP_RESET for older firmware */
3792 if ((aacraid_sync_command(sc,
3793 AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {
3795 if (status == AAC_SRB_STS_INVALID_REQUEST)
3796 device_printf(sc->aac_dev, "IOP_RESET not supported\n");
3798 /* probably timeout */
3799 device_printf(sc->aac_dev, "IOP_RESET failed\n");
3801 /* unwind aac_shutdown() */
3802 aac_alloc_sync_fib(sc, &fib);
3803 pc = (struct aac_pause_command *)&fib->data[0];
3804 pc->Command = VM_ContainerConfig;
3805 pc->Type = CT_PAUSE_IO;
3810 (void) aac_sync_fib(sc, ContainerCommand, 0, fib,
3811 sizeof (struct aac_pause_command));
3812 aac_release_sync_fib(sc);
3816 } else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
3817 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3819 * We need to wait for 5 seconds before accessing the doorbell
3820 * again, 10000 * 100us = 1000,000us = 1000ms = 1s
3822 waitCount = 5 * 10000;
3824 DELAY(100); /* delay 100 microseconds */
3830 * Initialize the adapter.
3832 max_msix_orig = sc->aac_max_msix;
3833 msi_enabled_orig = sc->msi_enabled;
3834 sc->msi_enabled = FALSE;
3835 if (aac_check_firmware(sc) != 0)
3837 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3838 sc->aac_max_msix = max_msix_orig;
3839 if (msi_enabled_orig) {
3840 sc->msi_enabled = msi_enabled_orig;
3841 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3843 mtx_unlock(&sc->aac_io_lock);
3845 mtx_lock(&sc->aac_io_lock);
3849 sc->aac_state &= ~AAC_STATE_RESET;
3850 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3851 aacraid_startio(sc);