2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2001 Scott Long
6 * Copyright (c) 2000 BSDi
7 * Copyright (c) 2001-2010 Adaptec, Inc.
8 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
39 #define AAC_DRIVERNAME "aacraid"
41 #include "opt_aacraid.h"
42 #include "opt_compat.h"
44 /* #include <stddef.h> */
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/kernel.h>
49 #include <sys/kthread.h>
51 #include <sys/sysctl.h>
52 #include <sys/sysent.h>
54 #include <sys/ioccom.h>
58 #include <sys/signalvar.h>
60 #include <sys/eventhandler.h>
63 #include <machine/bus.h>
64 #include <machine/resource.h>
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
69 #include <dev/aacraid/aacraid_reg.h>
70 #include <sys/aac_ioctl.h>
71 #include <dev/aacraid/aacraid_debug.h>
72 #include <dev/aacraid/aacraid_var.h>
/*
 * NOTE(review): this excerpt has lines elided (the embedded original line
 * numbers are non-contiguous); several prototype continuation lines and the
 * closing #endif for FILTER_HANDLED are not visible here.
 */
/* Compatibility: supply FILTER_HANDLED where the kernel lacks it — verify */
74 #ifndef FILTER_HANDLED
75 #define FILTER_HANDLED 0x02
/* Container management */
78 static void aac_add_container(struct aac_softc *sc,
79 struct aac_mntinforesp *mir, int f,
81 static void aac_get_bus_info(struct aac_softc *sc);
82 static void aac_container_bus(struct aac_softc *sc);
83 static void aac_daemon(void *arg);
84 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
85 int pages, int nseg, int nseg_new);
87 /* Command Processing */
88 static void aac_timeout(struct aac_softc *sc);
89 static void aac_command_thread(struct aac_softc *sc);
90 static int aac_sync_fib(struct aac_softc *sc, u_int32_t command,
91 u_int32_t xferstate, struct aac_fib *fib,
93 /* Command Buffer Management */
94 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
96 static int aac_alloc_commands(struct aac_softc *sc);
97 static void aac_free_commands(struct aac_softc *sc);
98 static void aac_unmap_command(struct aac_command *cm);
100 /* Hardware Interface */
101 static int aac_alloc(struct aac_softc *sc);
102 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
104 static int aac_check_firmware(struct aac_softc *sc);
105 static void aac_define_int_mode(struct aac_softc *sc);
106 static int aac_init(struct aac_softc *sc);
107 static int aac_find_pci_capability(struct aac_softc *sc, int cap);
108 static int aac_setup_intr(struct aac_softc *sc);
109 static int aac_check_config(struct aac_softc *sc);
111 /* PMC SRC interface */
112 static int aac_src_get_fwstatus(struct aac_softc *sc);
113 static void aac_src_qnotify(struct aac_softc *sc, int qbit);
114 static int aac_src_get_istatus(struct aac_softc *sc);
115 static void aac_src_clear_istatus(struct aac_softc *sc, int mask);
116 static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
117 u_int32_t arg0, u_int32_t arg1,
118 u_int32_t arg2, u_int32_t arg3);
119 static int aac_src_get_mailbox(struct aac_softc *sc, int mb);
120 static void aac_src_access_devreg(struct aac_softc *sc, int mode);
121 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
122 static int aac_src_get_outb_queue(struct aac_softc *sc);
123 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
/*
 * Hardware-access method table for PMC SRC-family controllers.
 * NOTE(review): several initializers (presumably qnotify, get_istatus,
 * set_mailbox, get_mailbox) and the closing brace are elided in this
 * excerpt — compare with aacraid_srcv_interface below.
 */
125 struct aac_interface aacraid_src_interface = {
126 aac_src_get_fwstatus,
129 aac_src_clear_istatus,
132 aac_src_access_devreg,
133 aac_src_send_command,
134 aac_src_get_outb_queue,
135 aac_src_set_outb_queue
138 /* PMC SRCv interface */
139 static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
140 u_int32_t arg0, u_int32_t arg1,
141 u_int32_t arg2, u_int32_t arg3);
142 static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
/*
 * SRCv controllers reuse the SRC access methods except for the mailbox
 * accessors, which are overridden with the aac_srcv_* variants above.
 * NOTE(review): some initializers and the closing brace are elided here.
 */
144 struct aac_interface aacraid_srcv_interface = {
145 aac_src_get_fwstatus,
148 aac_src_clear_istatus,
149 aac_srcv_set_mailbox,
150 aac_srcv_get_mailbox,
151 aac_src_access_devreg,
152 aac_src_send_command,
153 aac_src_get_outb_queue,
154 aac_src_set_outb_queue
157 /* Debugging and Diagnostics */
/*
 * Lookup table mapping firmware-reported CPU variant codes to printable
 * names; presumably consumed by aac_describe_code() — verify.
 */
158 static struct aac_code_lookup aac_cpu_variant[] = {
159 {"i960JX", CPUI960_JX},
160 {"i960CX", CPUI960_CX},
161 {"i960HX", CPUI960_HX},
162 {"i960RX", CPUI960_RX},
163 {"i960 80303", CPUI960_80303},
164 {"StrongARM SA110", CPUARM_SA110},
165 {"PPC603e", CPUPPC_603e},
166 {"XScale 80321", CPU_XSCALE_80321},
167 {"MIPS 4KC", CPU_MIPS_4KC},
168 {"MIPS 5KC", CPU_MIPS_5KC},
169 {"Unknown StrongARM", CPUARM_xxx},
170 {"Unknown PowerPC", CPUPPC_xxx},
/* catch-all entry; code 0 presumably terminates the table — verify */
172 {"Unknown processor", 0}
/*
 * Lookup table mapping battery-platform codes to printable descriptions.
 * NOTE(review): closing brace/semicolon elided in this excerpt.
 */
175 static struct aac_code_lookup aac_battery_platform[] = {
176 {"required battery present", PLATFORM_BAT_REQ_PRESENT},
177 {"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT},
178 {"optional battery present", PLATFORM_BAT_OPT_PRESENT},
179 {"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT},
180 {"no battery support", PLATFORM_BAT_NOT_SUPPORTED},
/* catch-all entry; code 0 presumably terminates the table — verify */
182 {"unknown battery platform", 0}
184 static void aac_describe_controller(struct aac_softc *sc);
185 static char *aac_describe_code(struct aac_code_lookup *table,
188 /* Management Interface */
189 static d_open_t aac_open;
190 static d_ioctl_t aac_ioctl;
191 static d_poll_t aac_poll;
/* cdevpriv destructor only exists on FreeBSD >= 7.2; older kernels use close */
192 #if __FreeBSD_version >= 702000
193 static void aac_cdevpriv_dtor(void *arg);
195 static d_close_t aac_close;
/* ioctl backends and AIF (asynchronous event FIB) plumbing */
197 static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
198 static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
199 static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
200 static void aac_request_aif(struct aac_softc *sc);
201 static int aac_rev_check(struct aac_softc *sc, caddr_t udata);
202 static int aac_open_aif(struct aac_softc *sc, caddr_t arg);
203 static int aac_close_aif(struct aac_softc *sc, caddr_t arg);
204 static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
205 static int aac_return_aif(struct aac_softc *sc,
206 struct aac_fib_context *ctx, caddr_t uptr);
207 static int aac_query_disk(struct aac_softc *sc, caddr_t uptr);
208 static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
209 static int aac_supported_features(struct aac_softc *sc, caddr_t uptr);
210 static void aac_ioctl_event(struct aac_softc *sc,
211 struct aac_event *event, void *arg);
212 static int aac_reset_adapter(struct aac_softc *sc);
213 static int aac_get_container_info(struct aac_softc *sc,
214 struct aac_fib *fib, int cid,
215 struct aac_mntinforesp *mir,
218 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
/*
 * Character-device switch for the /dev/aacraid%d control node created in
 * aacraid_attach().  D_NEEDGIANT: the ioctl path still runs under Giant.
 * NOTE(review): .d_open/.d_poll/.d_name entries and #endif appear elided.
 */
220 static struct cdevsw aacraid_cdevsw = {
221 .d_version = D_VERSION,
222 .d_flags = D_NEEDGIANT,
224 #if __FreeBSD_version < 702000
225 .d_close = aac_close,
227 .d_ioctl = aac_ioctl,
/* Private malloc type for all driver allocations */
232 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
/* sysctl root: hw.aacraid */
235 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
242 * Initialize the controller and softc
/*
 * Bring up one adapter: verify firmware, allocate DMA resources, hook the
 * interrupt, create the control device and AIF kthread, enumerate
 * containers, attach children via CAM, then enable interrupts and start
 * the periodic daemon.  NOTE(review): declarations (error/unit/fib/uid),
 * error-path labels and the return statement are elided in this excerpt.
 */
245 aacraid_attach(struct aac_softc *sc)
249 struct aac_mntinforesp mir;
250 int count = 0, i = 0;
253 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
254 sc->hint_flags = device_get_flags(sc->aac_dev);
256 * Initialize per-controller queues.
262 /* mark controller as suspended until we get ourselves organised */
263 sc->aac_state |= AAC_STATE_SUSPEND;
266 * Check that the firmware on the card is supported.
268 sc->msi_enabled = FALSE;
269 if ((error = aac_check_firmware(sc)) != 0)
275 mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
276 TAILQ_INIT(&sc->aac_container_tqh);
277 TAILQ_INIT(&sc->aac_ev_cmfree);
279 #if __FreeBSD_version >= 800000
280 /* Initialize the clock daemon callout. */
281 callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
284 * Initialize the adapter.
286 if ((error = aac_alloc(sc)) != 0)
288 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
289 aac_define_int_mode(sc);
290 if ((error = aac_init(sc)) != 0)
295 * Allocate and connect our interrupt.
297 if ((error = aac_setup_intr(sc)) != 0)
301 * Print a little information about the controller.
303 aac_describe_controller(sc);
306 * Make the control device.
308 unit = device_get_unit(sc->aac_dev);
309 sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
310 0640, "aacraid%d", unit);
311 sc->aac_dev_t->si_drv1 = sc;
313 /* Create the AIF thread */
314 if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
315 &sc->aifthread, 0, 0, "aacraid%daif", unit))
316 panic("Could not create AIF thread");
318 /* Register the shutdown method to only be called post-dump */
319 if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
320 sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
321 device_printf(sc->aac_dev,
322 "shutdown event registration failed\n");
324 /* Find containers */
325 mtx_lock(&sc->aac_io_lock);
326 aac_alloc_sync_fib(sc, &fib);
327 /* loop over possible containers */
329 if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
332 count = mir.MntRespCount;
333 aac_add_container(sc, &mir, 0, uid);
/* iterate until the firmware-reported count or the driver cap is hit */
335 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
336 aac_release_sync_fib(sc);
337 mtx_unlock(&sc->aac_io_lock);
339 /* Register with CAM for the containers */
340 TAILQ_INIT(&sc->aac_sim_tqh);
341 aac_container_bus(sc);
342 /* Register with CAM for the non-DASD devices */
343 if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
344 aac_get_bus_info(sc);
346 /* poke the bus to actually attach the child devices */
347 bus_generic_attach(sc->aac_dev);
349 /* mark the controller up */
350 sc->aac_state &= ~AAC_STATE_SUSPEND;
352 /* enable interrupts now */
353 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
/* start the periodic daemon: callout on >= 8.0, legacy timeout(9) otherwise */
355 #if __FreeBSD_version >= 800000
356 mtx_lock(&sc->aac_io_lock);
357 callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
358 mtx_unlock(&sc->aac_io_lock);
364 sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
/*
 * Periodic housekeeping callout: pushes the host time to the firmware via
 * a SendHostTime FIB and reschedules itself.  NOTE(review): declarations
 * (fib/tv), the #else/#endif pairs and closing braces are elided here.
 */
372 aac_daemon(void *arg)
374 struct aac_softc *sc;
376 struct aac_command *cm;
380 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
382 #if __FreeBSD_version >= 800000
383 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* bail if the callout was rescheduled or stopped while we were queued */
384 if (callout_pending(&sc->aac_daemontime) ||
385 callout_active(&sc->aac_daemontime) == 0)
388 mtx_lock(&sc->aac_io_lock);
/* best effort: skip the time update if no command is free right now */
392 if (!aacraid_alloc_command(sc, &cm)) {
394 cm->cm_timestamp = time_uptime;
396 cm->cm_flags |= AAC_CMD_WAIT;
399 sizeof(struct aac_fib_header) + sizeof(u_int32_t);
400 fib->Header.XferState =
401 AAC_FIBSTATE_HOSTOWNED |
402 AAC_FIBSTATE_INITIALISED |
404 AAC_FIBSTATE_FROMHOST |
405 AAC_FIBSTATE_REXPECTED |
408 AAC_FIBSTATE_FAST_RESPONSE;
409 fib->Header.Command = SendHostTime;
/* payload is just the current wall-clock seconds */
410 *(uint32_t *)fib->data = tv.tv_sec;
412 aacraid_map_command_sg(cm, NULL, 0, 0);
413 aacraid_release_command(cm);
/* re-arm: 30 minutes on callout-based kernels */
416 #if __FreeBSD_version >= 800000
417 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
419 mtx_unlock(&sc->aac_io_lock);
422 sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
/*
 * Queue an event callback for later delivery; currently only
 * AAC_EVENT_CMFREE ("a command became free") is handled.
 * NOTE(review): function body tail (default-case close, return) elided.
 */
427 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
430 switch (event->ev_type & AAC_EVENT_MASK) {
431 case AAC_EVENT_CMFREE:
432 TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
435 device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
444 * Request information of container #cid
/*
 * Query the firmware for container #cid via a VM_NameServe* FIB, filling
 * *mir; when the container is visible, follow up with a
 * CT_CID_TO_32BITS_UID ContainerConfig request to fill *uid.  Runs either
 * on a caller-supplied sync FIB or on an async command when sync_fib is
 * NULL.  NOTE(review): declarations (fib/rval), the sync/async branch
 * structure and returns are partially elided in this excerpt.
 */
447 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
448 struct aac_mntinforesp *mir, u_int32_t *uid)
450 struct aac_command *cm;
452 struct aac_mntinfo *mi;
453 struct aac_cnt_config *ccfg;
/* async path: grab a free command; bail if the pool is exhausted */
456 if (sync_fib == NULL) {
457 if (aacraid_alloc_command(sc, &cm)) {
458 device_printf(sc->aac_dev,
459 "Warning, no free command available\n");
467 mi = (struct aac_mntinfo *)&fib->data[0];
468 /* 4KB support?, 64-bit LBA? */
469 if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
470 mi->Command = VM_NameServeAllBlk;
471 else if (sc->flags & AAC_FLAGS_LBA_64BIT)
472 mi->Command = VM_NameServe64;
474 mi->Command = VM_NameServe;
475 mi->MntType = FT_FILESYS;
/* sync path: issue the FIB directly and wait */
479 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
480 sizeof(struct aac_mntinfo))) {
481 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
/* async path: build the FIB header by hand and wait for completion */
485 cm->cm_timestamp = time_uptime;
489 sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
490 fib->Header.XferState =
491 AAC_FIBSTATE_HOSTOWNED |
492 AAC_FIBSTATE_INITIALISED |
494 AAC_FIBSTATE_FROMHOST |
495 AAC_FIBSTATE_REXPECTED |
498 AAC_FIBSTATE_FAST_RESPONSE;
499 fib->Header.Command = ContainerCommand;
500 if (aacraid_wait_command(cm) != 0) {
501 device_printf(sc->aac_dev, "Error probing container %d\n", cid);
502 aacraid_release_command(cm);
506 bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
/* only resolve a UID for real, non-hidden containers */
510 if (mir->MntTable[0].VolType != CT_NONE &&
511 !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
/* legacy firmware: fall back to 512-byte blocks, flat LBA map */
512 if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
513 mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
514 mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
516 ccfg = (struct aac_cnt_config *)&fib->data[0];
517 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
518 ccfg->Command = VM_ContainerConfig;
519 ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
520 ccfg->CTCommand.param[0] = cid;
523 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
524 sizeof(struct aac_cnt_config));
525 if (rval == 0 && ccfg->Command == ST_OK &&
526 ccfg->CTCommand.param[0] == CT_OK &&
527 mir->MntTable[0].VolType != CT_PASSTHRU)
528 *uid = ccfg->CTCommand.param[1];
531 sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
532 fib->Header.XferState =
533 AAC_FIBSTATE_HOSTOWNED |
534 AAC_FIBSTATE_INITIALISED |
536 AAC_FIBSTATE_FROMHOST |
537 AAC_FIBSTATE_REXPECTED |
540 AAC_FIBSTATE_FAST_RESPONSE;
541 fib->Header.Command = ContainerCommand;
542 rval = aacraid_wait_command(cm);
543 if (rval == 0 && ccfg->Command == ST_OK &&
544 ccfg->CTCommand.param[0] == CT_OK &&
545 mir->MntTable[0].VolType != CT_PASSTHRU)
546 *uid = ccfg->CTCommand.param[1];
547 aacraid_release_command(cm);
555 * Create a device to represent a new container
/*
 * Allocate an aac_container for a probed container, copy its mount object
 * and link it onto sc->aac_container_tqh.  NOTE(review): the malloc flags,
 * uses of the 'f' and uid parameters, and closing braces are elided here.
 */
558 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
561 struct aac_container *co;
563 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
566 * Check container volume type for validity. Note that many of
567 * the possible types may never show up.
569 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
570 co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
573 panic("Out of memory?!");
577 bcopy(&mir->MntTable[0], &co->co_mntobj,
578 sizeof(struct aac_mntobj));
580 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
585 * Allocate resources associated with (sc)
/*
 * Create the three DMA tags (data buffers, FIBs, common area), allocate
 * and load the common area, then pre-allocate the command/FIB pool.
 * Returns 0 on success; error paths return non-zero (partially elided).
 * NOTE(review): declarations (maxsize/error), several tag parameters and
 * return statements are elided in this excerpt.
 */
588 aac_alloc(struct aac_softc *sc)
592 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
595 * Create DMA tag for mapping buffers into controller-addressable space.
597 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
598 1, 0, /* algnmnt, boundary */
599 (sc->flags & AAC_FLAGS_SG_64BIT) ?
601 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
602 BUS_SPACE_MAXADDR, /* highaddr */
603 NULL, NULL, /* filter, filterarg */
604 sc->aac_max_sectors << 9, /* maxsize */
605 sc->aac_sg_tablesize, /* nsegments */
606 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
607 BUS_DMA_ALLOCNOW, /* flags */
608 busdma_lock_mutex, /* lockfunc */
609 &sc->aac_io_lock, /* lockfuncarg */
610 &sc->aac_buffer_dmat)) {
611 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
616 * Create DMA tag for mapping FIBs into controller-addressable space..
/* +31 bytes slack per FIB to allow 32-byte alignment fixups later */
618 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
619 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
620 sizeof(struct aac_fib_xporthdr) + 31);
622 maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
623 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
624 1, 0, /* algnmnt, boundary */
625 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
626 BUS_SPACE_MAXADDR_32BIT :
627 0x7fffffff, /* lowaddr */
628 BUS_SPACE_MAXADDR, /* highaddr */
629 NULL, NULL, /* filter, filterarg */
630 maxsize, /* maxsize */
632 maxsize, /* maxsize */
634 NULL, NULL, /* No locking needed */
635 &sc->aac_fib_dmat)) {
636 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
641 * Create DMA tag for the common structure and allocate it.
643 maxsize = sizeof(struct aac_common);
644 maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
645 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
646 1, 0, /* algnmnt, boundary */
647 (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
648 BUS_SPACE_MAXADDR_32BIT :
649 0x7fffffff, /* lowaddr */
650 BUS_SPACE_MAXADDR, /* highaddr */
651 NULL, NULL, /* filter, filterarg */
652 maxsize, /* maxsize */
654 maxsize, /* maxsegsize */
656 NULL, NULL, /* No locking needed */
657 &sc->aac_common_dmat)) {
658 device_printf(sc->aac_dev,
659 "can't allocate common structure DMA tag\n");
662 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
663 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
664 device_printf(sc->aac_dev, "can't allocate common structure\n");
/* load doesn't bounce here, so the return value is ignored */
668 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
669 sc->aac_common, maxsize,
670 aac_common_map, sc, 0);
671 bzero(sc->aac_common, maxsize);
673 /* Allocate some FIBs and associated command structs */
674 TAILQ_INIT(&sc->aac_fibmap_tqh);
675 sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
676 M_AACRAIDBUF, M_WAITOK|M_ZERO);
677 mtx_lock(&sc->aac_io_lock);
/* grow the FIB pool until the cap is reached or allocation fails */
678 while (sc->total_fibs < sc->aac_max_fibs) {
679 if (aac_alloc_commands(sc) != 0)
682 mtx_unlock(&sc->aac_io_lock);
683 if (sc->total_fibs == 0)
690 * Free all of the resources associated with (sc)
692 * Should not be called if the controller is active.
/*
 * Tear down everything aac_alloc()/aac_setup_intr() created: control
 * device, FIB pool and tag, common area, interrupt handlers/resources,
 * MSI, data-buffer tag, parent tag and register windows.
 * NOTE(review): declaration of 'i', some guard conditions inside the IRQ
 * loop, and closing braces are elided in this excerpt.
 */
695 aacraid_free(struct aac_softc *sc)
699 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
701 /* remove the control device */
702 if (sc->aac_dev_t != NULL)
703 destroy_dev(sc->aac_dev_t);
705 /* throw away any FIB buffers, discard the FIB DMA tag */
706 aac_free_commands(sc);
707 if (sc->aac_fib_dmat)
708 bus_dma_tag_destroy(sc->aac_fib_dmat);
710 free(sc->aac_commands, M_AACRAIDBUF);
712 /* destroy the common area */
713 if (sc->aac_common) {
714 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
715 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
716 sc->aac_common_dmamap);
718 if (sc->aac_common_dmat)
719 bus_dma_tag_destroy(sc->aac_common_dmat);
721 /* disconnect the interrupt handler */
722 for (i = 0; i < AAC_MAX_MSIX; ++i) {
724 bus_teardown_intr(sc->aac_dev,
725 sc->aac_irq[i], sc->aac_intr[i]);
727 bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
728 sc->aac_irq_rid[i], sc->aac_irq[i]);
733 pci_release_msi(sc->aac_dev);
735 /* destroy data-transfer DMA tag */
736 if (sc->aac_buffer_dmat)
737 bus_dma_tag_destroy(sc->aac_buffer_dmat);
739 /* destroy the parent DMA tag */
740 if (sc->aac_parent_dmat)
741 bus_dma_tag_destroy(sc->aac_parent_dmat);
743 /* release the register window mapping */
744 if (sc->aac_regs_res0 != NULL)
745 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
746 sc->aac_regs_rid0, sc->aac_regs_res0);
747 if (sc->aac_regs_res1 != NULL)
748 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
749 sc->aac_regs_rid1, sc->aac_regs_res1);
753 * Disconnect from the controller completely, in preparation for unload.
/*
 * Device method: stop the daemon, free containers and CAM SIM children,
 * terminate the AIF kthread, shut the controller down, deregister the
 * shutdown hook, and destroy the I/O lock.  NOTE(review): declarations
 * (sim/error), #else/#endif pairs, error checks after device_delete_child,
 * the final aacraid_free() call and return are elided in this excerpt.
 */
756 aacraid_detach(device_t dev)
758 struct aac_softc *sc;
759 struct aac_container *co;
763 sc = device_get_softc(dev);
764 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* stop the periodic daemon: callout on >= 8.0, legacy untimeout otherwise */
766 #if __FreeBSD_version >= 800000
767 callout_drain(&sc->aac_daemontime);
769 untimeout(aac_daemon, (void *)sc, sc->timeout_id);
771 /* Remove the child containers */
772 while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
773 TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
774 free(co, M_AACRAIDBUF);
777 /* Remove the CAM SIMs */
778 while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
779 TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
780 error = device_delete_child(dev, sim->sim_dev);
783 free(sim, M_AACRAIDBUF);
/* ask the AIF thread to exit and wait up to 30 s for it */
786 if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
787 sc->aifflags |= AAC_AIFFLAGS_EXIT;
788 wakeup(sc->aifthread);
789 tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
792 if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
793 panic("Cannot shutdown AIF thread");
795 if ((error = aacraid_shutdown(dev)))
798 EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
802 mtx_destroy(&sc->aac_io_lock);
808 * Bring the controller down to a dormant state and detach all child devices.
810 * This function is called before detach or system shutdown.
812 * Note that we can assume that the bioq on the controller is empty, as we won't
813 * allow shutdown if any device is open.
/*
 * NOTE(review): declarations (fib), the HostShutdown FIB send mentioned in
 * the comment above, error printfs and the return are elided here.
 */
816 aacraid_shutdown(device_t dev)
818 struct aac_softc *sc;
820 struct aac_close_command *cc;
822 sc = device_get_softc(dev);
823 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
825 sc->aac_state |= AAC_STATE_SUSPEND;
828 * Send a Container shutdown followed by a HostShutdown FIB to the
829 * controller to convince it that we don't want to talk to it anymore.
830 * We've been closed and all I/O completed already
832 device_printf(sc->aac_dev, "shutting down controller...");
834 mtx_lock(&sc->aac_io_lock);
835 aac_alloc_sync_fib(sc, &fib);
836 cc = (struct aac_close_command *)&fib->data[0];
838 bzero(cc, sizeof(struct aac_close_command));
839 cc->Command = VM_CloseAll;
/* 0xfffffffe: close every container — magic ContainerId, verify in spec */
840 cc->ContainerId = 0xfffffffe;
841 if (aac_sync_fib(sc, ContainerCommand, 0, fib,
842 sizeof(struct aac_close_command)))
847 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
848 aac_release_sync_fib(sc);
849 mtx_unlock(&sc->aac_io_lock);
855 * Bring the controller to a quiescent state, ready for system suspend.
/* Device method: mark the adapter suspended and mask its interrupts. */
858 aacraid_suspend(device_t dev)
860 struct aac_softc *sc;
862 sc = device_get_softc(dev);
864 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
865 sc->aac_state |= AAC_STATE_SUSPEND;
867 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
872 * Bring the controller back to a state ready for operation.
/* Device method: clear the suspend flag and re-enable interrupts. */
875 aacraid_resume(device_t dev)
877 struct aac_softc *sc;
879 sc = device_get_softc(dev);
881 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
882 sc->aac_state &= ~AAC_STATE_SUSPEND;
883 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
888 * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
/*
 * Per-vector interrupt handler: decodes the doorbell (MSI vs INTx),
 * completes any synchronous command, drains the host response ring
 * (fast responses, AIFs, normal completions), advances the per-vector
 * ring index, and restarts queued I/O.  NOTE(review): declarations
 * (fib/vector_no), loop heads, several else branches and closing braces
 * are elided in this excerpt.
 */
891 aacraid_new_intr_type1(void *arg)
893 struct aac_msix_ctx *ctx;
894 struct aac_softc *sc;
896 struct aac_command *cm;
898 u_int32_t bellbits, bellbits_shifted, index, handle;
899 int isFastResponse, isAif, noMoreAif, mode;
901 ctx = (struct aac_msix_ctx *)arg;
903 vector_no = ctx->vector_no;
905 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
906 mtx_lock(&sc->aac_io_lock);
/* MSI path: only vector 0 carries AIF/sync doorbell bits */
908 if (sc->msi_enabled) {
909 mode = AAC_INT_MODE_MSI;
910 if (vector_no == 0) {
911 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
912 if (bellbits & 0x40000)
913 mode |= AAC_INT_MODE_AIF;
914 else if (bellbits & 0x1000)
915 mode |= AAC_INT_MODE_SYNC;
/* INTx path: read/ack the outbound doorbell and classify it */
918 mode = AAC_INT_MODE_INTX;
919 bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
920 if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
921 bellbits = AAC_DB_RESPONSE_SENT_NS;
922 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
924 bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
925 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
926 if (bellbits_shifted & AAC_DB_AIF_PENDING)
927 mode |= AAC_INT_MODE_AIF;
928 else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
929 mode |= AAC_INT_MODE_SYNC;
931 /* ODR readback, Prep #238630 */
932 AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
/* complete the single outstanding synchronous command, if any */
935 if (mode & AAC_INT_MODE_SYNC) {
936 if (sc->aac_sync_cm) {
937 cm = sc->aac_sync_cm;
938 cm->cm_flags |= AAC_CMD_COMPLETED;
939 /* is there a completion handler? */
940 if (cm->cm_complete != NULL) {
943 /* assume that someone is sleeping on this command */
946 sc->flags &= ~AAC_QUEUE_FRZN;
947 sc->aac_sync_cm = NULL;
952 if (mode & AAC_INT_MODE_AIF) {
953 if (mode & AAC_INT_MODE_INTX) {
960 /* handle async. status */
961 index = sc->aac_host_rrq_idx[vector_no];
963 isFastResponse = isAif = noMoreAif = 0;
964 /* remove toggle bit (31) */
965 handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
966 /* check fast response bit (30) */
967 if (handle & 0x40000000)
969 /* check AIF bit (23) */
970 else if (handle & 0x00800000)
/* low 16 bits index the command pool (1-based) */
972 handle &= 0x0000ffff;
976 cm = sc->aac_commands + (handle - 1);
978 sc->aac_rrq_outstanding[vector_no]--;
980 noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
982 aac_handle_aif(sc, fib);
984 aacraid_release_command(cm);
/* fast response: firmware didn't DMA status back; synthesize ST_OK */
986 if (isFastResponse) {
987 fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
988 *((u_int32_t *)(fib->data)) = ST_OK;
989 cm->cm_flags |= AAC_CMD_FASTRESP;
992 aac_unmap_command(cm);
993 cm->cm_flags |= AAC_CMD_COMPLETED;
995 /* is there a completion handler? */
996 if (cm->cm_complete != NULL) {
999 /* assume that someone is sleeping on this command */
1002 sc->flags &= ~AAC_QUEUE_FRZN;
/* clear the consumed slot and wrap the index within this vector's window */
1005 sc->aac_common->ac_host_rrq[index++] = 0;
1006 if (index == (vector_no + 1) * sc->aac_vector_cap)
1007 index = vector_no * sc->aac_vector_cap;
1008 sc->aac_host_rrq_idx[vector_no] = index;
1010 if ((isAif && !noMoreAif) || sc->aif_pending)
1011 aac_request_aif(sc);
1015 if (mode & AAC_INT_MODE_AIF) {
1016 aac_request_aif(sc);
1017 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1021 /* see if we can start some more I/O */
1022 if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1023 aacraid_startio(sc);
1024 mtx_unlock(&sc->aac_io_lock);
1028 * Handle notification of one or more FIBs coming from the controller.
/*
 * Kernel-thread main loop (the "aif" thread created in aacraid_attach):
 * sleeps on sc->aifthread, allocates FIBs on demand, performs periodic
 * stuck-command checks, and drains the firmware printf buffer.  Exits
 * when AAC_AIFFLAGS_EXIT is set, waking the detach path.
 * NOTE(review): declaration of 'retval', loop close braces and the
 * aac_timeout() call implied by the EWOULDBLOCK check are elided here.
 */
1031 aac_command_thread(struct aac_softc *sc)
1035 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1037 mtx_lock(&sc->aac_io_lock);
1038 sc->aifflags = AAC_AIFFLAGS_RUNNING;
1040 while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1043 if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1044 retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1045 "aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1048 * First see if any FIBs need to be allocated. This needs
1049 * to be called without the driver lock because contigmalloc
1050 * will grab Giant, and would result in an LOR.
1052 if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1053 aac_alloc_commands(sc);
1054 sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1055 aacraid_startio(sc);
1059 * While we're here, check to see if any commands are stuck.
1060 * This is pretty low-priority, so it's ok if it doesn't
1063 if (retval == EWOULDBLOCK)
1066 /* Check the hardware printf message buffer */
1067 if (sc->aac_common->ac_printf[0] != 0)
1068 aac_print_printf(sc);
1070 sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1071 mtx_unlock(&sc->aac_io_lock);
/* let aacraid_detach() (tsleep on sc->aac_dev) know we are gone */
1072 wakeup(sc->aac_dev);
1074 aac_kthread_exit(0);
1078 * Submit a command to the controller, return when it completes.
1079 * XXX This is very dangerous! If the card has gone out to lunch, we could
1080 * be stuck here forever. At the same time, signals are not caught
1081 * because there is a risk that a signal could wakeup the sleep before
1082 * the card has a chance to complete the command. Since there is no way
1083 * to cancel a command that is in progress, we can't protect against the
1084 * card completing a command late and spamming the command and data
1085 * memory. So, we are held hostage until the command completes.
/*
 * Requires sc->aac_io_lock held.  NOTE(review): the return statement is
 * elided in this excerpt; presumably returns the msleep() result.
 */
1088 aacraid_wait_command(struct aac_command *cm)
1090 struct aac_softc *sc;
1094 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1095 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1097 /* Put the command on the ready queue and get things going */
1098 aac_enqueue_ready(cm);
1099 aacraid_startio(sc);
/* sleep with no timeout — completion (interrupt path) does the wakeup */
1100 error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1105 *Command Buffer Management
1109 * Allocate a command.
/*
 * Pop a command off the free list into *cmp.  On an empty list, if the
 * pool can still grow, ask the AIF thread to allocate more FIBs.
 * NOTE(review): the failure/success returns and *cmp assignment are
 * elided in this excerpt; callers treat non-zero as "none available".
 */
1112 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1114 struct aac_command *cm;
1116 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1118 if ((cm = aac_dequeue_free(sc)) == NULL) {
1119 if (sc->total_fibs < sc->aac_max_fibs) {
1120 sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1121 wakeup(sc->aifthread);
1131 * Release a command back to the freelist.
/*
 * Reset the command/FIB to a pristine state, requeue it on the free list,
 * and fire any queued "command free" event callbacks.  Requires
 * sc->aac_io_lock held.  NOTE(review): some field resets and the closing
 * brace are elided in this excerpt.
 */
1134 aacraid_release_command(struct aac_command *cm)
1136 struct aac_event *event;
1137 struct aac_softc *sc;
1140 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1141 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1143 /* (re)initialize the command/FIB */
1144 cm->cm_sgtable = NULL;
1146 cm->cm_complete = NULL;
1148 cm->cm_passthr_dmat = 0;
1149 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1150 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1151 cm->cm_fib->Header.Unused = 0;
1152 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1155 * These are duplicated in aac_start to cover the case where an
1156 * intermediate stage may have destroyed them. They're left
1157 * initialized here for debugging purposes only.
1159 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1160 cm->cm_fib->Header.Handle = 0;
1162 aac_enqueue_free(cm);
1165 * Dequeue all events so that there's no risk of events getting
/* drain waiters now that at least one command is free */
1168 while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1169 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1170 event->ev_callback(sc, event, event->ev_arg);
1175 * Map helper for command/FIB allocation.
/* busdma callback: stores the single segment's bus address into *arg. */
1178 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1182 fibphys = (uint64_t *)arg;
1184 *fibphys = segs[0].ds_addr;
1188 * Allocate and initialize commands/FIBs for this adapter.
/*
 * Grow the command pool by one fibmap: allocate a DMA chunk holding
 * aac_max_fibs_alloc FIBs, carve it into 32-byte-aligned commands, create
 * a data DMA map per command, and put the new commands on the free list.
 * Requires sc->aac_io_lock held; drops it around the (sleepable) DMA
 * allocation.  NOTE(review): declarations (fibphys/maxsize/i/error),
 * several returns and the error-path structure are partially elided.
 */
1191 aac_alloc_commands(struct aac_softc *sc)
1193 struct aac_command *cm;
1194 struct aac_fibmap *fm;
1199 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1200 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* refuse to overshoot the configured FIB cap */
1202 if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1205 fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1209 mtx_unlock(&sc->aac_io_lock);
1210 /* allocate the FIBs in DMAable memory and load them */
1211 if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1212 BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1213 device_printf(sc->aac_dev,
1214 "Not enough contiguous memory available.\n");
1215 free(fm, M_AACRAIDBUF);
1216 mtx_lock(&sc->aac_io_lock);
/* +31 slack per FIB so each can be aligned to a 32-byte boundary below */
1220 maxsize = sc->aac_max_fib_size + 31;
1221 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1222 maxsize += sizeof(struct aac_fib_xporthdr);
1223 /* Ignore errors since this doesn't bounce */
1224 (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1225 sc->aac_max_fibs_alloc * maxsize,
1226 aac_map_command_helper, &fibphys, 0);
1227 mtx_lock(&sc->aac_io_lock);
1229 /* initialize constant fields in the command structure */
1230 bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1231 for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1232 cm = sc->aac_commands + sc->total_fibs;
1233 fm->aac_commands = cm;
1235 cm->cm_fib = (struct aac_fib *)
1236 ((u_int8_t *)fm->aac_fibs + i * maxsize);
1237 cm->cm_fibphys = fibphys + i * maxsize;
/* TYPE1: leave room for the transport header before the aligned FIB */
1238 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1239 u_int64_t fibphys_aligned;
1241 (cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1242 cm->cm_fib = (struct aac_fib *)
1243 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1244 cm->cm_fibphys = fibphys_aligned;
1246 u_int64_t fibphys_aligned;
1247 fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1248 cm->cm_fib = (struct aac_fib *)
1249 ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1250 cm->cm_fibphys = fibphys_aligned;
1252 cm->cm_index = sc->total_fibs;
1254 if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1255 &cm->cm_datamap)) != 0)
/* presumably reserves the last FIB for internal use — verify */
1257 if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1258 aacraid_release_command(cm);
1263 TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1264 fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
/* error path: undo the DMA load/alloc and free the fibmap */
1268 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1269 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1270 free(fm, M_AACRAIDBUF);
1275 * Free FIBs owned by this adapter.
/*
 * Tear down every fibmap chunk created by aac_alloc_commands(): destroy
 * per-command data maps, then unload/free the FIB DMA memory and the
 * tracker itself.
 */
1278 aac_free_commands(struct aac_softc *sc)
1280 struct aac_fibmap *fm;
1281 struct aac_command *cm;
1284 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1286 while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1288 TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1290 * We check against total_fibs to handle partially
/* the last chunk may hold fewer than aac_max_fibs_alloc commands */
1293 for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1294 cm = fm->aac_commands + i;
1295 bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1297 bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1298 bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1299 free(fm, M_AACRAIDBUF);
1304 * Command-mapping helper function - populate this command's s/g table.
/*
 * bus_dmamap_load callback for a command's data buffer.  Copies the DMA
 * segment list into whichever s/g table format the FIB uses (RawIo2
 * ieee1212, RawIo, 32-bit, or 64-bit entries), patches the FIB header
 * addresses, syncs the data map, and finally submits the command.
 * NOTE(review): listing has gaps — several statements are not visible.
 */
1307 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1309 struct aac_softc *sc;
1310 struct aac_command *cm;
1311 struct aac_fib *fib;
1314 cm = (struct aac_command *)arg;
1317 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1318 mtx_assert(&sc->aac_io_lock, MA_OWNED);
1320 /* copy into the FIB */
1321 if (cm->cm_sgtable != NULL) {
1322 if (fib->Header.Command == RawIo2) {
1323 struct aac_raw_io2 *raw;
1324 struct aac_sge_ieee1212 *sg;
1325 u_int32_t min_size = PAGE_SIZE, cur_size;
/* "conformable" = all middle segments share one nominal size */
1326 int conformable = TRUE;
1328 raw = (struct aac_raw_io2 *)&fib->data[0];
1329 sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1332 for (i = 0; i < nseg; i++) {
1333 cur_size = segs[i].ds_len;
/* store the 64-bit bus address across addrLow/addrHigh in one write */
1335 *(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1336 sg[i].length = cur_size;
1339 raw->sgeFirstSize = cur_size;
1340 } else if (i == 1) {
1341 raw->sgeNominalSize = cur_size;
1342 min_size = cur_size;
1343 } else if ((i+1) < nseg &&
1344 cur_size != raw->sgeNominalSize) {
1345 conformable = FALSE;
1346 if (cur_size < min_size)
1347 min_size = cur_size;
1351 /* not conformable: evaluate required sg elements */
1353 int j, err_found, nseg_new = nseg;
/* try the largest page multiple that evenly divides all middle segments */
1354 for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1357 for (j = 1; j < nseg - 1; ++j) {
1358 if (sg[j].length % (i*PAGE_SIZE)) {
1362 nseg_new += (sg[j].length / (i*PAGE_SIZE));
/* re-chunk only if the new list still fits the table and not disabled */
1367 if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1368 !(sc->hint_flags & 4))
1369 nseg = aac_convert_sgraw2(sc,
1370 raw, i, nseg, nseg_new);
1372 raw->flags |= RIO2_SGL_CONFORMANT;
1375 /* update the FIB size for the s/g count */
1376 fib->Header.Size += nseg *
1377 sizeof(struct aac_sge_ieee1212);
1379 } else if (fib->Header.Command == RawIo) {
1380 struct aac_sg_tableraw *sg;
1381 sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1383 for (i = 0; i < nseg; i++) {
1384 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1385 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1386 sg->SgEntryRaw[i].Next = 0;
1387 sg->SgEntryRaw[i].Prev = 0;
1388 sg->SgEntryRaw[i].Flags = 0;
1390 /* update the FIB size for the s/g count */
1391 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1392 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1393 struct aac_sg_table *sg;
1394 sg = cm->cm_sgtable;
1396 for (i = 0; i < nseg; i++) {
1397 sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1398 sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1400 /* update the FIB size for the s/g count */
1401 fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1403 struct aac_sg_table64 *sg;
1404 sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1406 for (i = 0; i < nseg; i++) {
1407 sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1408 sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1410 /* update the FIB size for the s/g count */
1411 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1415 /* Fix up the address values in the FIB. Use the command array index
1416 * instead of a pointer since these fields are only 32 bits. Shift
1417 * the SenderFibAddress over to make room for the fast response bit
1418 * and for the AIF bit
1420 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1421 cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1423 /* save a pointer to the command for speedy reverse-lookup */
1424 cm->cm_fib->Header.Handle += cm->cm_index + 1;
/* pass-through requests manage their own DMA sync; skip it here */
1426 if (cm->cm_passthr_dmat == 0) {
1427 if (cm->cm_flags & AAC_CMD_DATAIN)
1428 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1429 BUS_DMASYNC_PREREAD);
1430 if (cm->cm_flags & AAC_CMD_DATAOUT)
1431 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1432 BUS_DMASYNC_PREWRITE);
1435 cm->cm_flags |= AAC_CMD_MAPPED;
/* submit: forced sync mode, sync-wait, or async with bounded busy retry */
1437 if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1439 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1440 } else if (cm->cm_flags & AAC_CMD_WAIT) {
1441 aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1443 int count = 10000000L;
1444 while (AAC_SEND_COMMAND(sc, cm) != 0) {
/* controller queue full for too long: unmap, freeze queue, requeue */
1446 aac_unmap_command(cm);
1447 sc->flags |= AAC_QUEUE_FRZN;
1448 aac_requeue_ready(cm);
1450 DELAY(5); /* wait 5 usec. */
/*
 * Re-chunk a non-conformable RawIo2 s/g list so every middle element is
 * exactly 'pages' pages long, producing 'nseg_new' elements.  The first
 * and last elements are kept as-is.  Returns the new element count
 * (visible lines suggest so — the actual return is outside this listing).
 */
1457 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1458 int pages, int nseg, int nseg_new)
1460 struct aac_sge_ieee1212 *sge;
1464 sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1465 M_AACRAIDBUF, M_NOWAIT|M_ZERO);
/* split each middle element into fixed-size pieces of pages*PAGE_SIZE */
1469 for (i = 1, pos = 1; i < nseg - 1; ++i) {
1470 for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1471 addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1472 sge[pos].addrLow = addr_low;
1473 sge[pos].addrHigh = raw->sge[i].addrHigh;
/* 32-bit low-word wrapped around: carry into the high word */
1474 if (addr_low < raw->sge[i].addrLow)
1475 sge[pos].addrHigh++;
1476 sge[pos].length = pages * PAGE_SIZE;
/* last element copied unchanged, then the rebuilt list replaces raw->sge */
1481 sge[pos] = raw->sge[nseg-1];
1482 for (i = 1; i < nseg_new; ++i)
1483 raw->sge[i] = sge[i];
1485 free(sge, M_AACRAIDBUF);
1486 raw->sgeCnt = nseg_new;
1487 raw->flags |= RIO2_SGL_CONFORMANT;
1488 raw->sgeNominalSize = pages * PAGE_SIZE;
1494 * Unmap a command from controller-visible space.
/*
 * Post-I/O DMA sync and unload of a command's data buffer; no-op when
 * the command was never marked mapped.  Clears AAC_CMD_MAPPED.
 */
1497 aac_unmap_command(struct aac_command *cm)
1499 struct aac_softc *sc;
1502 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1504 if (!(cm->cm_flags & AAC_CMD_MAPPED))
/* pass-through requests (cm_passthr_dmat != 0) sync their own maps */
1507 if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1508 if (cm->cm_flags & AAC_CMD_DATAIN)
1509 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1510 BUS_DMASYNC_POSTREAD);
1511 if (cm->cm_flags & AAC_CMD_DATAOUT)
1512 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1513 BUS_DMASYNC_POSTWRITE);
1515 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1517 cm->cm_flags &= ~AAC_CMD_MAPPED;
1521 * Hardware Interface
1525 * Initialize the adapter.
/*
 * bus_dmamap_load callback for the "common" shared-memory area: record
 * its bus address in the softc for later use by aac_init() et al.
 */
1528 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1530 struct aac_softc *sc;
1532 sc = (struct aac_softc *)arg;
1533 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1535 sc->aac_common_busaddr = segs[0].ds_addr;
/*
 * Probe firmware state and capabilities: wait out a pending flash
 * update, wait for the controller to come ready, reject unsupported
 * PERC2/QC firmware, read the supported-options word, pick the
 * communication interface type, remap the register window if the ATU
 * is larger, and read the controller's preferred I/O limits.
 * NOTE(review): listing has gaps — error returns/braces are partly missing.
 */
1539 aac_check_firmware(struct aac_softc *sc)
1541 u_int32_t code, major, minor, maxsize;
1542 u_int32_t options = 0, atu_size = 0, status, waitCount;
1545 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1547 /* check if flash update is running */
1548 if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1551 code = AAC_GET_FWSTATUS(sc);
/* give the update AAC_FWUPD_TIMEOUT seconds before declaring failure */
1552 if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1553 device_printf(sc->aac_dev,
1554 "FATAL: controller not coming ready, "
1555 "status %x\n", code);
1558 } while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1560 * Delay 10 seconds. Because right now FW is doing a soft reset,
1561 * do not read scratch pad register at this time
1563 waitCount = 10 * 10000;
1565 DELAY(100); /* delay 100 microseconds */
1571 * Wait for the adapter to come ready.
1575 code = AAC_GET_FWSTATUS(sc);
1576 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1577 device_printf(sc->aac_dev,
1578 "FATAL: controller not coming ready, "
1579 "status %x\n", code);
1582 } while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1585 * Retrieve the firmware version numbers. Dell PERC2/QC cards with
1586 * firmware version 1.x are not compatible with this driver.
1588 if (sc->flags & AAC_FLAGS_PERC2QC) {
1589 if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1591 device_printf(sc->aac_dev,
1592 "Error reading firmware version\n");
1596 /* These numbers are stored as ASCII! */
1597 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1598 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1600 device_printf(sc->aac_dev,
1601 "Firmware version %d.%d is not supported.\n",
1607 * Retrieve the capabilities/supported options word so we know what
1608 * work-arounds to enable. Some firmware revs don't support this
1611 if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1612 if (status != AAC_SRB_STS_INVALID_REQUEST) {
1613 device_printf(sc->aac_dev,
1614 "RequestAdapterInfo failed\n");
1618 options = AAC_GET_MAILBOX(sc, 1);
1619 atu_size = AAC_GET_MAILBOX(sc, 2);
1620 sc->supported_options = options;
1622 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1623 (sc->flags & AAC_FLAGS_NO4GB) == 0)
1624 sc->flags |= AAC_FLAGS_4GB_WINDOW;
1625 if (options & AAC_SUPPORTED_NONDASD)
1626 sc->flags |= AAC_FLAGS_ENABLE_CAM;
/* 64-bit s/g only when firmware, platform, and hint bit 0 all allow it */
1627 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1628 && (sizeof(bus_addr_t) > 4)
1629 && (sc->hint_flags & 0x1)) {
1630 device_printf(sc->aac_dev,
1631 "Enabling 64-bit address support\n");
1632 sc->flags |= AAC_FLAGS_SG_64BIT;
/* select new-comm interface type from the advertised options */
1634 if (sc->aac_if.aif_send_command) {
1635 if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1636 (options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1637 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1638 else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1639 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1640 else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1641 sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1643 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1644 sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1647 if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1648 device_printf(sc->aac_dev, "Communication interface not supported!\n");
/* hint bit 1 forces synchronous mode; type3/4 also falls back to sync */
1652 if (sc->hint_flags & 2) {
1653 device_printf(sc->aac_dev,
1654 "Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1655 sc->flags |= AAC_FLAGS_SYNC_MODE;
1656 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1657 device_printf(sc->aac_dev,
1658 "Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1659 sc->flags |= AAC_FLAGS_SYNC_MODE;
1662 /* Check for broken hardware that does a lower number of commands */
1663 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1665 /* Remap mem. resource, if required */
1666 if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1667 bus_release_resource(
1668 sc->aac_dev, SYS_RES_MEMORY,
1669 sc->aac_regs_rid0, sc->aac_regs_res0);
1670 sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1671 sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1672 atu_size, RF_ACTIVE);
/* fall back to the original-size window if the larger one fails */
1673 if (sc->aac_regs_res0 == NULL) {
1674 sc->aac_regs_res0 = bus_alloc_resource_any(
1675 sc->aac_dev, SYS_RES_MEMORY,
1676 &sc->aac_regs_rid0, RF_ACTIVE);
1677 if (sc->aac_regs_res0 == NULL) {
1678 device_printf(sc->aac_dev,
1679 "couldn't allocate register window\n");
1683 sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1684 sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1687 /* Read preferred settings */
1688 sc->aac_max_fib_size = sizeof(struct aac_fib);
1689 sc->aac_max_sectors = 128; /* 64KB */
1690 sc->aac_max_aif = 1;
/* default s/g table size: whatever fits in a FIB after the write header */
1691 if (sc->flags & AAC_FLAGS_SG_64BIT)
1692 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1693 - sizeof(struct aac_blockwrite64))
1694 / sizeof(struct aac_sg_entry64);
1696 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1697 - sizeof(struct aac_blockwrite))
1698 / sizeof(struct aac_sg_entry);
/* override defaults with the controller's preferred values, if supported */
1700 if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1701 options = AAC_GET_MAILBOX(sc, 1);
1702 sc->aac_max_fib_size = (options & 0xFFFF);
1703 sc->aac_max_sectors = (options >> 16) << 1;
1704 options = AAC_GET_MAILBOX(sc, 2);
1705 sc->aac_sg_tablesize = (options >> 16);
1706 options = AAC_GET_MAILBOX(sc, 3);
1707 sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1708 if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1709 sc->aac_max_fibs = (options & 0xFFFF);
1710 options = AAC_GET_MAILBOX(sc, 4);
1711 sc->aac_max_aif = (options & 0xFFFF);
1712 options = AAC_GET_MAILBOX(sc, 5);
1713 sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
/* clamp the padded FIB size to one page; see aac_alloc_commands() */
1716 maxsize = sc->aac_max_fib_size + 31;
1717 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1718 maxsize += sizeof(struct aac_fib_xporthdr);
1719 if (maxsize > PAGE_SIZE) {
1720 sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1721 maxsize = PAGE_SIZE;
1723 sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1725 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1726 sc->flags |= AAC_FLAGS_RAW_IO;
1727 device_printf(sc->aac_dev, "Enable Raw I/O\n");
1729 if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1730 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1731 sc->flags |= AAC_FLAGS_LBA_64BIT;
1732 device_printf(sc->aac_dev, "Enable 64-bit array\n");
1735 #ifdef AACRAID_DEBUG
1736 aacraid_get_fw_debug_buffer(sc);
/*
 * Hand the adapter its init structure: reset the host RRQ indices,
 * fill in struct aac_adapter_init (shared-memory addresses, limits and
 * comm-type flags), issue AAC_MONKER_INITSTRUCT, then run the config
 * sanity check.
 */
1742 aac_init(struct aac_softc *sc)
1744 struct aac_adapter_init *ip;
1747 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1749 /* reset rrq index */
1750 sc->aac_fibs_pushed_no = 0;
1751 for (i = 0; i < sc->aac_max_msix; i++)
1752 sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1755 * Fill in the init structure. This tells the adapter about the
1756 * physical location of various important shared data structures.
1758 ip = &sc->aac_common->ac_init;
1759 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1760 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1761 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1762 sc->flags |= AAC_FLAGS_RAW_IO;
1764 ip->NoOfMSIXVectors = sc->aac_max_msix;
1766 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1767 offsetof(struct aac_common, ac_fibs);
1768 ip->AdapterFibsVirtualAddress = 0;
1769 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1770 ip->AdapterFibAlign = sizeof(struct aac_fib);
1772 ip->PrintfBufferAddress = sc->aac_common_busaddr +
1773 offsetof(struct aac_common, ac_printf);
1774 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1777 * The adapter assumes that pages are 4K in size, except on some
1778 * broken firmware versions that do the page->byte conversion twice,
1779 * therefore 'assuming' that this value is in 16MB units (2^24).
1780 * Round up since the granularity is so high.
1782 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1783 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1784 ip->HostPhysMemPages =
1785 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1787 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */
1789 ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
/* per-comm-type init revision and extra flags */
1790 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1791 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1792 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1793 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1794 device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1795 } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1796 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1797 ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1798 AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1799 device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1801 ip->MaxNumAif = sc->aac_max_aif;
1802 ip->HostRRQ_AddrLow =
1803 sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1804 /* always 32-bit address */
1805 ip->HostRRQ_AddrHigh = 0;
1807 if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1808 ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1809 ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1810 device_printf(sc->aac_dev, "Power Management enabled\n");
1813 ip->MaxIoCommands = sc->aac_max_fibs;
1814 ip->MaxIoSize = sc->aac_max_sectors << 9;
1815 ip->MaxFibSize = sc->aac_max_fib_size;
1818 * Do controller-type-specific initialisation
/* clear any stale outbound doorbell bits */
1820 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1823 * Give the init structure to the controller.
1825 if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1826 sc->aac_common_busaddr +
1827 offsetof(struct aac_common, ac_init), 0, 0, 0,
1829 device_printf(sc->aac_dev,
1830 "error establishing init structure\n");
1836 * Check configuration issues
1838 if ((error = aac_check_config(sc)) != 0)
/*
 * Choose the interrupt delivery mode: MSI-X (capped by hardware and
 * AAC_MAX_MSIX), falling back to MSI, then to legacy INTx.  On success
 * sets sc->msi_enabled/aac_max_msix and derives the per-vector FIB
 * capacity (aac_vector_cap).
 */
1847 aac_define_int_mode(struct aac_softc *sc)
1850 int cap, msi_count, error = 0;
1855 /* max. vectors from AAC_MONKER_GETCOMMPREF */
1856 if (sc->aac_max_msix == 0) {
/* controller reported no MSI-X support: single vector, full FIB budget */
1857 sc->aac_max_msix = 1;
1858 sc->aac_vector_cap = sc->aac_max_fibs;
1863 msi_count = pci_msix_count(dev);
1864 if (msi_count > AAC_MAX_MSIX)
1865 msi_count = AAC_MAX_MSIX;
1866 if (msi_count > sc->aac_max_msix)
1867 msi_count = sc->aac_max_msix;
1868 if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1869 device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1870 "will try MSI\n", msi_count, error);
1871 pci_release_msi(dev);
1873 sc->msi_enabled = TRUE;
1874 device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1878 if (!sc->msi_enabled) {
1880 if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1881 device_printf(dev, "alloc msi failed - err=%d; "
1882 "will use INTx\n", error);
1883 pci_release_msi(dev);
1885 sc->msi_enabled = TRUE;
1886 device_printf(dev, "using MSI interrupts\n");
1890 if (sc->msi_enabled) {
1891 /* now read controller capability from PCI config. space */
1892 cap = aac_find_pci_capability(sc, PCIY_MSIX);
1893 val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
/* device did not actually enable MSI-X: back out to legacy interrupts */
1894 if (!(val & AAC_PCI_MSI_ENABLE)) {
1895 pci_release_msi(dev);
1896 sc->msi_enabled = FALSE;
1900 if (!sc->msi_enabled) {
1901 device_printf(dev, "using legacy interrupts\n");
1902 sc->aac_max_msix = 1;
1904 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1905 if (sc->aac_max_msix > msi_count)
1906 sc->aac_max_msix = msi_count;
/* split the FIB budget evenly across the vectors we ended up with */
1908 sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1910 fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1911 sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
/*
 * Walk the PCI capability list looking for capability id 'cap'.
 * Returns the config-space offset of the capability, or 0 when absent
 * (return statements are outside this partial listing — TODO confirm).
 */
1915 aac_find_pci_capability(struct aac_softc *sc, int cap)
1923 status = pci_read_config(dev, PCIR_STATUS, 2);
1924 if (!(status & PCIM_STATUS_CAPPRESENT))
/* capability list start depends on the header type */
1927 status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1928 switch (status & PCIM_HDRTYPE) {
1934 ptr = PCIR_CAP_PTR_2;
1940 ptr = pci_read_config(dev, ptr, 1);
1944 next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1945 val = pci_read_config(dev, ptr + PCICAP_ID, 1);
/*
 * Allocate one IRQ resource per interrupt vector and attach
 * aacraid_new_intr_type1 as the handler for each, recording the
 * rid/resource/cookie in the softc arrays.
 */
1955 aac_setup_intr(struct aac_softc *sc)
1957 int i, msi_count, rid;
1958 struct resource *res;
1961 msi_count = sc->aac_max_msix;
/* MSI/MSI-X rids start at 1; legacy INTx uses rid 0 */
1962 rid = (sc->msi_enabled ? 1:0);
1964 for (i = 0; i < msi_count; i++, rid++) {
1965 if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1966 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1967 device_printf(sc->aac_dev,"can't allocate interrupt\n");
1970 sc->aac_irq_rid[i] = rid;
1971 sc->aac_irq[i] = res;
1972 if (aac_bus_setup_intr(sc->aac_dev, res,
1973 INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1974 aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1975 device_printf(sc->aac_dev, "can't set up interrupt\n");
/* per-vector context handed to the interrupt handler */
1978 sc->aac_msix[i].vector_no = i;
1979 sc->aac_msix[i].sc = sc;
1980 sc->aac_intr[i] = tag;
/*
 * Query the controller's configuration status (CT_GET_CONFIG_STATUS)
 * and, when the firmware says it is safe (action <= CFACT_PAUSE),
 * auto-commit the configuration with CT_COMMIT_CONFIG.  Runs over a
 * borrowed sync FIB under aac_io_lock.
 */
1987 aac_check_config(struct aac_softc *sc)
1989 struct aac_fib *fib;
1990 struct aac_cnt_config *ccfg;
1991 struct aac_cf_status_hdr *cf_shdr;
1994 mtx_lock(&sc->aac_io_lock);
1995 aac_alloc_sync_fib(sc, &fib);
1997 ccfg = (struct aac_cnt_config *)&fib->data[0];
1998 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1999 ccfg->Command = VM_ContainerConfig;
2000 ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2001 ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2003 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2004 sizeof (struct aac_cnt_config));
2005 cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2006 if (rval == 0 && ccfg->Command == ST_OK &&
2007 ccfg->CTCommand.param[0] == CT_OK) {
2008 if (cf_shdr->action <= CFACT_PAUSE) {
/* reuse the same FIB for the commit request */
2009 bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2010 ccfg->Command = VM_ContainerConfig;
2011 ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2013 rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2014 sizeof (struct aac_cnt_config));
2015 if (rval == 0 && ccfg->Command == ST_OK &&
2016 ccfg->CTCommand.param[0] == CT_OK) {
2017 /* successful completion */
2020 /* auto commit aborted due to error(s) */
2024 /* auto commit aborted due to adapter indicating
2025 config. issues too dangerous to auto commit */
2033 aac_release_sync_fib(sc);
2034 mtx_unlock(&sc->aac_io_lock);
2039 * Send a synchronous command to the controller and wait for a result.
2040 * Indicate if the controller completed the command with an error status.
/*
 * Mailbox-based synchronous command.  'sp' (if non-NULL) receives the
 * status word; 'r1' (if non-NULL) receives mailbox 1.  A SYNCFIB with
 * *sp == 0 is fire-and-forget: the spin-wait for completion is skipped.
 */
2043 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2044 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2045 u_int32_t *sp, u_int32_t *r1)
2050 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2052 /* populate the mailbox */
2053 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2055 /* ensure the sync command doorbell flag is cleared */
2056 if (!sc->msi_enabled)
2057 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2059 /* then set it to signal the adapter */
2060 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2062 if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2063 /* spin waiting for the command to complete */
/* bounded by AAC_SYNC_TIMEOUT seconds of wall-clock time */
2066 if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2067 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2070 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2072 /* clear the completion flag */
2073 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2075 /* get the command status */
2076 status = AAC_GET_MAILBOX(sc, 0);
2080 /* return parameter */
2082 *r1 = AAC_GET_MAILBOX(sc, 1);
2084 if (status != AAC_SRB_STS_SUCCESS)
/*
 * Build the header of the shared sync FIB and hand it to the adapter
 * via AAC_MONKER_SYNCFIB, waiting for completion.  Caller holds
 * aac_io_lock and owns the sync FIB (aac_alloc_sync_fib()).
 */
2091 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2092 struct aac_fib *fib, u_int16_t datasize)
2094 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2095 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* payload must fit in the FIB's data area */
2097 if (datasize > AAC_FIB_DATASIZE)
2101 * Set up the sync FIB
2103 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2104 AAC_FIBSTATE_INITIALISED |
2106 fib->Header.XferState |= xferstate;
2107 fib->Header.Command = command;
2108 fib->Header.StructType = AAC_FIBTYPE_TFIB;
2109 fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2110 fib->Header.SenderSize = sizeof(struct aac_fib);
2111 fib->Header.SenderFibAddress = 0; /* Not needed */
/* the sync FIB lives at a fixed offset inside the common DMA area */
2112 fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2113 offsetof(struct aac_common, ac_sync_fib);
2116 * Give the FIB to the controller, wait for a response.
2118 if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2119 fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2120 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2128 * Check for commands that have been outstanding for a suspiciously long time,
2129 * and complain about them.
/*
 * Periodic watchdog: scan the busy queue for commands older than
 * AAC_CMD_TIMEOUT seconds, print them, then (per the visible tail)
 * reset the adapter and dump the queues.
 */
2132 aac_timeout(struct aac_softc *sc)
2134 struct aac_command *cm;
2138 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2140 * Traverse the busy command list, bitch about late commands once
2144 deadline = time_uptime - AAC_CMD_TIMEOUT;
2145 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2146 if (cm->cm_timestamp < deadline) {
2147 device_printf(sc->aac_dev,
2148 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2149 cm, (int)(time_uptime-cm->cm_timestamp));
2150 AAC_PRINT_FIB(sc, cm->cm_fib);
2156 aac_reset_adapter(sc);
2157 aacraid_print_queues(sc);
2161 * Interface Function Vectors
2165 * Read the current firmware status word.
/* SRC hardware: firmware status lives in the outbound message register */
2168 aac_src_get_fwstatus(struct aac_softc *sc)
2170 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2172 return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2176 * Notify the controller of a change in a given queue
/* ring the inbound doorbell with the queue bit shifted into position */
2179 aac_src_qnotify(struct aac_softc *sc, int qbit)
2181 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2183 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2187 * Get the interrupt reason bits
/*
 * In MSI mode the sync-status bit is translated to the generic
 * AAC_DB_SYNC_COMMAND flag; in INTx mode the raw doorbell is shifted
 * down to the common bit layout.
 */
2190 aac_src_get_istatus(struct aac_softc *sc)
2194 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2196 if (sc->msi_enabled) {
2197 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2198 if (val & AAC_MSI_SYNC_STATUS)
2199 val = AAC_DB_SYNC_COMMAND;
2203 val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2209 * Clear some interrupt reason bits
/* MSI mode clears the sync bit via the devreg helper; INTx writes ODBR_C */
2212 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2214 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2216 if (sc->msi_enabled) {
2217 if (mask == AAC_DB_SYNC_COMMAND)
2218 AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2220 AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2225 * Populate the mailbox and set the command word
/* SRC hardware: five consecutive 32-bit mailbox registers */
2228 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2229 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2231 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2233 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2234 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2235 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2236 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2237 AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
/* SRCv hardware variant of aac_src_set_mailbox: different mailbox base */
2241 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2242 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2244 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2246 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2247 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2248 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2249 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2250 AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2254 * Fetch the immediate command status word
/* read 32-bit mailbox register 'mb' (SRC register layout) */
2257 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2259 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2261 return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
/* SRCv hardware variant of aac_src_get_mailbox: different mailbox base */
2265 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2267 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2269 return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2273 * Set/clear interrupt masks
/*
 * Multiplexed device-register accessor: each 'mode' performs one
 * interrupt-related register sequence (enable/disable interrupts,
 * MSI-X on/off, clearing AIF/sync doorbell bits, enabling INTx).
 * Reads of IDBR after a write appear to be flush reads — TODO confirm.
 * NOTE(review): several case bodies are truncated in this listing.
 */
2276 aac_src_access_devreg(struct aac_softc *sc, int mode)
2280 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2283 case AAC_ENABLE_INTERRUPT:
2284 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2285 (sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2286 AAC_INT_ENABLE_TYPE1_INTX));
2289 case AAC_DISABLE_INTERRUPT:
2290 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2293 case AAC_ENABLE_MSIX:
2295 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2297 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2298 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2300 val = PMC_ALL_INTERRUPT_BITS;
2301 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2302 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
/* unmask everything except the two PMC global-interrupt bits */
2303 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2304 val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2307 case AAC_DISABLE_MSIX:
2309 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2311 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2312 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2315 case AAC_CLEAR_AIF_BIT:
2317 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2319 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2320 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2323 case AAC_CLEAR_SYNC_BIT:
2325 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2327 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2328 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2331 case AAC_ENABLE_INTX:
2333 val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2335 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2336 AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2338 val = PMC_ALL_INTERRUPT_BITS;
2339 AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2340 val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2341 AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2342 val & (~(PMC_GLOBAL_INT_BIT2)));
2351 * New comm. interface: Send command functions
/*
 * Submit a command through the new communication interface.  With
 * MSI-X, a response vector is chosen round-robin (skipping vectors that
 * are at their outstanding-request cap) and encoded into the FIB
 * handle.  Type-2 comm. packs the FIB address + encoded size directly;
 * type-1 builds an xport header in front of the FIB first.  The encoded
 * value is finally written to the 64- or 32-bit inbound queue register.
 * NOTE(review): listing has gaps — parts of the vector-selection loop
 * and the type-1 tail are not visible.
 */
2354 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2356 struct aac_fib_xporthdr *pFibX;
2357 u_int32_t fibsize, high_addr;
2360 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
/* AIF requests always use vector 0; data commands are load-balanced */
2362 if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2363 sc->aac_max_msix > 1) {
2364 u_int16_t vector_no, first_choice = 0xffff;
2366 vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2369 if (vector_no == sc->aac_max_msix)
2371 if (sc->aac_rrq_outstanding[vector_no] <
2374 if (0xffff == first_choice)
2375 first_choice = vector_no;
2376 else if (vector_no == first_choice)
/* wrapped all the way around: settle for the first candidate */
2379 if (vector_no == first_choice)
2381 sc->aac_rrq_outstanding[vector_no]++;
2382 if (sc->aac_fibs_pushed_no == 0xffffffff)
2383 sc->aac_fibs_pushed_no = 0;
2385 sc->aac_fibs_pushed_no++;
/* stash the chosen vector in the upper 16 bits of the handle */
2387 cm->cm_fib->Header.Handle += (vector_no << 16);
2390 if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2391 /* Calculate the amount to the fibsize bits */
2392 fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2393 /* Fill new FIB header */
2394 address = cm->cm_fibphys;
2395 high_addr = (u_int32_t)(address >> 32);
2396 if (high_addr == 0L) {
2397 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2398 cm->cm_fib->Header.u.TimeStamp = 0L;
2400 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2401 cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2403 cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2405 /* Calculate the amount to the fibsize bits */
2406 fibsize = (sizeof(struct aac_fib_xporthdr) +
2407 cm->cm_fib->Header.Size + 127) / 128 - 1;
2408 /* Fill XPORT header */
/* the xport header sits immediately before the 32-byte-aligned FIB */
2409 pFibX = (struct aac_fib_xporthdr *)
2410 ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2411 pFibX->Handle = cm->cm_fib->Header.Handle;
2412 pFibX->HostAddress = cm->cm_fibphys;
2413 pFibX->Size = cm->cm_fib->Header.Size;
2414 address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2415 high_addr = (u_int32_t)(address >> 32);
2420 aac_enqueue_busy(cm);
/* low address bits carry the encoded fibsize (address is 128-aligned) */
2422 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2423 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2425 AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2431 * New comm. interface: get, set outbound queue index
/* stub accessors — bodies are not visible in this partial listing */
2434 aac_src_get_outb_queue(struct aac_softc *sc)
2436 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2442 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2444 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2448 * Debugging and Diagnostics
2452 * Print some information about the controller.
/*
 * Attach-time banner: fetch the (supplemental) adapter info over a sync
 * FIB, print model, driver version, CPU/memory/battery details, kernel
 * revision, serial number, and the supported-options bit names.
 */
2455 aac_describe_controller(struct aac_softc *sc)
2457 struct aac_fib *fib;
2458 struct aac_adapter_info *info;
2459 char *adapter_type = "Adaptec RAID controller";
2461 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2463 mtx_lock(&sc->aac_io_lock);
2464 aac_alloc_sync_fib(sc, &fib);
2466 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2468 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2469 device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2471 struct aac_supplement_adapter_info *supp_info;
2473 supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
/* marketing name replaces the generic default when available */
2474 adapter_type = (char *)supp_info->AdapterTypeText;
2475 sc->aac_feature_bits = supp_info->FeatureBits;
2476 sc->aac_support_opt2 = supp_info->SupportedOptions2;
2479 device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2481 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2482 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2485 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2486 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2487 aac_release_sync_fib(sc);
2488 mtx_unlock(&sc->aac_io_lock);
2492 /* save the kernel revision structure for later use */
2493 info = (struct aac_adapter_info *)&fib->data[0];
2494 sc->aac_revision = info->KernelRevision;
2497 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2498 "(%dMB cache, %dMB execution), %s\n",
2499 aac_describe_code(aac_cpu_variant, info->CpuVariant),
2500 info->ClockSpeed, info->TotalMem / (1024 * 1024),
2501 info->BufferMem / (1024 * 1024),
2502 info->ExecutionMem / (1024 * 1024),
2503 aac_describe_code(aac_battery_platform,
2504 info->batteryPlatform));
2506 device_printf(sc->aac_dev,
2507 "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2508 info->KernelRevision.external.comp.major,
2509 info->KernelRevision.external.comp.minor,
2510 info->KernelRevision.external.comp.dash,
2511 info->KernelRevision.buildNumber,
2512 (u_int32_t)(info->SerialNumber & 0xffffff));
2514 device_printf(sc->aac_dev, "Supported Options=%b\n",
2515 sc->supported_options,
2538 aac_release_sync_fib(sc);
2539 mtx_unlock(&sc->aac_io_lock);
2543 * Look up a text description of a numeric error code and return a pointer to
/*
 * Look up a text description of a numeric code in a NULL-terminated
 * lookup table.  Returns the matching string, or -- by convention of the
 * driver's code tables -- the entry one past the NULL sentinel, which
 * holds a default/"unknown" string.
 */
2547 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2551 for (i = 0; table[i].string != NULL; i++)
2552 if (table[i].code == code)
2553 return(table[i].string);
/* Loop fell through: table[i].string == NULL; i+1 is the default entry. */
2554 return(table[i + 1].string);
2558 * Management Interface
/*
 * Character-device open entry point for the management interface.
 * Marks the device busy so it cannot detach while a management app holds
 * the node open, and registers aac_cdevpriv_dtor to undo that on the
 * final close of this file descriptor (cdevpriv API, FreeBSD >= 7.2).
 */
2562 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2564 struct aac_softc *sc;
2567 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2568 #if __FreeBSD_version >= 702000
2569 device_busy(sc->aac_dev);
2570 devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
/*
 * Management-interface ioctl dispatcher.  Each FSACTL_* command has a
 * native FreeBSD variant and a Linux-compatibility (FSACTL_LNX_*) variant
 * that share a handler.  For the native variants 'arg' is a pointer to a
 * user pointer, so it is dereferenced once ('arg = *(caddr_t*)arg;')
 * before deliberately falling through into the LNX case, whose handler
 * then treats 'arg' as the user buffer address directly.
 */
2576 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2578 union aac_statrequest *as;
2579 struct aac_softc *sc;
2582 as = (union aac_statrequest *)arg;
2584 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* Queue-statistics request: copy the selected counters back to the user. */
2588 switch (as->as_item) {
2592 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2593 sizeof(struct aac_qstat));
2601 case FSACTL_SENDFIB:
2602 case FSACTL_SEND_LARGE_FIB:
/* Native variant: unwrap the extra level of indirection, then fall through. */
2603 arg = *(caddr_t*)arg;
2604 case FSACTL_LNX_SENDFIB:
2605 case FSACTL_LNX_SEND_LARGE_FIB:
2606 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2607 error = aac_ioctl_sendfib(sc, arg);
2609 case FSACTL_SEND_RAW_SRB:
2610 arg = *(caddr_t*)arg;
2611 case FSACTL_LNX_SEND_RAW_SRB:
2612 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2613 error = aac_ioctl_send_raw_srb(sc, arg);
2615 case FSACTL_AIF_THREAD:
2616 case FSACTL_LNX_AIF_THREAD:
2617 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2620 case FSACTL_OPEN_GET_ADAPTER_FIB:
2621 arg = *(caddr_t*)arg;
2622 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2623 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2624 error = aac_open_aif(sc, arg);
2626 case FSACTL_GET_NEXT_ADAPTER_FIB:
2627 arg = *(caddr_t*)arg;
2628 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2629 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2630 error = aac_getnext_aif(sc, arg);
2632 case FSACTL_CLOSE_GET_ADAPTER_FIB:
2633 arg = *(caddr_t*)arg;
2634 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2635 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2636 error = aac_close_aif(sc, arg);
2638 case FSACTL_MINIPORT_REV_CHECK:
2639 arg = *(caddr_t*)arg;
2640 case FSACTL_LNX_MINIPORT_REV_CHECK:
2641 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2642 error = aac_rev_check(sc, arg);
2644 case FSACTL_QUERY_DISK:
2645 arg = *(caddr_t*)arg;
2646 case FSACTL_LNX_QUERY_DISK:
2647 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2648 error = aac_query_disk(sc, arg);
2650 case FSACTL_DELETE_DISK:
2651 case FSACTL_LNX_DELETE_DISK:
2653 * We don't trust userland to tell us when to delete a
2654 * container, rather we rely on an AIF coming from the
2659 case FSACTL_GET_PCI_INFO:
2660 arg = *(caddr_t*)arg;
2661 case FSACTL_LNX_GET_PCI_INFO:
2662 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2663 error = aac_get_pci_info(sc, arg);
2665 case FSACTL_GET_FEATURES:
2666 arg = *(caddr_t*)arg;
2667 case FSACTL_LNX_GET_FEATURES:
2668 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2669 error = aac_supported_features(sc, arg);
/* Unknown command: log it; the default error is set outside this view. */
2672 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
/*
 * poll(2) entry point for the management device.  A reader is "ready"
 * when any registered AIF context has unread entries, i.e. its read index
 * lags the global write index (aifq_idx) or the queue has wrapped past
 * it.  Otherwise the thread is recorded via selrecord() so that
 * aac_handle_aif()'s selwakeuppri() can wake it later.
 */
2680 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2682 struct aac_softc *sc;
2683 struct aac_fib_context *ctx;
2689 mtx_lock(&sc->aac_io_lock);
2690 if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2691 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
/* Unread AIFs exist if this context's index trails the queue head. */
2692 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2693 revents |= poll_events & (POLLIN | POLLRDNORM);
2698 mtx_unlock(&sc->aac_io_lock);
/* Nothing ready: register for wakeup on future AIF arrival. */
2701 if (poll_events & (POLLIN | POLLRDNORM))
2702 selrecord(td, &sc->rcv_select);
/*
 * Event callback used by the ioctl paths while they wait for a free
 * command slot.  On AAC_EVENT_CMFREE, retry the allocation; if it fails
 * again, re-queue the event and keep waiting.  On success the event is
 * consumed and freed (the waiter is woken elsewhere).
 */
2709 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2712 switch (event->ev_type) {
2713 case AAC_EVENT_CMFREE:
2714 mtx_assert(&sc->aac_io_lock, MA_OWNED);
/* Allocation still failing: put the event back and wait for the next free. */
2715 if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2716 aacraid_add_event(sc, event);
2719 free(event, M_AACRAIDBUF);
2728 * Send a FIB supplied from userspace
/*
 * Send a FIB supplied from userspace (FSACTL_SENDFIB).
 * Allocates a command (sleeping on an AAC_EVENT_CMFREE event if none are
 * free), copies the user FIB in (header first to learn its size, then the
 * full payload, clamped to aac_max_fib_size), submits it synchronously
 * via aacraid_wait_command(), and copies the completed FIB back out.
 */
2731 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2733 struct aac_command *cm;
2736 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2743 mtx_lock(&sc->aac_io_lock);
/* No free command: register a CMFREE event and sleep until one appears. */
2744 if (aacraid_alloc_command(sc, &cm)) {
2745 struct aac_event *event;
2747 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2749 if (event == NULL) {
2751 mtx_unlock(&sc->aac_io_lock);
2754 event->ev_type = AAC_EVENT_CMFREE;
2755 event->ev_callback = aac_ioctl_event;
2756 event->ev_arg = &cm;
2757 aacraid_add_event(sc, event);
2758 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2760 mtx_unlock(&sc->aac_io_lock);
2763 * Fetch the FIB header, then re-copy to get data as well.
2765 if ((error = copyin(ufib, cm->cm_fib,
2766 sizeof(struct aac_fib_header))) != 0)
2768 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
/* Clamp untrusted size from userland to the adapter's FIB limit. */
2769 if (size > sc->aac_max_fib_size) {
2770 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2771 size, sc->aac_max_fib_size);
2772 size = sc->aac_max_fib_size;
2774 if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2776 cm->cm_fib->Header.Size = size;
2777 cm->cm_timestamp = time_uptime;
2781 * Pass the FIB to the controller, wait for it to complete.
2783 mtx_lock(&sc->aac_io_lock);
2784 error = aacraid_wait_command(cm);
2785 mtx_unlock(&sc->aac_io_lock);
2787 device_printf(sc->aac_dev,
2788 "aacraid_wait_command return %d\n", error);
2793 * Copy the FIB and data back out to the caller.
2795 size = cm->cm_fib->Header.Size;
/* The adapter's reply size is clamped the same way as the request. */
2796 if (size > sc->aac_max_fib_size) {
2797 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2798 size, sc->aac_max_fib_size);
2799 size = sc->aac_max_fib_size;
2801 error = copyout(cm->cm_fib, ufib, size);
2805 mtx_lock(&sc->aac_io_lock);
2806 aacraid_release_command(cm);
2807 mtx_unlock(&sc->aac_io_lock);
2813 * Send a passthrough FIB supplied from userspace
/*
 * Send a raw SCSI passthrough request (SRB) supplied from userspace
 * (FSACTL_SEND_RAW_SRB).  Copies in the SRB and its single user S/G
 * element (32- or 64-bit format), optionally allocates a kernel bounce
 * buffer via a private DMA tag for the data phase, submits the FIB,
 * sleeps until AAC_CMD_COMPLETED, copies data and the SRB response back
 * to the user, then tears the DMA resources down and restores the
 * command's original DMA map.
 */
2816 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2818 struct aac_command *cm;
2819 struct aac_fib *fib;
2820 struct aac_srb *srbcmd;
2821 struct aac_srb *user_srb = (struct aac_srb *)arg;
2823 int error, transfer_data = 0;
2824 bus_dmamap_t orig_map = 0;
2825 u_int32_t fibsize = 0;
2826 u_int64_t srb_sg_address;
2827 u_int32_t srb_sg_bytecount;
2829 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2833 mtx_lock(&sc->aac_io_lock);
/* No free command: wait for one via the CMFREE event mechanism. */
2834 if (aacraid_alloc_command(sc, &cm)) {
2835 struct aac_event *event;
2837 event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2839 if (event == NULL) {
2841 mtx_unlock(&sc->aac_io_lock);
2844 event->ev_type = AAC_EVENT_CMFREE;
2845 event->ev_callback = aac_ioctl_event;
2846 event->ev_arg = &cm;
2847 aacraid_add_event(sc, event);
2848 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2850 mtx_unlock(&sc->aac_io_lock);
2853 /* save original dma map, restored in the cleanup path below */
2854 orig_map = cm->cm_datamap;
2857 srbcmd = (struct aac_srb *)fib->data;
/*
 * NOTE(review): in these two copyin checks the '!= 0' binds inside the
 * assignment parentheses, so 'error' receives the boolean result rather
 * than the copyin status.  The truth test still works; only the reported
 * errno is lost.  Upstream quirk -- confirm before changing.
 */
2858 if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2859 sizeof (u_int32_t)) != 0))
/* Reject user-supplied sizes that would overflow the FIB payload. */
2861 if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2865 if ((error = copyin((void *)user_srb, srbcmd, fibsize) != 0))
2868 srbcmd->function = 0; /* SRBF_ExecuteScsi */
2869 srbcmd->retry_limit = 0; /* obsolete */
2871 /* only one sg element from userspace supported */
2872 if (srbcmd->sg_map.SgCount > 1) {
/* Distinguish 32-bit vs 64-bit S/G layout by the total structure size. */
2877 if (fibsize == (sizeof(struct aac_srb) +
2878 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2879 struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2880 struct aac_sg_entry sg;
2882 if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2885 srb_sg_bytecount = sg.SgByteCount;
2886 srb_sg_address = (u_int64_t)sg.SgAddress;
2887 } else if (fibsize == (sizeof(struct aac_srb) +
2888 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2890 struct aac_sg_entry64 *sgp =
2891 (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2892 struct aac_sg_entry64 sg;
2894 if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2897 srb_sg_bytecount = sg.SgByteCount;
2898 srb_sg_address = sg.SgAddress;
/* 64-bit user address on a 32-bit-only adapter cannot be honored. */
2899 if (srb_sg_address > 0xffffffffull &&
2900 !(sc->flags & AAC_FLAGS_SG_64BIT))
/* SRB response follows the SRB structure in the user buffer. */
2910 user_reply = (char *)arg + fibsize;
2911 srbcmd->data_len = srb_sg_bytecount;
2912 if (srbcmd->sg_map.SgCount == 1)
2915 if (transfer_data) {
2917 * Create DMA tag for the passthr. data buffer and allocate it.
2919 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */
2920 1, 0, /* algnmnt, boundary */
2921 (sc->flags & AAC_FLAGS_SG_64BIT) ?
2922 BUS_SPACE_MAXADDR_32BIT :
2923 0x7fffffff, /* lowaddr */
2924 BUS_SPACE_MAXADDR, /* highaddr */
2925 NULL, NULL, /* filter, filterarg */
2926 srb_sg_bytecount, /* size */
2927 sc->aac_sg_tablesize, /* nsegments */
2928 srb_sg_bytecount, /* maxsegsize */
2930 NULL, NULL, /* No locking needed */
2931 &cm->cm_passthr_dmat)) {
2935 if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2936 BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2940 /* fill some cm variables */
2941 cm->cm_datalen = srb_sg_bytecount;
2942 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2943 cm->cm_flags |= AAC_CMD_DATAIN;
2944 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2945 cm->cm_flags |= AAC_CMD_DATAOUT;
/* Outbound data phase: stage the user buffer into the bounce buffer. */
2947 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2948 if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2949 cm->cm_data, cm->cm_datalen)) != 0)
2951 /* sync required for bus_dmamem_alloc() alloc. mem.? */
2952 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2953 BUS_DMASYNC_PREWRITE);
/* Build the FIB header around the SRB we copied in. */
2958 fib->Header.Size = sizeof(struct aac_fib_header) +
2959 sizeof(struct aac_srb);
2960 fib->Header.XferState =
2961 AAC_FIBSTATE_HOSTOWNED |
2962 AAC_FIBSTATE_INITIALISED |
2963 AAC_FIBSTATE_EMPTY |
2964 AAC_FIBSTATE_FROMHOST |
2965 AAC_FIBSTATE_REXPECTED |
2969 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2970 ScsiPortCommandU64 : ScsiPortCommand;
2971 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
/* Map + submit: the S/G callback fires the command at the adapter. */
2974 if (transfer_data) {
2975 bus_dmamap_load(cm->cm_passthr_dmat,
2976 cm->cm_datamap, cm->cm_data,
2978 aacraid_map_command_sg, cm, 0);
2980 aacraid_map_command_sg(cm, NULL, 0, 0);
2983 /* wait for completion */
2984 mtx_lock(&sc->aac_io_lock);
2985 while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2986 msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2987 mtx_unlock(&sc->aac_io_lock);
/* Inbound data phase: return the bounce buffer contents to the user. */
2990 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2991 if ((error = copyout(cm->cm_data,
2992 (void *)(uintptr_t)srb_sg_address,
2993 cm->cm_datalen)) != 0)
2995 /* sync required for bus_dmamem_alloc() allocated mem.? */
2996 bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2997 BUS_DMASYNC_POSTREAD);
3001 error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
/* Cleanup: unwind DMA resources and restore the command's original map. */
3004 if (cm && cm->cm_data) {
3006 bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3007 bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3008 cm->cm_datamap = orig_map;
3010 if (cm && cm->cm_passthr_dmat)
3011 bus_dma_tag_destroy(cm->cm_passthr_dmat);
3013 mtx_lock(&sc->aac_io_lock);
3014 aacraid_release_command(cm);
3015 mtx_unlock(&sc->aac_io_lock);
3021 * Request an AIF from the controller (new comm. type1)
/*
 * Request an AIF from the controller (new comm. type1).  Builds an
 * AifRequest/AifReqEvent FIB on an allocated command and submits it via
 * the S/G mapping path.  If no command is free, sets aif_pending so the
 * request is retried when a command is released.
 */
3024 aac_request_aif(struct aac_softc *sc)
3026 struct aac_command *cm;
3027 struct aac_fib *fib;
3029 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
/* Defer: a later command release will re-issue this request. */
3031 if (aacraid_alloc_command(sc, &cm)) {
3032 sc->aif_pending = 1;
3035 sc->aif_pending = 0;
3039 fib->Header.Size = sizeof(struct aac_fib);
3040 fib->Header.XferState =
3041 AAC_FIBSTATE_HOSTOWNED |
3042 AAC_FIBSTATE_INITIALISED |
3043 AAC_FIBSTATE_EMPTY |
3044 AAC_FIBSTATE_FROMHOST |
3045 AAC_FIBSTATE_REXPECTED |
3048 /* set AIF marker so completion path recognizes this as an AIF FIB */
3049 fib->Header.Handle = 0x00800000;
3050 fib->Header.Command = AifRequest;
3051 ((struct aac_aif_command *)fib->data)->command = AifReqEvent;
/* No data phase: map with an empty S/G list to fire the command. */
3053 aacraid_map_command_sg(cm, NULL, 0, 0);
3057 #if __FreeBSD_version >= 702000
3059 * cdevpriv interface private destructor.
/*
 * cdevpriv destructor: runs on last close of a management-device file
 * descriptor.  Undoes the device_busy() taken in aac_open() so the
 * driver can detach again.  'arg' is the softc stored by
 * devfs_set_cdevpriv().
 */
3062 aac_cdevpriv_dtor(void *arg)
3064 struct aac_softc *sc;
3067 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3069 device_unbusy(sc->aac_dev);
/*
 * Character-device close entry point.  Only traces entry here; per-fd
 * cleanup is handled by the cdevpriv destructor (aac_cdevpriv_dtor),
 * not by this routine.
 */
3074 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3076 struct aac_softc *sc;
3079 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3085 * Handle an AIF sent to us by the controller; queue it for later reference.
3086 * If the queue fills up, then drop the older entries.
/*
 * Handle an AIF (Adapter Initiated FIB) event from the controller.
 * Reacts to container add/delete by re-enumerating containers, to
 * enclosure/JBOD events by requesting a CAM rescan, then unconditionally
 * appends the raw FIB to the circular AIF queue for retrieval through
 * the management ioctls, and wakes sleepers and poll()ers.
 */
3089 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3091 struct aac_aif_command *aif;
3092 struct aac_container *co, *co_next;
3093 struct aac_fib_context *ctx;
3094 struct aac_fib *sync_fib;
3095 struct aac_mntinforesp mir;
3096 int next, current, found;
3097 int count = 0, changed = 0, i = 0;
3098 u_int32_t channel, uid;
3100 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3102 aif = (struct aac_aif_command*)&fib->data[0];
3103 aacraid_print_aif(sc, aif);
3105 /* Is it an event that we should care about? */
3106 switch (aif->command) {
3107 case AifCmdEventNotify:
3108 switch (aif->data.EN.type) {
3109 case AifEnAddContainer:
3110 case AifEnDeleteContainer:
3112 * A container was added or deleted, but the message
3113 * doesn't tell us anything else! Re-enumerate the
3114 * containers and sort things out.
3116 aac_alloc_sync_fib(sc, &sync_fib);
3119 * Ask the controller for its containers one at
3121 * XXX What if the controller's list changes
3122 * midway through this enumeration?
3123 * XXX This should be done async.
3125 if (aac_get_container_info(sc, sync_fib, i,
3129 count = mir.MntRespCount;
3131 * Check the container against our list.
3132 * co->co_found was already set to 0 in a
3135 if ((mir.Status == ST_OK) &&
3136 (mir.MntTable[0].VolType != CT_NONE)) {
3139 &sc->aac_container_tqh,
3141 if (co->co_mntobj.ObjectId ==
3142 mir.MntTable[0].ObjectId) {
3149 * If the container matched, continue
3158 * This is a new container. Do all the
3159 * appropriate things to set it up.
3161 aac_add_container(sc, &mir, 1, uid);
3165 } while ((i < count) && (i < AAC_MAX_CONTAINERS));
3166 aac_release_sync_fib(sc);
3169 * Go through our list of containers and see which ones
3170 * were not marked 'found'. Since the controller didn't
3171 * list them they must have been deleted. Do the
3172 * appropriate steps to destroy the device. Also reset
3173 * the co->co_found field.
3175 co = TAILQ_FIRST(&sc->aac_container_tqh);
3176 while (co != NULL) {
3177 if (co->co_found == 0) {
/* Grab the next pointer before unlinking/freeing the node. */
3178 co_next = TAILQ_NEXT(co, co_link);
3179 TAILQ_REMOVE(&sc->aac_container_tqh, co,
3181 free(co, M_AACRAIDBUF);
3186 co = TAILQ_NEXT(co, co_link);
3190 /* Attach the newly created containers */
3192 if (sc->cam_rescan_cb != NULL)
3193 sc->cam_rescan_cb(sc, 0,
3194 AAC_CAM_TARGET_WILDCARD);
3199 case AifEnEnclosureManagement:
3200 switch (aif->data.EN.data.EEE.eventType) {
3201 case AIF_EM_DRIVE_INSERTION:
3202 case AIF_EM_DRIVE_REMOVAL:
3203 channel = aif->data.EN.data.EEE.unitID;
/* unitID encodes bus in bits 24-27 and target in the low 16 bits. */
3204 if (sc->cam_rescan_cb != NULL)
3205 sc->cam_rescan_cb(sc,
3206 ((channel>>24) & 0xF) + 1,
3207 (channel & 0xFFFF));
3213 case AifEnDeleteJBOD:
3214 case AifRawDeviceRemove:
3215 channel = aif->data.EN.data.ECE.container;
3216 if (sc->cam_rescan_cb != NULL)
3217 sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3218 AAC_CAM_TARGET_WILDCARD);
3229 /* Copy the AIF data to the AIF queue for ioctl retrieval */
3230 current = sc->aifq_idx;
3231 next = (current + 1) % AAC_AIFQ_LENGTH;
3233 sc->aifq_filled = 1;
3234 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3235 /* modify AIF contexts: advance readers the queue head just lapped */
3236 if (sc->aifq_filled) {
3237 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3238 if (next == ctx->ctx_idx)
3240 else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3241 ctx->ctx_idx = next;
3244 sc->aifq_idx = next;
3245 /* On the off chance that someone is sleeping for an aif... */
3246 if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3247 wakeup(sc->aac_aifq);
3248 /* Wakeup any poll()ers */
3249 selwakeuppri(&sc->rcv_select, PRIBIO);
3255 * Return the Revision of the driver to userspace and check to see if the
3256 * userspace app is possibly compatible. This is extremely bogus since
3257 * our driver doesn't follow Adaptec's versioning system. Cheat by just
3258 * returning what the card reported.
/*
 * FSACTL_MINIPORT_REV_CHECK handler: copy in the userland revision
 * structure, report our driver version back, and always claim
 * compatibility (possiblyCompatible = 1) -- the driver does not follow
 * Adaptec's versioning scheme, so a real comparison is meaningless.
 */
3261 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3263 struct aac_rev_check rev_check;
3264 struct aac_rev_check_resp rev_check_resp;
3267 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3270 * Copyin the revision struct from userspace
3272 if ((error = copyin(udata, (caddr_t)&rev_check,
3273 sizeof(struct aac_rev_check))) != 0) {
3277 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3278 rev_check.callingRevision.buildNumber);
3281 * Doctor up the response struct.
3283 rev_check_resp.possiblyCompatible = 1;
3284 rev_check_resp.adapterSWRevision.external.comp.major =
3285 AAC_DRIVER_MAJOR_VERSION;
3286 rev_check_resp.adapterSWRevision.external.comp.minor =
3287 AAC_DRIVER_MINOR_VERSION;
3288 rev_check_resp.adapterSWRevision.external.comp.type =
3290 rev_check_resp.adapterSWRevision.external.comp.dash =
3291 AAC_DRIVER_BUGFIX_LEVEL;
3292 rev_check_resp.adapterSWRevision.buildNumber =
/* copyout's own status is the ioctl's return value. */
3295 return(copyout((caddr_t)&rev_check_resp, udata,
3296 sizeof(struct aac_rev_check_resp)));
3300 * Pass the fib context to the caller
/*
 * FSACTL_OPEN_GET_ADAPTER_FIB handler: allocate a new per-reader AIF
 * context, append it to the softc's context list, assign it a unique
 * identifier (derived from its own kernel address, checked against
 * existing contexts for collisions), and copy that id out to the caller.
 * On copyout failure the context is torn down again via aac_close_aif().
 */
3303 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3305 struct aac_fib_context *fibctx, *ctx;
3308 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3310 fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3314 mtx_lock(&sc->aac_io_lock);
3315 /* all elements are already 0, add to queue */
3316 if (sc->fibctx == NULL)
3317 sc->fibctx = fibctx;
/* Otherwise walk to the tail of the singly-viewed context list. */
3319 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3325 /* evaluate unique value (low 32 bits of the context's own address) */
3326 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
/* Reject a colliding id; the retry/adjust logic is elided in this view. */
3328 while (ctx != fibctx) {
3329 if (ctx->unique == fibctx->unique) {
3337 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3338 mtx_unlock(&sc->aac_io_lock);
3340 aac_close_aif(sc, (caddr_t)ctx);
3345 * Close the caller's fib context
/*
 * FSACTL_CLOSE_GET_ADAPTER_FIB handler: find the AIF context whose
 * unique id matches the caller-supplied value, unlink it from the
 * doubly-linked context list (special-casing the head), and free it.
 */
3348 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3350 struct aac_fib_context *ctx;
3352 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3354 mtx_lock(&sc->aac_io_lock);
3355 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
/* 'arg' carries the unique id by value, not a user pointer, here. */
3356 if (ctx->unique == *(uint32_t *)&arg) {
3357 if (ctx == sc->fibctx)
3360 ctx->prev->next = ctx->next;
3362 ctx->next->prev = ctx->prev;
3368 free(ctx, M_AACRAIDBUF);
3370 mtx_unlock(&sc->aac_io_lock);
3375 * Pass the caller the next AIF in their queue
/*
 * FSACTL_GET_NEXT_ADAPTER_FIB handler: copy in the request (with a
 * 32-bit compat translation for ILP32 callers), locate the matching AIF
 * context by unique id, and hand the next queued AIF to the caller via
 * aac_return_aif().  If the queue is empty and the caller asked to Wait,
 * sleep (interruptibly) until aac_handle_aif() posts a new entry.
 */
3378 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3380 struct get_adapter_fib_ioctl agf;
3381 struct aac_fib_context *ctx;
3384 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3386 mtx_lock(&sc->aac_io_lock);
3387 #ifdef COMPAT_FREEBSD32
3388 if (SV_CURPROC_FLAG(SV_ILP32)) {
/* 32-bit process: translate the narrow ioctl struct field by field. */
3389 struct get_adapter_fib_ioctl32 agf32;
3390 error = copyin(arg, &agf32, sizeof(agf32));
3392 agf.AdapterFibContext = agf32.AdapterFibContext;
3393 agf.Wait = agf32.Wait;
3394 agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3398 error = copyin(arg, &agf, sizeof(agf));
3400 for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3401 if (agf.AdapterFibContext == ctx->unique)
3405 mtx_unlock(&sc->aac_io_lock);
3409 error = aac_return_aif(sc, ctx, agf.AifFib);
3410 if (error == EAGAIN && agf.Wait) {
3411 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3412 sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3413 while (error == EAGAIN) {
/* Drop the lock across tsleep; PCATCH lets a signal abort the wait. */
3414 mtx_unlock(&sc->aac_io_lock);
3415 error = tsleep(sc->aac_aifq, PRIBIO |
3416 PCATCH, "aacaif", 0);
3417 mtx_lock(&sc->aac_io_lock);
3419 error = aac_return_aif(sc, ctx, agf.AifFib);
3421 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3424 mtx_unlock(&sc->aac_io_lock);
3429 * Hand the next AIF off the top of the queue out to userspace.
/*
 * Hand the next AIF off the top of the given context's queue out to
 * userspace at 'uptr'.  Returns EAGAIN (set outside this view) when the
 * context has caught up with the queue head and has not wrapped, i.e.
 * there is nothing unread.
 */
3432 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3436 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3438 current = ctx->ctx_idx;
/* Empty for this reader: index equals head and no wrap pending. */
3439 if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3444 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3446 device_printf(sc->aac_dev,
3447 "aac_return_aif: copyout returned %d\n", error);
/* Consume the entry: advance this reader's index around the ring. */
3450 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
/*
 * FSACTL_GET_PCI_INFO handler: report the controller's PCI bus and slot
 * numbers to userspace in a small locally-defined structure.
 */
3456 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3458 struct aac_pci_info {
3464 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3466 pciinf.bus = pci_get_bus(sc->aac_dev);
3467 pciinf.slot = pci_get_slot(sc->aac_dev);
3469 error = copyout((caddr_t)&pciinf, uptr,
3470 sizeof(struct aac_pci_info));
/*
 * FSACTL_GET_FEATURES handler: report driver feature state to the
 * management application.  An all-zero featuresState query returns the
 * state of every supported feature; a query with specific bits set
 * returns (only) the state of those features.
 */
3476 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3478 struct aac_features f;
3481 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3483 if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3487 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3488 * ALL zero in the featuresState, the driver will return the current
3489 * state of all the supported features, the data field will not be
3491 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3492 * a specific bit set in the featuresState, the driver will return the
3493 * current state of this specific feature and whatever data that are
3494 * associated with the feature in the data field or perform whatever
3495 * action needed indicates in the data field.
3497 if (f.feat.fValue == 0) {
3498 f.feat.fBits.largeLBA =
3499 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3500 f.feat.fBits.JBODSupport = 1;
3501 /* TODO: In the future, add other features state here as well */
3503 if (f.feat.fBits.largeLBA)
3504 f.feat.fBits.largeLBA =
3505 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3506 /* TODO: Add other features state and data in the future */
3509 error = copyout(&f, uptr, sizeof (f));
3514 * Give the userland some information about the container. The AAC arch
3515 * expects the driver to be a SCSI passthrough type driver, so it expects
3516 * the containers to have b:t:l numbers. Fake it.
/*
 * FSACTL_QUERY_DISK handler.  The AAC management architecture expects a
 * SCSI-passthrough style driver, so containers must present fake b:t:l
 * coordinates: look the container up by ObjectId and synthesize a
 * response (bus = device unit, target 0); a missing container is
 * reported as Deleted.
 */
3519 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3521 struct aac_query_disk query_disk;
3522 struct aac_container *co;
3525 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3527 mtx_lock(&sc->aac_io_lock);
3528 error = copyin(uptr, (caddr_t)&query_disk,
3529 sizeof(struct aac_query_disk));
3531 mtx_unlock(&sc->aac_io_lock);
3535 id = query_disk.ContainerNumber;
3537 mtx_unlock(&sc->aac_io_lock);
3541 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3542 if (co->co_mntobj.ObjectId == id)
/* Not found: tell the management app the container is gone. */
3547 query_disk.Valid = 0;
3548 query_disk.Locked = 0;
3549 query_disk.Deleted = 1; /* XXX is this right? */
3551 query_disk.Valid = 1;
3552 query_disk.Locked = 1;
3553 query_disk.Deleted = 0;
/* Fake b:t:l: bus is the device unit number, target is always 0. */
3554 query_disk.Bus = device_get_unit(sc->aac_dev);
3555 query_disk.Target = 0;
3557 query_disk.UnMapped = 0;
3560 error = copyout((caddr_t)&query_disk, uptr,
3561 sizeof(struct aac_query_disk));
3563 mtx_unlock(&sc->aac_io_lock);
/*
 * Create the pseudo "Container Bus": allocate an aac_sim, attach an
 * 'aacraidp' child device for it, and register it on the softc's SIM
 * list so the CAM layer can enumerate containers as targets.  Both
 * allocation failures panic -- this runs at attach time where recovery
 * is not attempted.
 */
3568 aac_container_bus(struct aac_softc *sc)
3570 struct aac_sim *sim;
3573 sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3574 M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3576 device_printf(sc->aac_dev,
3577 "No memory to add container bus\n");
3578 panic("Out of memory?!");
3580 child = device_add_child(sc->aac_dev, "aacraidp", -1);
3581 if (child == NULL) {
3582 device_printf(sc->aac_dev,
3583 "device_add_child failed for container bus\n");
3584 free(sim, M_AACRAIDBUF);
3585 panic("Out of memory?!");
3588 sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3590 sim->BusType = CONTAINER_BUS;
/* No initiator on the virtual container bus. */
3591 sim->InitiatorBusId = -1;
3593 sim->sim_dev = child;
3594 sim->aac_cam = NULL;
3596 device_set_ivars(child, sim);
3597 device_set_desc(child, "Container Bus");
3598 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
/*
 * NOTE(review): 'mir' is not declared in this function; in the upstream
 * driver the following device_set_desc() call is commented out -- the
 * comment delimiters appear to have been lost here.  Verify against the
 * original source before building.
 */
3600 device_set_desc(child, aac_describe_code(aac_container_types,
3601 mir->MntTable[0].VolType));
3603 bus_generic_attach(sc->aac_dev);
/*
 * Discover the controller's physical SCSI passthrough buses.  Issues
 * CT_GET_SCSI_METHOD to learn the scsi_method_id, then a VM_Ioctl/
 * GetBusInfo request for the bus table; for each valid bus it allocates
 * an aac_sim, attaches an 'aacraidp' child device, and links it onto the
 * softc's SIM list.  All FIB traffic happens under aac_io_lock; device
 * creation happens after the lock is dropped.
 */
3607 aac_get_bus_info(struct aac_softc *sc)
3609 struct aac_fib *fib;
3610 struct aac_ctcfg *c_cmd;
3611 struct aac_ctcfg_resp *c_resp;
3612 struct aac_vmioctl *vmi;
3613 struct aac_vmi_businf_resp *vmi_resp;
3614 struct aac_getbusinf businfo;
3615 struct aac_sim *caminf;
3619 mtx_lock(&sc->aac_io_lock);
3620 aac_alloc_sync_fib(sc, &fib);
/* Step 1: ask the firmware which SCSI method id to use for VM_Ioctl. */
3621 c_cmd = (struct aac_ctcfg *)&fib->data[0];
3622 bzero(c_cmd, sizeof(struct aac_ctcfg));
3624 c_cmd->Command = VM_ContainerConfig;
3625 c_cmd->cmd = CT_GET_SCSI_METHOD;
3628 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3629 sizeof(struct aac_ctcfg));
3631 device_printf(sc->aac_dev, "Error %d sending "
3632 "VM_ContainerConfig command\n", error);
3633 aac_release_sync_fib(sc);
3634 mtx_unlock(&sc->aac_io_lock);
3638 c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3639 if (c_resp->Status != ST_OK) {
3640 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3642 aac_release_sync_fib(sc);
3643 mtx_unlock(&sc->aac_io_lock);
3647 sc->scsi_method_id = c_resp->param;
/* Step 2: fetch the bus information table with VM_Ioctl/GetBusInfo. */
3649 vmi = (struct aac_vmioctl *)&fib->data[0];
3650 bzero(vmi, sizeof(struct aac_vmioctl));
3652 vmi->Command = VM_Ioctl;
3653 vmi->ObjType = FT_DRIVE;
3654 vmi->MethId = sc->scsi_method_id;
3656 vmi->IoctlCmd = GetBusInfo;
3658 error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3659 sizeof(struct aac_vmi_businf_resp));
3661 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3663 aac_release_sync_fib(sc);
3664 mtx_unlock(&sc->aac_io_lock);
3668 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3669 if (vmi_resp->Status != ST_OK) {
3670 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3672 aac_release_sync_fib(sc);
3673 mtx_unlock(&sc->aac_io_lock);
/* Keep a private copy so the sync FIB can be released before attach. */
3677 bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3678 aac_release_sync_fib(sc);
3679 mtx_unlock(&sc->aac_io_lock);
/* Step 3: instantiate one passthrough SIM/child per valid bus. */
3681 for (i = 0; i < businfo.BusCount; i++) {
3682 if (businfo.BusValid[i] != AAC_BUS_VALID)
3685 caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3686 M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3687 if (caminf == NULL) {
3688 device_printf(sc->aac_dev,
3689 "No memory to add passthrough bus %d\n", i);
3693 child = device_add_child(sc->aac_dev, "aacraidp", -1);
3694 if (child == NULL) {
3695 device_printf(sc->aac_dev,
3696 "device_add_child failed for passthrough bus %d\n",
3698 free(caminf, M_AACRAIDBUF);
3702 caminf->TargetsPerBus = businfo.TargetsPerBus;
/* Bus 0 is the container bus, so physical buses start at 1. */
3703 caminf->BusNumber = i+1;
3704 caminf->BusType = PASSTHROUGH_BUS;
3705 caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3706 caminf->aac_sc = sc;
3707 caminf->sim_dev = child;
3708 caminf->aac_cam = NULL;
3710 device_set_ivars(child, caminf);
3711 device_set_desc(child, "SCSI Passthrough Bus");
3712 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3717 * Check to see if the kernel is up and running. If we are in a
3718 * BlinkLED state, return the BlinkLED code.
/*
 * Check whether the adapter firmware is up and running.  Reads the
 * firmware status register; if the adapter reports a kernel panic
 * (BlinkLED) and the caller supplied 'bled', the BlinkLED code from
 * bits 16-23 of the status is returned through it.
 */
3721 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3725 ret = AAC_GET_FWSTATUS(sc);
3727 if (ret & AAC_UP_AND_RUNNING)
3729 else if (ret & AAC_KERNEL_PANIC && bled)
3730 *bled = (ret >> 16) & 0xff;
3736 * Once do an IOP reset, basically have to re-initialize the card as
3737 * if coming up from a cold boot, and the driver is responsible for
3738 * any IO that was outstanding to the adapter at the time of the IOP
3739 * RESET. And prepare the driver for IOP RESET by making the init code
3740 * modular with the ability to call it from multiple places.
3743 aac_reset_adapter(struct aac_softc *sc)
3745 struct aac_command *cm;
3746 struct aac_fib *fib;
3747 struct aac_pause_command *pc;
3748 u_int32_t status, reset_mask, waitCount, max_msix_orig;
3749 int msi_enabled_orig;
3751 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3752 mtx_assert(&sc->aac_io_lock, MA_OWNED);
3754 if (sc->aac_state & AAC_STATE_RESET) {
3755 device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3758 sc->aac_state |= AAC_STATE_RESET;
3760 /* disable interrupt */
3761 AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3764 * Abort all pending commands:
3765 * a) on the controller
3767 while ((cm = aac_dequeue_busy(sc)) != NULL) {
3768 cm->cm_flags |= AAC_CMD_RESET;
3770 /* is there a completion handler? */
3771 if (cm->cm_complete != NULL) {
3772 cm->cm_complete(cm);
3774 /* assume that someone is sleeping on this
3781 /* b) in the waiting queues */
3782 while ((cm = aac_dequeue_ready(sc)) != NULL) {
3783 cm->cm_flags |= AAC_CMD_RESET;
3785 /* is there a completion handler? */
3786 if (cm->cm_complete != NULL) {
3787 cm->cm_complete(cm);
3789 /* assume that someone is sleeping on this
3797 if (aac_check_adapter_health(sc, NULL) == 0) {
3798 mtx_unlock(&sc->aac_io_lock);
3799 (void) aacraid_shutdown(sc->aac_dev);
3800 mtx_lock(&sc->aac_io_lock);
3803 /* execute IOP reset */
3804 if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3805 AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3807 /* We need to wait for 5 seconds before accessing the MU again
3808 * 10000 * 100us = 1000,000us = 1000ms = 1s
3810 waitCount = 5 * 10000;
3812 DELAY(100); /* delay 100 microseconds */
3815 } else if ((aacraid_sync_command(sc,
3816 AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
3817 /* call IOP_RESET for older firmware */
3818 if ((aacraid_sync_command(sc,
3819 AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {
3821 if (status == AAC_SRB_STS_INVALID_REQUEST)
3822 device_printf(sc->aac_dev, "IOP_RESET not supported\n");
3824 /* probably timeout */
3825 device_printf(sc->aac_dev, "IOP_RESET failed\n");
3827 /* unwind aac_shutdown() */
3828 aac_alloc_sync_fib(sc, &fib);
3829 pc = (struct aac_pause_command *)&fib->data[0];
3830 pc->Command = VM_ContainerConfig;
3831 pc->Type = CT_PAUSE_IO;
3836 (void) aac_sync_fib(sc, ContainerCommand, 0, fib,
3837 sizeof (struct aac_pause_command));
3838 aac_release_sync_fib(sc);
3842 } else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
3843 AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3845 * We need to wait for 5 seconds before accessing the doorbell
3846 * again, 10000 * 100us = 1000,000us = 1000ms = 1s
3848 waitCount = 5 * 10000;
3850 DELAY(100); /* delay 100 microseconds */
3856 * Initialize the adapter.
3858 max_msix_orig = sc->aac_max_msix;
3859 msi_enabled_orig = sc->msi_enabled;
3860 sc->msi_enabled = FALSE;
3861 if (aac_check_firmware(sc) != 0)
3863 if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3864 sc->aac_max_msix = max_msix_orig;
3865 if (msi_enabled_orig) {
3866 sc->msi_enabled = msi_enabled_orig;
3867 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3869 mtx_unlock(&sc->aac_io_lock);
3871 mtx_lock(&sc->aac_io_lock);
3875 sc->aac_state &= ~AAC_STATE_RESET;
3876 AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3877 aacraid_startio(sc);