2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/malloc.h>
35 #include <sys/kernel.h>
37 #include <sys/selinfo.h>
40 #include <sys/eventhandler.h>
42 #include <sys/bus_dma.h>
44 #include <sys/ioccom.h>
48 #include <machine/bus.h>
49 #include <machine/resource.h>
51 #include <dev/mfi/mfireg.h>
52 #include <dev/mfi/mfi_ioctl.h>
53 #include <dev/mfi/mfivar.h>
55 static int mfi_alloc_commands(struct mfi_softc *);
56 static void mfi_release_command(struct mfi_command *cm);
57 static int mfi_comms_init(struct mfi_softc *);
58 static int mfi_polled_command(struct mfi_softc *, struct mfi_command *);
59 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
60 static int mfi_get_controller_info(struct mfi_softc *);
61 static int mfi_get_log_state(struct mfi_softc *,
62 struct mfi_evt_log_state **);
64 static int mfi_get_entry(struct mfi_softc *, int);
66 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
67 uint32_t, void **, size_t);
68 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
69 static void mfi_startup(void *arg);
70 static void mfi_intr(void *arg);
71 static void mfi_enable_intr(struct mfi_softc *sc);
72 static void mfi_ldprobe(struct mfi_softc *sc);
73 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
74 static void mfi_aen_complete(struct mfi_command *);
75 static int mfi_aen_setup(struct mfi_softc *, uint32_t);
76 static int mfi_add_ld(struct mfi_softc *sc, int);
77 static void mfi_add_ld_complete(struct mfi_command *);
78 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
79 static void mfi_bio_complete(struct mfi_command *);
80 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
81 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
82 static void mfi_complete(struct mfi_softc *, struct mfi_command *);
83 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
84 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
86 /* Management interface */
87 static d_open_t mfi_open;
88 static d_close_t mfi_close;
89 static d_ioctl_t mfi_ioctl;
90 static d_poll_t mfi_poll;
92 static struct cdevsw mfi_cdevsw = {
93 .d_version = D_VERSION,
102 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
104 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/*
 * Spin until the controller firmware reports READY in outbound message
 * register 0, issuing inbound-doorbell (MFI_IDB) writes to nudge it out
 * of handshake/operational states.  NOTE(review): this chunk is missing
 * lines (switch braces, breaks, returns); comments cover only what is
 * visible here.
 */
107 mfi_transition_firmware(struct mfi_softc *sc)
109 int32_t fw_state, cur_state;
/* Firmware state is carried in the masked bits of MFI_OMSG0. */
112 fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
113 while (fw_state != MFI_FWSTATE_READY) {
115 device_printf(sc->mfi_dev, "Waiting for firmware to "
117 cur_state = fw_state;
119 case MFI_FWSTATE_FAULT:
120 device_printf(sc->mfi_dev, "Firmware fault\n");
122 case MFI_FWSTATE_WAIT_HANDSHAKE:
/* Acknowledge the handshake so the firmware can continue booting. */
123 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
126 case MFI_FWSTATE_OPERATIONAL:
/* Ask an already-running firmware to transition back to READY. */
127 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
130 case MFI_FWSTATE_UNDEFINED:
131 case MFI_FWSTATE_BB_INIT:
134 case MFI_FWSTATE_FW_INIT:
135 case MFI_FWSTATE_DEVICE_SCAN:
136 case MFI_FWSTATE_FLUSH_CACHE:
140 device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
/* Re-poll the state register until it changes or the wait expires. */
144 for (i = 0; i < (max_wait * 10); i++) {
145 fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
146 if (fw_state == cur_state)
/* State never advanced within the timeout: report and (presumably) fail. */
151 if (fw_state == cur_state) {
152 device_printf(sc->mfi_dev, "firmware stuck in state "
/*
 * bus_dmamap_load(9) callback: publish the bus address of the single DMA
 * segment through the caller-supplied pointer (declaration of 'addr' is
 * not visible in this chunk; presumably a uint32_t * cast of 'arg').
 */
161 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
166 *addr = segs[0].ds_addr;
/*
 * Main attach path: bring the firmware to READY, size and allocate the
 * DMA areas (comms queues, command frames, sense buffers), allocate the
 * command pool, initialize firmware comms, hook the interrupt, and
 * create the management cdev.  NOTE(review): this chunk is missing many
 * lines (error gotos, closing braces); comments cover visible code only.
 */
170 mfi_attach(struct mfi_softc *sc)
173 int error, commsz, framessz, sensesz;
176 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
177 TAILQ_INIT(&sc->mfi_ld_tqh);
178 TAILQ_INIT(&sc->mfi_aen_pids);
185 /* Before we get too far, see if the firmware is working */
186 if ((error = mfi_transition_firmware(sc)) != 0) {
187 device_printf(sc->mfi_dev, "Firmware not in READY state, "
188 "error %d\n", error);
193 * Get information needed for sizing the contiguous memory for the
194 * frame pool. Size down the sgl parameter since we know that
195 * we will never need more than what's required for MAXPHYS.
196 * It would be nice if these constants were available at runtime
197 * instead of compile time.
/* Firmware advertises max commands / max SGL entries in OMSG0. */
199 status = MFI_READ4(sc, MFI_OMSG0);
200 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
201 sc->mfi_max_fw_sgl = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
202 sc->mfi_total_sgl = min(sc->mfi_max_fw_sgl, ((MAXPHYS / PAGE_SIZE) +1));
205 * Create the dma tag for data buffers. Used both for block I/O
206 * and for various internal data queries.
208 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
209 1, 0, /* algnmnt, boundary */
210 BUS_SPACE_MAXADDR, /* lowaddr */
211 BUS_SPACE_MAXADDR, /* highaddr */
212 NULL, NULL, /* filter, filterarg */
213 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
214 sc->mfi_total_sgl, /* nsegments */
215 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
216 BUS_DMA_ALLOCNOW, /* flags */
217 busdma_lock_mutex, /* lockfunc */
218 &sc->mfi_io_lock, /* lockfuncarg */
219 &sc->mfi_buffer_dmat)) {
220 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
225 * Allocate DMA memory for the comms queues. Keep it under 4GB for
226 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
227 * entry, so the calculated size here will be will be 1 more than
228 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
230 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
231 sizeof(struct mfi_hwcomms);
232 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
233 1, 0, /* algnmnt, boundary */
234 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
235 BUS_SPACE_MAXADDR, /* highaddr */
236 NULL, NULL, /* filter, filterarg */
237 commsz, /* maxsize */
239 commsz, /* maxsegsize */
241 NULL, NULL, /* lockfunc, lockarg */
242 &sc->mfi_comms_dmat)) {
243 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
246 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
247 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
248 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
251 bzero(sc->mfi_comms, commsz);
/* Capture the bus address of the comms area via the load callback. */
252 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
253 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
256 * Allocate DMA memory for the command frames. Keep them in the
257 * lower 4GB for efficiency. Calculate the size of the frames at
258 * the same time; the frame is 64 bytes plus space for the SG lists.
259 * The assumption here is that the SG list will start at the second
260 * 64 byte segment of the frame and not use the unused bytes in the
261 * frame. While this might seem wasteful, apparently the frames must
262 * be 64 byte aligned, so any savings would be negated by the extra
/* 64-bit bus addresses need the wider SG entry format. */
265 if (sizeof(bus_addr_t) == 8) {
266 sc->mfi_sgsize = sizeof(struct mfi_sg64);
267 sc->mfi_flags |= MFI_FLAGS_SG64;
269 sc->mfi_sgsize = sizeof(struct mfi_sg32);
/* Round the SG list area up to whole 64-byte frames. */
271 frames = (sc->mfi_sgsize * sc->mfi_total_sgl + MFI_FRAME_SIZE - 1) /
273 sc->mfi_frame_size = frames * MFI_FRAME_SIZE;
274 framessz = sc->mfi_frame_size * sc->mfi_max_fw_cmds;
275 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
276 64, 0, /* algnmnt, boundary */
277 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
278 BUS_SPACE_MAXADDR, /* highaddr */
279 NULL, NULL, /* filter, filterarg */
280 framessz, /* maxsize */
282 framessz, /* maxsegsize */
284 NULL, NULL, /* lockfunc, lockarg */
285 &sc->mfi_frames_dmat)) {
286 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
289 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
290 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
291 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
294 bzero(sc->mfi_frames, framessz);
295 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
296 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
299 * Allocate DMA memory for the frame sense data. Keep them in the
300 * lower 4GB for efficiency
302 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
303 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
304 4, 0, /* algnmnt, boundary */
305 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
306 BUS_SPACE_MAXADDR, /* highaddr */
307 NULL, NULL, /* filter, filterarg */
308 sensesz, /* maxsize */
310 sensesz, /* maxsegsize */
312 NULL, NULL, /* lockfunc, lockarg */
313 &sc->mfi_sense_dmat)) {
314 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
317 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
318 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
319 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
322 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
323 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
325 if ((error = mfi_alloc_commands(sc)) != 0)
328 if ((error = mfi_comms_init(sc)) != 0)
331 if ((error = mfi_get_controller_info(sc)) != 0)
/*
 * NOTE(review): BUG — the comma operator makes this condition
 * "(..., 0) != 0", which is always false, so a failure from
 * mfi_aen_setup() is silently ignored.  It almost certainly should
 * read: if ((error = mfi_aen_setup(sc, 0)) != 0)
 */
334 if ((error = mfi_aen_setup(sc, 0), 0) != 0)
338 * Set up the interrupt handler. XXX This should happen in
342 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
343 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
344 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
347 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
348 mfi_intr, sc, &sc->mfi_intr)) {
349 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
353 /* Register a config hook to probe the bus for arrays */
354 sc->mfi_ich.ich_func = mfi_startup;
355 sc->mfi_ich.ich_arg = sc;
356 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
357 device_printf(sc->mfi_dev, "Cannot establish configuration "
363 * Register a shutdown handler.
365 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
366 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
367 device_printf(sc->mfi_dev, "Warning: shutdown event "
368 "registration failed\n");
372 * Create the control device for doing management
374 unit = device_get_unit(sc->mfi_dev);
375 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
376 0640, "mfi%d", unit);
/*
 * NOTE(review): the alias is created before the NULL check below;
 * if make_dev() could fail here this would pass a NULL cdev to
 * make_dev_alias() — confirm against make_dev(9) semantics.
 */
378 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
379 if (sc->mfi_cdev != NULL)
380 sc->mfi_cdev->si_drv1 = sc;
/*
 * Carve the preallocated frame and sense DMA areas into an array of
 * mfi_command structures, one per firmware command slot, and seed the
 * free list.  (Chunk is missing lines; comments cover visible code only.)
 */
386 mfi_alloc_commands(struct mfi_softc *sc)
388 struct mfi_command *cm;
392 * XXX Should we allocate all the commands up front, or allocate on
393 * demand later like 'aac' does?
395 ncmds = sc->mfi_max_fw_cmds;
396 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
399 for (i = 0; i < ncmds; i++) {
400 cm = &sc->mfi_commands[i];
/* Each command owns one frame-sized slice of the frames area ... */
401 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
402 sc->mfi_frame_size * i);
403 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
404 sc->mfi_frame_size * i;
/* ... with the array index doubling as the completion context tag. */
405 cm->cm_frame->header.context = i;
406 cm->cm_sense = &sc->mfi_sense[i];
407 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
/* Only commands whose dmamap was created get put on the free list. */
409 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
410 &cm->cm_dmamap) == 0)
411 mfi_release_command(cm);
414 sc->mfi_total_cmds++;
/*
 * Reset a command's volatile fields and return it to the free queue.
 * The frame's context field (set at allocation) must survive the reset.
 */
421 mfi_release_command(struct mfi_command *cm)
426 * Zero out the important fields of the frame, but make sure the
427 * context field is preserved
429 hdr_data = (uint32_t *)cm->cm_frame;
433 cm->cm_extra_frames = 0;
435 cm->cm_complete = NULL;
436 cm->cm_private = NULL;
438 cm->cm_total_frame_size = 0;
439 mfi_enqueue_free(cm);
/*
 * Build a DCMD frame: dequeue a free command, optionally allocate a
 * zeroed data buffer of 'bufsize' bytes, and fill in the DCMD header.
 * On success *cmp receives the command and (if requested) *bufp the
 * buffer; ownership of the buffer passes to the command (cm_private).
 */
443 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
444 void **bufp, size_t bufsize)
446 struct mfi_command *cm;
447 struct mfi_dcmd_frame *dcmd;
450 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
452 cm = mfi_dequeue_free(sc);
/* Only allocate a data buffer when the caller asked for one. */
456 if ((bufsize > 0) && (bufp != NULL)) {
458 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
/* Allocation failed: give the command back before bailing. */
460 mfi_release_command(cm);
469 dcmd = &cm->cm_frame->dcmd;
470 bzero(dcmd->mbox, MFI_MBOX_SIZE);
471 dcmd->header.cmd = MFI_CMD_DCMD;
472 dcmd->header.timeout = 0;
473 dcmd->header.flags = 0;
474 dcmd->header.data_len = bufsize;
475 dcmd->opcode = opcode;
476 cm->cm_sg = &dcmd->sgl;
477 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
480 cm->cm_private = buf;
481 cm->cm_len = bufsize;
/* Hand the freshly allocated buffer back only if the caller had none. */
484 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI INIT command that tells the firmware where the reply
 * queue and producer/consumer indices live in host memory.
 */
490 mfi_comms_init(struct mfi_softc *sc)
492 struct mfi_command *cm;
493 struct mfi_init_frame *init;
494 struct mfi_init_qinfo *qinfo;
497 if ((cm = mfi_dequeue_free(sc)) == NULL)
501 * Abuse the SG list area of the frame to hold the init_qinfo
/* qinfo is placed in the second 64-byte frame of this command's slot. */
504 init = &cm->cm_frame->init;
505 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
507 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One more reply slot than commands, per the comment in mfi_attach(). */
508 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
509 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
510 offsetof(struct mfi_hwcomms, hw_reply_q);
511 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
512 offsetof(struct mfi_hwcomms, hw_pi);
513 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
514 offsetof(struct mfi_hwcomms, hw_ci);
516 init->header.cmd = MFI_CMD_INIT;
517 init->header.data_len = sizeof(struct mfi_init_qinfo);
518 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
520 if ((error = mfi_polled_command(sc, cm)) != 0) {
521 device_printf(sc->mfi_dev, "failed to send init command\n");
524 mfi_release_command(cm);
/*
 * Issue MFI_DCMD_CTRL_GETINFO (polled) and derive the max I/O size from
 * the controller's stripe/strip limits.  A failure is tolerated: a
 * default max I/O based on the SG list size is used instead.
 */
530 mfi_get_controller_info(struct mfi_softc *sc)
532 struct mfi_command *cm = NULL;
533 struct mfi_ctrl_info *ci = NULL;
534 uint32_t max_sectors_1, max_sectors_2;
537 mtx_lock(&sc->mfi_io_lock);
538 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
539 (void **)&ci, sizeof(*ci));
542 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
544 if ((error = mfi_mapcmd(sc, cm)) != 0) {
545 device_printf(sc->mfi_dev, "Controller info buffer map failed\n");
547 mfi_release_command(cm);
551 /* It's ok if this fails, just use default info instead */
552 if ((error = mfi_polled_command(sc, cm)) != 0) {
553 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: size I/O by what the SG list can map. */
554 sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
560 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
561 BUS_DMASYNC_POSTREAD);
562 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Max I/O is the lesser of stripe-derived and firmware-reported limits. */
564 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
565 max_sectors_2 = ci->max_request_size;
566 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
572 mfi_release_command(cm);
573 mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the firmware event-log state (sequence number bookmarks) via a
 * polled MFI_DCMD_CTRL_EVENT_GETINFO.  The buffer is allocated by
 * mfi_dcmd_command() and returned through *log_state; caller frees it.
 */
578 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
580 struct mfi_command *cm = NULL;
583 mtx_lock(&sc->mfi_io_lock);
584 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
585 (void **)log_state, sizeof(**log_state));
588 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
590 if ((error = mfi_mapcmd(sc, cm)) != 0) {
591 device_printf(sc->mfi_dev, "Log state buffer map failed\n");
595 if ((error = mfi_polled_command(sc, cm)) != 0) {
596 device_printf(sc->mfi_dev, "Failed to get log state\n");
600 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
601 BUS_DMASYNC_POSTREAD);
602 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
606 mfi_release_command(cm);
607 mtx_unlock(&sc->mfi_io_lock);
/*
 * Arm asynchronous event notification.  On first call (seq_start == 0)
 * the firmware log state is consulted to replay entries logged since the
 * last shutdown, then the AEN is registered from that sequence number.
 */
613 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
615 struct mfi_evt_log_state *log_state = NULL;
616 union mfi_evt class_locale;
/* Listen for everything: all locales, debug class and above. */
620 class_locale.members.reserved = 0;
621 class_locale.members.locale = MFI_EVT_LOCALE_ALL;
622 class_locale.members.class = MFI_EVT_CLASS_DEBUG;
624 if (seq_start == 0) {
625 error = mfi_get_log_state(sc, &log_state);
628 free(log_state, M_MFIBUF);
632 * Don't run them yet since we can't parse them.
633 * We can indirectly get the contents from
634 * the AEN mechanism via setting it lower then
635 * current. The firmware will iterate through them.
/* Replay events recorded between the last shutdown and now. */
638 for (seq = log_state->shutdown_seq_num;
639 seq <= log_state->newest_seq_num; seq++) {
640 mfi_get_entry(sc, seq);
644 seq = log_state->shutdown_seq_num + 1;
647 mfi_aen_register(sc, seq, class_locale.word);
648 free(log_state, M_MFIBUF);
/*
 * Send a command and busy-wait for completion.  cmd_status is primed to
 * 0xff and the frame flagged to bypass the reply queue; the firmware
 * overwrites cmd_status when done.  Times out after
 * MFI_POLL_TIMEOUT_SECS.
 */
654 mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
656 struct mfi_frame_header *hdr;
657 int tm = MFI_POLL_TIMEOUT_SECS * 1000000;
659 hdr = &cm->cm_frame->header;
/* 0xff is the sentinel "not yet completed" status. */
660 hdr->cmd_status = 0xff;
661 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
663 mfi_send_frame(sc, cm);
665 while (hdr->cmd_status == 0xff) {
/* Still 0xff after the full wait: the command never completed. */
672 if (hdr->cmd_status == 0xff) {
673 device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
/*
 * Queue a command and sleep (interlocked on mfi_io_lock) until the
 * interrupt path wakes us via the command's completion.
 */
681 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
684 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* No completion callback: completion will just wakeup() the sleeper. */
685 cm->cm_complete = NULL;
687 mfi_enqueue_ready(cm);
689 return (msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0));
/*
 * Teardown path: release the cdev, per-command dmamaps, interrupt
 * resources, the three DMA areas (sense, frames, comms) and their tags,
 * and finally the I/O mutex.  Each resource is guard-checked so this is
 * safe to call from a partially-failed attach.
 */
693 mfi_free(struct mfi_softc *sc)
695 struct mfi_command *cm;
698 if (sc->mfi_cdev != NULL)
699 destroy_dev(sc->mfi_cdev);
701 if (sc->mfi_total_cmds != 0) {
702 for (i = 0; i < sc->mfi_total_cmds; i++) {
703 cm = &sc->mfi_commands[i];
704 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
706 free(sc->mfi_commands, M_MFIBUF);
710 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
711 if (sc->mfi_irq != NULL)
712 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Unload/free/destroy in reverse order of allocation for each area. */
715 if (sc->mfi_sense_busaddr != 0)
716 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
717 if (sc->mfi_sense != NULL)
718 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
719 sc->mfi_sense_dmamap);
720 if (sc->mfi_sense_dmat != NULL)
721 bus_dma_tag_destroy(sc->mfi_sense_dmat);
723 if (sc->mfi_frames_busaddr != 0)
724 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
725 if (sc->mfi_frames != NULL)
726 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
727 sc->mfi_frames_dmamap);
728 if (sc->mfi_frames_dmat != NULL)
729 bus_dma_tag_destroy(sc->mfi_frames_dmat);
731 if (sc->mfi_comms_busaddr != 0)
732 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
733 if (sc->mfi_comms != NULL)
734 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
735 sc->mfi_comms_dmamap);
736 if (sc->mfi_comms_dmat != NULL)
737 bus_dma_tag_destroy(sc->mfi_comms_dmat);
739 if (sc->mfi_buffer_dmat != NULL)
740 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
741 if (sc->mfi_parent_dmat != NULL)
742 bus_dma_tag_destroy(sc->mfi_parent_dmat);
744 if (mtx_initialized(&sc->mfi_io_lock))
745 mtx_destroy(&sc->mfi_io_lock);
/*
 * Deferred-boot hook (registered in mfi_attach): tear down the config
 * intrhook; presumably followed by interrupt enable / LD probing in
 * lines not visible in this chunk.
 */
751 mfi_startup(void *arg)
753 struct mfi_softc *sc;
755 sc = (struct mfi_softc *)arg;
757 config_intrhook_disestablish(&sc->mfi_ich);
/* (mfi_intr body — the function header line is not visible in this chunk.)
 * Interrupt handler: ack the outbound interrupt status, then walk the
 * reply queue from consumer index (ci) to producer index (pi),
 * completing each context and recycling the slot with the 0xffffffff
 * "empty" sentinel.
 */
766 struct mfi_softc *sc;
767 struct mfi_command *cm;
768 uint32_t status, pi, ci, context;
770 sc = (struct mfi_softc *)arg;
772 status = MFI_READ4(sc, MFI_OSTS);
/* Shared interrupt line: bail if this device didn't assert it. */
773 if ((status & MFI_OSTS_INTR_VALID) == 0)
/* Write-back acknowledges/clears the interrupt. */
775 MFI_WRITE4(sc, MFI_OSTS, status);
777 pi = sc->mfi_comms->hw_pi;
778 ci = sc->mfi_comms->hw_ci;
779 mtx_lock(&sc->mfi_io_lock);
781 context = sc->mfi_comms->hw_reply_q[ci];
782 sc->mfi_comms->hw_reply_q[ci] = 0xffffffff;
783 if (context == 0xffffffff) {
784 device_printf(sc->mfi_dev, "mfi_intr: invalid context "
785 "pi= %d ci= %d\n", pi, ci);
/* Context indexes straight into the command array (set at alloc). */
787 cm = &sc->mfi_commands[context];
789 mfi_complete(sc, cm);
/* The reply queue has max_fw_cmds + 1 entries, hence this wrap point. */
792 if (ci == (sc->mfi_max_fw_cmds + 1)) {
796 mtx_unlock(&sc->mfi_io_lock);
/* Publish the new consumer index back to the firmware. */
798 sc->mfi_comms->hw_ci = ci;
/*
 * shutdown_final handler: abort any outstanding AEN command, then issue
 * a polled MFI_DCMD_CTRL_SHUTDOWN so the controller flushes its cache.
 */
804 mfi_shutdown(struct mfi_softc *sc)
806 struct mfi_dcmd_frame *dcmd;
807 struct mfi_command *cm;
810 mtx_lock(&sc->mfi_io_lock);
811 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
812 mtx_unlock(&sc->mfi_io_lock);
/* The long-lived AEN command would otherwise never complete. */
816 if (sc->mfi_aen_cm != NULL)
817 mfi_abort(sc, sc->mfi_aen_cm);
819 dcmd = &cm->cm_frame->dcmd;
820 dcmd->header.flags = MFI_FRAME_DIR_NONE;
822 if ((error = mfi_polled_command(sc, cm)) != 0) {
823 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
826 mfi_release_command(cm);
/* Write the outbound interrupt mask register to enable interrupts. */
831 mfi_enable_intr(struct mfi_softc *sc)
834 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enumerate logical disks: fetch the LD list with a sleeping
 * MFI_DCMD_LD_GET_LIST and attach a child device for each target id.
 */
838 mfi_ldprobe(struct mfi_softc *sc)
840 struct mfi_frame_header *hdr;
841 struct mfi_command *cm = NULL;
842 struct mfi_ld_list *list = NULL;
845 mtx_lock(&sc->mfi_io_lock);
846 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
847 (void **)&list, sizeof(*list));
851 cm->cm_flags = MFI_CMD_DATAIN;
/* Interrupt-driven wait; this runs from the startup hook, not attach. */
852 if (mfi_wait_command(sc, cm) != 0) {
853 device_printf(sc->mfi_dev, "Failed to get device listing\n");
857 hdr = &cm->cm_frame->header;
858 if (hdr->cmd_status != MFI_STAT_OK) {
859 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
864 for (i = 0; i < list->ld_count; i++)
865 mfi_add_ld(sc, list->ld_list[i].ld.target_id);
868 free(list, M_MFIBUF);
870 mfi_release_command(cm);
871 mtx_unlock(&sc->mfi_io_lock);
/*
 * Print a firmware event-log entry to the console.  Only the default-ish
 * "Log entry type" arm is visible in this chunk.
 */
877 mfi_decode_log(struct mfi_softc *sc, struct mfi_log_detail *detail)
879 switch (detail->arg_type) {
881 device_printf(sc->mfi_dev, "%d - Log entry type %d\n",
/*
 * Pretty-print an asynchronous event detail record: one console line per
 * event, formatted according to the event's argument type.  (Chunk is
 * missing lines — seq arguments, breaks — comments cover visible code.)
 */
891 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
893 switch (detail->arg_type) {
894 case MR_EVT_ARGS_NONE:
895 device_printf(sc->mfi_dev, "%d - %s\n",
900 case MR_EVT_ARGS_CDB_SENSE:
901 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) CDB %*D"
904 detail->args.cdb_sense.pd.device_id,
905 detail->args.cdb_sense.pd.enclosure_index,
906 detail->args.cdb_sense.pd.slot_number,
907 detail->args.cdb_sense.cdb_len,
908 detail->args.cdb_sense.cdb,
910 detail->args.cdb_sense.sense_len,
911 detail->args.cdb_sense.sense,
917 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
920 detail->args.ld.ld_index,
921 detail->args.ld.target_id,
925 case MR_EVT_ARGS_LD_COUNT:
926 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
929 detail->args.ld_count.ld.ld_index,
930 detail->args.ld_count.ld.target_id,
931 (long long)detail->args.ld_count.count,
935 case MR_EVT_ARGS_LD_LBA:
936 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
939 detail->args.ld_lba.ld.ld_index,
940 detail->args.ld_lba.ld.target_id,
941 (long long)detail->args.ld_lba.lba,
945 case MR_EVT_ARGS_LD_OWNER:
946 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
947 "owner changed: prior %d, new %d: %s\n",
949 detail->args.ld_owner.ld.ld_index,
950 detail->args.ld_owner.ld.target_id,
951 detail->args.ld_owner.pre_owner,
952 detail->args.ld_owner.new_owner,
956 case MR_EVT_ARGS_LD_LBA_PD_LBA:
957 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
958 "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
960 detail->args.ld_lba_pd_lba.ld.ld_index,
961 detail->args.ld_lba_pd_lba.ld.target_id,
962 (long long)detail->args.ld_lba_pd_lba.ld_lba,
963 detail->args.ld_lba_pd_lba.pd.device_id,
964 detail->args.ld_lba_pd_lba.pd.enclosure_index,
965 detail->args.ld_lba_pd_lba.pd.slot_number,
966 (long long)detail->args.ld_lba_pd_lba.pd_lba,
970 case MR_EVT_ARGS_LD_PROG:
971 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
972 "progress %d%% in %ds: %s\n",
974 detail->args.ld_prog.ld.ld_index,
975 detail->args.ld_prog.ld.target_id,
/* Firmware reports progress in 1/65536ths; /655 approximates percent. */
976 detail->args.ld_prog.prog.progress/655,
977 detail->args.ld_prog.prog.elapsed_seconds,
981 case MR_EVT_ARGS_LD_STATE:
982 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
983 "state prior %d new %d: %s\n",
985 detail->args.ld_state.ld.ld_index,
986 detail->args.ld_state.ld.target_id,
987 detail->args.ld_state.prev_state,
988 detail->args.ld_state.new_state,
992 case MR_EVT_ARGS_LD_STRIP:
993 device_printf(sc->mfi_dev, "%d - VD %02d/%d "
996 detail->args.ld_strip.ld.ld_index,
997 detail->args.ld_strip.ld.target_id,
998 (long long)detail->args.ld_strip.strip,
1002 case MR_EVT_ARGS_PD:
1003 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1006 detail->args.pd.device_id,
1007 detail->args.pd.enclosure_index,
1008 detail->args.pd.slot_number,
1012 case MR_EVT_ARGS_PD_ERR:
1013 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1016 detail->args.pd_err.pd.device_id,
1017 detail->args.pd_err.pd.enclosure_index,
1018 detail->args.pd_err.pd.slot_number,
1019 detail->args.pd_err.err,
1023 case MR_EVT_ARGS_PD_LBA:
1024 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1027 detail->args.pd_lba.pd.device_id,
1028 detail->args.pd_lba.pd.enclosure_index,
1029 detail->args.pd_lba.pd.slot_number,
1030 (long long)detail->args.pd_lba.lba,
1034 case MR_EVT_ARGS_PD_LBA_LD:
1035 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1036 "lba %lld VD %02d/%d: %s\n",
1038 detail->args.pd_lba_ld.pd.device_id,
1039 detail->args.pd_lba_ld.pd.enclosure_index,
1040 detail->args.pd_lba_ld.pd.slot_number,
/*
 * NOTE(review): likely copy/paste bug — this is the PD_LBA_LD case but
 * it reads args.pd_lba.lba; presumably it should be
 * args.pd_lba_ld.ld_lba (or .pd_lba).  TODO confirm against mfireg.h.
 */
1041 (long long)detail->args.pd_lba.lba,
1042 detail->args.pd_lba_ld.ld.ld_index,
1043 detail->args.pd_lba_ld.ld.target_id,
1047 case MR_EVT_ARGS_PD_PROG:
1048 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1049 "progress %d%% seconds %ds: %s\n",
1051 detail->args.pd_prog.pd.device_id,
1052 detail->args.pd_prog.pd.enclosure_index,
1053 detail->args.pd_prog.pd.slot_number,
1054 detail->args.pd_prog.prog.progress/655,
1055 detail->args.pd_prog.prog.elapsed_seconds,
1059 case MR_EVT_ARGS_PD_STATE:
1060 device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1061 "state prior %d new %d: %s\n",
/*
 * NOTE(review): PD_STATE case reads the pd field through the pd_prog
 * union member; works only if both members begin with the same pd
 * struct — verify, or use args.pd_state.pd for clarity.
 */
1063 detail->args.pd_prog.pd.device_id,
1064 detail->args.pd_prog.pd.enclosure_index,
1065 detail->args.pd_prog.pd.slot_number,
1066 detail->args.pd_state.prev_state,
1067 detail->args.pd_state.new_state,
1071 case MR_EVT_ARGS_PCI:
/*
 * NOTE(review): "0x04%x" prints a literal "04" before the hex value;
 * the intended format was almost certainly "0x%04x".
 */
1072 device_printf(sc->mfi_dev, "%d - PCI 0x04%x 0x04%x "
1073 "0x04%x 0x04%x: %s\n",
1075 detail->args.pci.venderId,
1076 detail->args.pci.deviceId,
1077 detail->args.pci.subVenderId,
1078 detail->args.pci.subDeviceId,
1082 case MR_EVT_ARGS_RATE:
1083 device_printf(sc->mfi_dev, "%d - Rebuild rate %d: %s\n",
1089 case MR_EVT_ARGS_TIME:
1090 device_printf(sc->mfi_dev, "%d - Adapter ticks %d "
1091 "elapsed %ds: %s\n",
1093 detail->args.time.rtc,
1094 detail->args.time.elapsedSeconds,
1098 case MR_EVT_ARGS_ECC:
1099 device_printf(sc->mfi_dev, "%d - Adapter ECC %x,%x: %s: %s\n",
1101 detail->args.ecc.ecar,
1102 detail->args.ecc.elog,
1103 detail->args.ecc.str,
/* Fallback for argument types this decoder doesn't know about. */
1108 device_printf(sc->mfi_dev, "%d - Type %d: %s\n",
1110 detail->arg_type, detail->description
/*
 * Register (or widen) the long-running MFI_DCMD_CTRL_EVENT_WAIT command
 * that delivers asynchronous events.  If an AEN is already pending with
 * a superset class/locale, nothing is done; otherwise the existing AEN
 * is merged with the new request and aborted so it can be re-issued.
 */
1116 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1118 struct mfi_command *cm;
1119 struct mfi_dcmd_frame *dcmd;
1120 union mfi_evt current_aen, prior_aen;
1121 struct mfi_evt_detail *ed = NULL;
1124 current_aen.word = locale;
1125 if (sc->mfi_aen_cm != NULL) {
/* Recover the class/locale the outstanding AEN was registered with. */
1127 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Existing registration already covers the request: keep it. */
1128 if (prior_aen.members.class <= current_aen.members.class &&
1129 !((prior_aen.members.locale & current_aen.members.locale)
1130 ^current_aen.members.locale)) {
/* Otherwise take the union of locales and the lower (wider) class. */
1133 prior_aen.members.locale |= current_aen.members.locale;
1134 if (prior_aen.members.class
1135 < current_aen.members.class)
1136 current_aen.members.class =
1137 prior_aen.members.class;
1138 mfi_abort(sc, sc->mfi_aen_cm);
1142 mtx_lock(&sc->mfi_io_lock);
1143 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1144 (void **)&ed, sizeof(*ed));
1145 mtx_unlock(&sc->mfi_io_lock);
/* mbox[0] = starting sequence number, mbox[1] = class/locale word. */
1149 dcmd = &cm->cm_frame->dcmd;
1150 ((uint32_t *)&dcmd->mbox)[0] = seq;
1151 ((uint32_t *)&dcmd->mbox)[1] = locale;
1152 cm->cm_flags = MFI_CMD_DATAIN;
1153 cm->cm_complete = mfi_aen_complete;
1155 sc->mfi_aen_cm = cm;
1157 mfi_enqueue_ready(cm);
/*
 * Completion handler for the AEN command: decode and log the event,
 * signal any registered listener processes, recycle the command, and
 * re-arm the AEN at the next sequence number (unless aborted).
 */
1164 mfi_aen_complete(struct mfi_command *cm)
1166 struct mfi_frame_header *hdr;
1167 struct mfi_softc *sc;
1168 struct mfi_evt_detail *detail;
1169 struct mfi_aen *mfi_aen_entry;
1170 int seq = 0, aborted = 0;
1173 hdr = &cm->cm_frame->header;
1175 if (sc->mfi_aen_cm == NULL)
/* Either deliberately aborted or never completed: don't decode. */
1178 if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
1179 sc->mfi_aen_cm->cm_aen_abort = 0;
/* Wake anyone poll(2)ing the management device. */
1182 sc->mfi_aen_triggered = 1;
1183 if (sc->mfi_poll_waiting)
1184 selwakeup(&sc->mfi_select);
1185 detail = cm->cm_data;
/* Drop the lock around the console printing in mfi_decode_evt(). */
1186 mtx_unlock(&sc->mfi_io_lock);
1187 mfi_decode_evt(sc, detail);
1188 mtx_lock(&sc->mfi_io_lock);
1189 seq = detail->seq + 1;
/* Deliver SIGIO to each registered listener, consuming the entries. */
1190 TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1191 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1193 psignal(mfi_aen_entry->p, SIGIO);
1194 free(mfi_aen_entry, M_MFIBUF);
1198 free(cm->cm_data, M_MFIBUF);
1199 sc->mfi_aen_cm = NULL;
1200 wakeup(&sc->mfi_aen_cm);
1201 mfi_release_command(cm);
1203 /* set it up again so the driver can catch more events */
1205 mtx_unlock(&sc->mfi_io_lock);
1206 mfi_aen_setup(sc, seq);
1207 mtx_lock(&sc->mfi_io_lock);
/*
 * Fetch and decode a single event-log entry (sequence number 'seq') via
 * a polled MFI_DCMD_CTRL_EVENT_GET.  Unlike most DCMDs here, the frame
 * is built by hand rather than via mfi_dcmd_command().
 */
1213 mfi_get_entry(struct mfi_softc *sc, int seq)
1215 struct mfi_command *cm;
1216 struct mfi_dcmd_frame *dcmd;
1217 struct mfi_log_detail *ed;
1220 mtx_lock(&sc->mfi_io_lock);
1221 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1222 mtx_unlock(&sc->mfi_io_lock);
1225 mtx_unlock(&sc->mfi_io_lock);
1227 ed = malloc(sizeof(struct mfi_log_detail), M_MFIBUF, M_NOWAIT | M_ZERO);
1229 mtx_lock(&sc->mfi_io_lock);
1230 mfi_release_command(cm);
1231 mtx_unlock(&sc->mfi_io_lock);
1235 dcmd = &cm->cm_frame->dcmd;
1236 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1237 dcmd->header.cmd = MFI_CMD_DCMD;
1238 dcmd->header.timeout = 0;
1239 dcmd->header.data_len = sizeof(struct mfi_log_detail);
1240 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
/* mbox[0] = requested sequence number, mbox[1] = locale filter. */
1241 ((uint32_t *)&dcmd->mbox)[0] = seq;
1242 ((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1243 cm->cm_sg = &dcmd->sgl;
1244 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1245 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
/*
 * NOTE(review): cm_len uses sizeof(struct mfi_evt_detail) while the
 * buffer and data_len above use sizeof(struct mfi_log_detail) — verify
 * the two structs are the same size or this maps the wrong length.
 */
1247 cm->cm_len = sizeof(struct mfi_evt_detail);
1249 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1250 device_printf(sc->mfi_dev, "Controller info buffer map failed");
1252 mfi_release_command(cm);
1256 if ((error = mfi_polled_command(sc, cm)) != 0) {
1257 device_printf(sc->mfi_dev, "Failed to get controller entry\n");
1258 sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
1261 mfi_release_command(cm);
1265 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1266 BUS_DMASYNC_POSTREAD);
1267 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1269 mfi_decode_log(sc, ed);
1271 mtx_lock(&sc->mfi_io_lock);
/* cm_data presumably points at 'ed' (assignment not visible here). */
1272 free(cm->cm_data, M_MFIBUF);
1273 mfi_release_command(cm);
1274 mtx_unlock(&sc->mfi_io_lock);
/*
 * Kick off an asynchronous MFI_DCMD_LD_GET_INFO for logical disk 'id';
 * mfi_add_ld_complete() attaches the child device when the info arrives.
 */
1280 mfi_add_ld(struct mfi_softc *sc, int id)
1282 struct mfi_command *cm;
1283 struct mfi_dcmd_frame *dcmd = NULL;
1284 struct mfi_ld_info *ld_info = NULL;
1287 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1289 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1290 (void **)&ld_info, sizeof(*ld_info));
1292 device_printf(sc->mfi_dev,
1293 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1295 free(ld_info, M_MFIBUF);
1298 cm->cm_flags = MFI_CMD_DATAIN;
1299 cm->cm_complete = mfi_add_ld_complete;
1300 dcmd = &cm->cm_frame->dcmd;
1303 mfi_enqueue_ready(cm);
/*
 * Completion handler for LD_GET_INFO: on success, allocate an mfi_ld,
 * create an "mfid" child device for the logical disk, and attach it
 * (dropping the I/O lock around bus_generic_attach).  The ld_info
 * buffer's ownership passes to the mfi_ld on success.
 */
1310 mfi_add_ld_complete(struct mfi_command *cm)
1312 struct mfi_frame_header *hdr;
1313 struct mfi_ld_info *ld_info;
1314 struct mfi_softc *sc;
1319 hdr = &cm->cm_frame->header;
1320 ld_info = cm->cm_private;
1322 if (hdr->cmd_status != MFI_STAT_OK) {
1323 free(ld_info, M_MFIBUF);
1324 mfi_release_command(cm);
/* Command no longer needed once the data has been captured. */
1327 mfi_release_command(cm);
1329 ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
1331 device_printf(sc->mfi_dev, "Cannot allocate ld\n");
1332 free(ld_info, M_MFIBUF);
1336 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1337 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1339 free(ld_info, M_MFIBUF);
1343 ld->ld_id = ld_info->ld_config.properties.ld.target_id;
1344 ld->ld_disk = child;
1345 ld->ld_info = ld_info;
1347 device_set_ivars(child, ld);
1348 device_set_desc(child, "MFI Logical Disk");
/* bus_generic_attach may sleep/probe; must not hold the I/O lock. */
1349 mtx_unlock(&sc->mfi_io_lock);
1351 bus_generic_attach(sc->mfi_dev);
1353 mtx_lock(&sc->mfi_io_lock);
/*
 * Build an MFI LD read/write command from the next bio on the bioq.
 * Returns a ready-to-map command, or NULL when either no free command
 * or no pending bio is available (elided return paths in this view).
 */
static struct mfi_command *
1357 mfi_bio_command(struct mfi_softc *sc)
1359 struct mfi_io_frame *io;
1360 struct mfi_command *cm;
1362 int flags, blkcount;
1364 if ((cm = mfi_dequeue_free(sc)) == NULL)
	/* Got a command but no work: put the command back. */
1367 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1368 mfi_release_command(cm);
1372 io = &cm->cm_frame->io;
	/* Only the low two bits select READ vs WRITE here. */
1373 switch (bio->bio_cmd & 0x03) {
1375 io->header.cmd = MFI_CMD_LD_READ;
1376 flags = MFI_CMD_DATAIN;
1379 io->header.cmd = MFI_CMD_LD_WRITE;
1380 flags = MFI_CMD_DATAOUT;
1383 panic("Invalid bio command");
1386 /* Cheat with the sector length to avoid a non-constant division */
1387 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	/* Target LD id was stashed in bio_driver1 by the disk layer. */
1388 io->header.target_id = (uintptr_t)bio->bio_driver1;
1389 io->header.timeout = 0;
1390 io->header.flags = 0;
1391 io->header.sense_len = MFI_SENSE_LEN;
	/* data_len is in sectors for LD I/O frames, not bytes. */
1392 io->header.data_len = blkcount;
1393 io->sense_addr_lo = cm->cm_sense_busaddr;
1394 io->sense_addr_hi = 0;
	/* Split the 64-bit LBA into the frame's hi/lo halves. */
1395 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1396 io->lba_lo = bio->bio_pblkno & 0xffffffff;
1397 cm->cm_complete = mfi_bio_complete;
1398 cm->cm_private = bio;
1399 cm->cm_data = bio->bio_data;
1400 cm->cm_len = bio->bio_bcount;
1401 cm->cm_sg = &io->sgl;
1402 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1403 cm->cm_flags = flags;
/*
 * Completion handler for bio-originated I/O: propagate any firmware or
 * SCSI error into the bio, release the command, and finish the bio.
 * NOTE(review): the sc assignment line is elided from this view.
 */
1409 mfi_bio_complete(struct mfi_command *cm)
1412 struct mfi_frame_header *hdr;
1413 struct mfi_softc *sc;
1415 bio = cm->cm_private;
1416 hdr = &cm->cm_frame->header;
	/* Either a firmware-level or SCSI-level failure marks the bio EIO. */
1419 if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1420 bio->bio_flags |= BIO_ERROR;
1421 bio->bio_error = EIO;
1422 device_printf(sc->mfi_dev, "I/O error, status= %d "
1423 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1424 mfi_print_sense(cm->cm_sc, cm->cm_sense);
	/* Release the command before completing the bio upstream. */
1427 mfi_release_command(cm);
1428 mfi_disk_complete(bio);
/*
 * Drain prepared commands (ready queue first, then new bio-derived
 * commands) to the controller.  Stops when the controller is frozen
 * for resources or no more work is available; a failed map requeues
 * the command.  Loop structure partially elided from this view.
 */
1432 mfi_startio(struct mfi_softc *sc)
1434 struct mfi_command *cm;
1437 /* Don't bother if we're short on resources */
1438 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1441 /* Try a command that has already been prepared */
1442 cm = mfi_dequeue_ready(sc);
1444 /* Nope, so look for work on the bioq */
1446 cm = mfi_bio_command(sc);
1448 /* No work available, so exit */
1452 /* Send the command to the controller */
1453 if (mfi_mapcmd(sc, cm) != 0) {
	/* Mapping failed: keep the command for a later retry. */
1454 mfi_requeue_ready(cm);
/*
 * DMA-map a command's data buffer (if any) and send it.  For commands
 * with data, bus_dmamap_load() invokes mfi_data_cb which fills the SGL
 * and, for non-polled commands, also sends the frame; EINPROGRESS
 * (deferred callback) freezes the queue.  Commands without data are
 * sent directly here.  NOTE(review): return statements elided.
 */
1461 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1465 if (cm->cm_data != NULL) {
	/* Polled commands cannot tolerate a deferred callback. */
1466 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1467 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1468 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1469 if (error == EINPROGRESS) {
	/* Callback deferred; stop issuing until resources free up. */
1470 sc->mfi_flags |= MFI_FLAGS_QFRZN;
	/* No data: queue as busy and fire the frame immediately. */
1474 mfi_enqueue_busy(cm);
1475 error = mfi_send_frame(sc, cm);
/*
 * bus_dma load callback: populate the command's scatter/gather list
 * (32- or 64-bit entries depending on MFI_FLAGS_SG64), set direction
 * flags, pre-sync the buffer, account for the extra SGL frames, and —
 * for non-polled commands — deliver the frame to the controller.
 * NOTE(review): sc/sgl/dir initialization lines are elided here.
 */
1482 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1484 struct mfi_frame_header *hdr;
1485 struct mfi_command *cm;
1487 struct mfi_softc *sc;
1493 cm = (struct mfi_command *)arg;
1495 hdr = &cm->cm_frame->header;
	/* Controller without 64-bit SG support gets 32-bit entries. */
1498 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1499 for (i = 0; i < nsegs; i++) {
1500 sgl->sg32[i].addr = segs[i].ds_addr;
1501 sgl->sg32[i].len = segs[i].ds_len;
1504 for (i = 0; i < nsegs; i++) {
1505 sgl->sg64[i].addr = segs[i].ds_addr;
1506 sgl->sg64[i].len = segs[i].ds_len;
	/* Tell the firmware the SGL uses 64-bit entries. */
1508 hdr->flags |= MFI_FRAME_SGL64;
1510 hdr->sg_count = nsegs;
	/* Build sync direction and frame direction from the data flags. */
1513 if (cm->cm_flags & MFI_CMD_DATAIN) {
1514 dir |= BUS_DMASYNC_PREREAD;
1515 hdr->flags |= MFI_FRAME_DIR_READ;
1517 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1518 dir |= BUS_DMASYNC_PREWRITE;
1519 hdr->flags |= MFI_FRAME_DIR_WRITE;
1521 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	/* Remember the map so mfi_complete() can sync/unload it later. */
1522 cm->cm_flags |= MFI_CMD_MAPPED;
1525 * Instead of calculating the total number of frames in the
1526 * compound frame, it's already assumed that there will be at
1527 * least 1 frame, so don't compensate for the modulo of the
1528 * following division.
1530 cm->cm_total_frame_size += (sc->mfi_sgsize * nsegs);
1531 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1533 /* The caller will take care of delivering polled commands */
1534 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1535 mfi_enqueue_busy(cm);
1536 mfi_send_frame(sc, cm);
/*
 * Hand one (possibly compound) frame to the controller by writing its
 * encoded bus address to the inbound queue port register.
 */
1543 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1547 * The bus address of the command is aligned on a 64 byte boundary,
1548 * leaving the least 6 bits as zero. For whatever reason, the
1549 * hardware wants the address shifted right by three, leaving just
1550 * 3 zero bits. These three bits are then used to indicate how many
1551 * 64 byte frames beyond the first one are used in the command. The
1552 * extra frames are typically filled with S/G elements. The extra
1553 * frames must also be contiguous. Thus, a compound frame can be at
1554 * most 512 bytes long, allowing for up to 59 32-bit S/G elements or
1555 * 39 64-bit S/G elements for block I/O commands. This means that
1556 * I/O transfers of 256k and higher simply are not possible, which
1557 * is quite odd for such a modern adapter.
1559 MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
1560 cm->cm_extra_frames);
/*
 * Generic command completion: post-sync and unload the DMA map if the
 * command was mapped, invoke the per-command completion callback, and
 * unfreeze the resource-frozen queue.  NOTE(review): dir initialization
 * and the restart of queued I/O are elided from this view.
 */
1565 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1569 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
	/* Mirror the PRE sync done in mfi_data_cb with the POST side. */
1571 if (cm->cm_flags & MFI_CMD_DATAIN)
1572 dir |= BUS_DMASYNC_POSTREAD;
1573 if (cm->cm_flags & MFI_CMD_DATAOUT)
1574 dir |= BUS_DMASYNC_POSTWRITE;
1576 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1577 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1578 cm->cm_flags &= ~MFI_CMD_MAPPED;
	/* Callback (if any) is responsible for releasing the command. */
1581 if (cm->cm_complete != NULL)
1582 cm->cm_complete(cm);
	/* A command finished, so resources are available again. */
1586 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
/*
 * Issue an ABORT frame for `cm_abort` (identified to the firmware by
 * its context and frame bus address) and poll it to completion.  If
 * the target is the outstanding AEN command, mark it aborted and wait
 * for it to drain.  NOTE(review): the error return when no free
 * command is available is elided from this view.
 */
1591 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1593 struct mfi_command *cm;
1594 struct mfi_abort_frame *abort;
	/* Dequeue-free requires the io lock; drop it again for the I/O. */
1596 mtx_lock(&sc->mfi_io_lock);
1597 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1598 mtx_unlock(&sc->mfi_io_lock);
1601 mtx_unlock(&sc->mfi_io_lock);
1603 abort = &cm->cm_frame->abort;
1604 abort->header.cmd = MFI_CMD_ABORT;
1605 abort->header.flags = 0;
	/* The firmware matches the victim by context + frame address. */
1606 abort->abort_context = cm_abort->cm_frame->header.context;
1607 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1608 abort->abort_mfi_addr_hi = 0;
	/* Flag the AEN command so its completion path knows it was aborted. */
1611 sc->mfi_aen_cm->cm_aen_abort = 1;
1613 mfi_polled_command(sc, cm);
1614 mtx_lock(&sc->mfi_io_lock);
1615 mfi_release_command(cm);
1616 mtx_unlock(&sc->mfi_io_lock);
	/* Wait (with 5 s timeout per iteration) for the AEN cmd to clear. */
1618 while (sc->mfi_aen_cm != NULL) {
1619 tsleep(&sc->mfi_aen_cm, 0, "mfiabort", 5 * hz);
/*
 * Synchronously write `len` bytes from `virt` to LBA `lba` of logical
 * disk `id` using a polled LD_WRITE — used for crash dumps, where
 * interrupts and sleeping are unavailable.  NOTE(review): cm_data/cm_len
 * assignments and the final return are elided from this view.
 */
1626 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1628 struct mfi_command *cm;
1629 struct mfi_io_frame *io;
1632 if ((cm = mfi_dequeue_free(sc)) == NULL)
1635 io = &cm->cm_frame->io;
1636 io->header.cmd = MFI_CMD_LD_WRITE;
1637 io->header.target_id = id;
1638 io->header.timeout = 0;
1639 io->header.flags = 0;
1640 io->header.sense_len = MFI_SENSE_LEN;
	/* data_len is in sectors; round the byte count up. */
1641 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1642 io->sense_addr_lo = cm->cm_sense_busaddr;
1643 io->sense_addr_hi = 0;
1644 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1645 io->lba_lo = lba & 0xffffffff;
1648 cm->cm_sg = &io->sgl;
1649 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	/* Polled: mfi_mapcmd() maps but leaves delivery to us. */
1650 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1652 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1653 mfi_release_command(cm);
1657 error = mfi_polled_command(sc, cm);
	/* Tear down the DMA mapping set up by mfi_mapcmd(). */
1658 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1659 BUS_DMASYNC_POSTWRITE);
1660 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1661 mfi_release_command(cm);
/*
 * Character-device open: mark the softc as open.
 * NOTE(review): softc lookup and return are elided from this view.
 */
1667 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1669 struct mfi_softc *sc;
1672 sc->mfi_flags |= MFI_FLAGS_OPEN;
/*
 * Character-device close: clear the open flag and drop any AEN
 * registration entries owned by the closing process.
 * NOTE(review): softc lookup, TAILQ field argument, and return are
 * elided from this view.
 */
1678 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1680 struct mfi_softc *sc;
1681 struct mfi_aen *mfi_aen_entry;
1684 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
	/* Remove this process's AEN registrations, if any. */
1686 TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1687 if (mfi_aen_entry->p == curproc) {
1688 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1690 free(mfi_aen_entry, M_MFIBUF);
/*
 * Native ioctl entry point.  Handles the driver's stat-request ioctl
 * directly and routes the two Linux MegaRAID shim ioctls (firmware
 * command 0xc1144d01, AEN 0x400c4d03) to mfi_linux_ioctl_int() on the
 * adapter selected by the user-supplied adapter number.
 * NOTE(review): case labels, bounds checks, and error returns are
 * elided from this view.
 */
1697 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1699 struct mfi_softc *sc;
1700 union mfi_statrequest *ms;
1708 ms = (union mfi_statrequest *)arg;
1709 switch (ms->ms_item) {
	/* Copy the requested queue statistics back to the caller. */
1714 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1715 sizeof(struct mfi_qstat));
1722 case 0xc1144d01: /* Firmware Linux ioctl shim */
1724 devclass_t devclass;
1725 struct mfi_linux_ioc_packet l_ioc;
	/* Resolve the target adapter from the ioctl payload. */
1728 devclass = devclass_find("mfi");
1729 if (devclass == NULL)
1732 error = copyin(arg, &l_ioc, sizeof(l_ioc));
1735 adapter = l_ioc.lioc_adapter_no;
1736 sc = devclass_get_softc(devclass, adapter);
1739 return (mfi_linux_ioctl_int(sc->mfi_cdev,
1740 cmd, arg, flag, td));
1743 case 0x400c4d03: /* AEN Linux ioctl shim */
1745 devclass_t devclass;
1746 struct mfi_linux_ioc_aen l_aen;
1749 devclass = devclass_find("mfi");
1750 if (devclass == NULL)
1753 error = copyin(arg, &l_aen, sizeof(l_aen));
1756 adapter = l_aen.laen_adapter_no;
1757 sc = devclass_get_softc(devclass, adapter);
1760 return (mfi_linux_ioctl_int(sc->mfi_cdev,
1761 cmd, arg, flag, td));
/*
 * Linux MegaRAID ioctl shim.  For the firmware-command ioctl: copies
 * the user's frame and SG data in, runs the command polled, copies
 * data/sense/status back out, and releases everything.  For the AEN
 * ioctl: registers the calling process for async event notification.
 * NOTE(review): many lines (error returns, sc lookup, frame setup)
 * are elided from this view.
 */
1773 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1775 struct mfi_softc *sc;
1776 struct mfi_linux_ioc_packet l_ioc;
1777 struct mfi_linux_ioc_aen l_aen;
1778 struct mfi_command *cm = NULL;
1779 struct mfi_aen *mfi_aen_entry;
1780 uint32_t *sense_ptr;
1782 uint8_t *data = NULL, *temp;
1789 case 0xc1144d01: /* Firmware Linux ioctl shim */
1790 error = copyin(arg, &l_ioc, sizeof(l_ioc));
	/* Reject requests exceeding the shim's SG element limit. */
1794 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
1798 mtx_lock(&sc->mfi_io_lock);
1799 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1800 mtx_unlock(&sc->mfi_io_lock);
1803 mtx_unlock(&sc->mfi_io_lock);
1806 * save off original context since copying from user
1807 * will clobber some data
1809 context = cm->cm_frame->header.context;
1811 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
1812 l_ioc.lioc_sgl_off); /* Linux can do 2 frames ? */
1813 cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
1815 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
	/* Direction is unknown from the shim, so map both ways. */
1816 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
1818 cm->cm_len = cm->cm_frame->header.data_len;
1819 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
1822 /* restore header context */
1823 cm->cm_frame->header.context = context;
	/* Gather the user's SG segments into the contiguous bounce buffer. */
1826 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1827 error = copyin(l_ioc.lioc_sgl[i].iov_base,
1829 l_ioc.lioc_sgl[i].iov_len);
1831 device_printf(sc->mfi_dev,
1835 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
	/* Point the frame's sense slot at our DMA-able sense buffer. */
1838 if (l_ioc.lioc_sense_len) {
1840 (void *)&cm->cm_frame->bytes[l_ioc.lioc_sense_off];
1841 *sense_ptr = cm->cm_sense_busaddr;
1844 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1845 device_printf(sc->mfi_dev,
1846 "Controller info buffer map failed");
1850 if ((error = mfi_polled_command(sc, cm)) != 0) {
1851 device_printf(sc->mfi_dev,
1852 "Controller polled failed");
1856 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1857 BUS_DMASYNC_POSTREAD);
1858 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	/* Scatter the bounce buffer back out to the user's SG segments. */
1861 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1862 error = copyout(temp,
1863 l_ioc.lioc_sgl[i].iov_base,
1864 l_ioc.lioc_sgl[i].iov_len);
1866 device_printf(sc->mfi_dev,
1870 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1873 if (l_ioc.lioc_sense_len) {
1874 /* copy out sense */
1875 sense_ptr = (void *)
1876 &l_ioc.lioc_frame.raw[l_ioc.lioc_sense_off];
	/*
	 * NOTE(review): copyout() source below is derived from
	 * cm_sense_busaddr (a bus address), not an obvious kernel
	 * virtual address; preceding line(s) are elided here, so
	 * verify against the full source that this is correct.
	 */
1878 temp += cm->cm_sense_busaddr;
1879 error = copyout(temp, sense_ptr,
1880 l_ioc.lioc_sense_len);
1882 device_printf(sc->mfi_dev,
	/* Return the firmware's command status to the caller. */
1888 error = copyout(&cm->cm_frame->header.cmd_status,
1889 &((struct mfi_linux_ioc_packet*)arg)
1890 ->lioc_frame.hdr.cmd_status,
1893 device_printf(sc->mfi_dev,
	/* Common cleanup: free the bounce buffer and the command. */
1900 free(data, M_MFIBUF);
1902 mtx_lock(&sc->mfi_io_lock);
1903 mfi_release_command(cm);
1904 mtx_unlock(&sc->mfi_io_lock);
1908 case 0x400c4d03: /* AEN Linux ioctl shim */
1909 error = copyin(arg, &l_aen, sizeof(l_aen));
1912 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
1913 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
1915 if (mfi_aen_entry != NULL) {
	/* Track the registering process so it can be notified later. */
1916 mfi_aen_entry->p = curproc;
1917 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
1920 error = mfi_aen_register(sc, l_aen.laen_seq_num,
1921 l_aen.laen_class_locale);
	/* Registration failed: undo the pid-list insertion. */
1924 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1926 free(mfi_aen_entry, M_MFIBUF);
1931 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
1940 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
1942 struct mfi_softc *sc;
1947 if (poll_events & (POLLIN | POLLRDNORM)) {
1948 if (sc->mfi_aen_triggered != 0)
1949 revents |= poll_events & (POLLIN | POLLRDNORM);
1950 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
1956 if (poll_events & (POLLIN | POLLRDNORM)) {
1957 sc->mfi_poll_waiting = 1;
1958 selrecord(td, &sc->mfi_select);
1959 sc->mfi_poll_waiting = 0;