2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
64 #include <sys/selinfo.h>
67 #include <sys/eventhandler.h>
69 #include <sys/bus_dma.h>
71 #include <sys/ioccom.h>
74 #include <sys/signalvar.h>
76 #include <machine/bus.h>
77 #include <machine/resource.h>
79 #include <dev/mfi/mfireg.h>
80 #include <dev/mfi/mfi_ioctl.h>
81 #include <dev/mfi/mfivar.h>
83 static int mfi_alloc_commands(struct mfi_softc *);
84 static int mfi_comms_init(struct mfi_softc *);
85 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
86 static int mfi_get_controller_info(struct mfi_softc *);
87 static int mfi_get_log_state(struct mfi_softc *,
88 struct mfi_evt_log_state **);
89 static int mfi_parse_entries(struct mfi_softc *, int, int);
90 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
91 uint32_t, void **, size_t);
92 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
93 static void mfi_startup(void *arg);
94 static void mfi_intr(void *arg);
95 static void mfi_ldprobe(struct mfi_softc *sc);
96 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
97 static void mfi_aen_complete(struct mfi_command *);
98 static int mfi_aen_setup(struct mfi_softc *, uint32_t);
99 static int mfi_add_ld(struct mfi_softc *sc, int);
100 static void mfi_add_ld_complete(struct mfi_command *);
101 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
102 static void mfi_bio_complete(struct mfi_command *);
103 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
104 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
105 static void mfi_complete(struct mfi_softc *, struct mfi_command *);
106 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
107 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
108 static void mfi_timeout(void *);
109 static int mfi_user_command(struct mfi_softc *,
110 struct mfi_ioc_passthru *);
111 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
112 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
113 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
114 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
115 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
116 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
117 static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
118 static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
120 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
121 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
122 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
123 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
124 0, "event message locale");
126 static int mfi_event_class = MFI_EVT_CLASS_INFO;
127 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
128 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
129 0, "event message class");
131 static int mfi_max_cmds = 128;
132 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
133 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
136 /* Management interface */
137 static d_open_t mfi_open;
138 static d_close_t mfi_close;
139 static d_ioctl_t mfi_ioctl;
140 static d_poll_t mfi_poll;
142 static struct cdevsw mfi_cdevsw = {
143 .d_version = D_VERSION,
146 .d_close = mfi_close,
147 .d_ioctl = mfi_ioctl,
152 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
154 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
157 mfi_enable_intr_xscale(struct mfi_softc *sc)
159 MFI_WRITE4(sc, MFI_OMSK, 0x01);
163 mfi_enable_intr_ppc(struct mfi_softc *sc)
165 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
166 if (sc->mfi_flags & MFI_FLAGS_1078) {
167 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
168 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
169 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
174 mfi_read_fw_status_xscale(struct mfi_softc *sc)
176 return MFI_READ4(sc, MFI_OMSG0);
180 mfi_read_fw_status_ppc(struct mfi_softc *sc)
182 return MFI_READ4(sc, MFI_OSP0);
186 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
190 status = MFI_READ4(sc, MFI_OSTS);
191 if ((status & MFI_OSTS_INTR_VALID) == 0)
194 MFI_WRITE4(sc, MFI_OSTS, status);
199 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
203 status = MFI_READ4(sc, MFI_OSTS);
204 if (sc->mfi_flags & MFI_FLAGS_1078) {
205 if (!(status & MFI_1078_RM)) {
208 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
209 if (!(status & MFI_GEN2_RM)) {
214 MFI_WRITE4(sc, MFI_ODCR0, status);
219 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
221 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
225 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
227 MFI_WRITE4(sc, MFI_IQP, (bus_add |frame_cnt <<1)|1 );
/*
 * Drive the firmware toward the READY state, issuing the doorbell write
 * each intermediate state requires (clear handshake, ready transition,
 * hotplug), then polling the status register until the state changes.
 * NOTE(review): many lines of this function (braces, breaks, DELAY/max_wait
 * setup, return statements) are missing from this copy of the file; the
 * surviving code is left byte-identical below.
 */
231 mfi_transition_firmware(struct mfi_softc *sc)
233 	uint32_t fw_state, cur_state;
236 	fw_state = sc->mfi_read_fw_status(sc)& MFI_FWSTATE_MASK;
237 	while (fw_state != MFI_FWSTATE_READY) {
239 			device_printf(sc->mfi_dev, "Waiting for firmware to "
241 		cur_state = fw_state;
243 		case MFI_FWSTATE_FAULT:
244 			device_printf(sc->mfi_dev, "Firmware fault\n");
246 		case MFI_FWSTATE_WAIT_HANDSHAKE:
/* Firmware wants the driver to acknowledge its handshake request. */
247 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
250 		case MFI_FWSTATE_OPERATIONAL:
251 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
254 		case MFI_FWSTATE_UNDEFINED:
255 		case MFI_FWSTATE_BB_INIT:
258 		case MFI_FWSTATE_FW_INIT:
259 		case MFI_FWSTATE_DEVICE_SCAN:
260 		case MFI_FWSTATE_FLUSH_CACHE:
263 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
264 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
268 			device_printf(sc->mfi_dev,"Unknown firmware state %#x\n",
/* Poll for the state to leave cur_state; loop bound presumably in 1/10s units. */
272 		for (i = 0; i < (max_wait * 10); i++) {
273 			fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
274 			if (fw_state == cur_state)
279 		if (fw_state == cur_state) {
280 			device_printf(sc->mfi_dev, "Firmware stuck in state "
289 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
294 *addr = segs[0].ds_addr;
/*
 * Main attach routine: initialize locks and queues, select the chip-specific
 * register access methods, bring the firmware to READY, allocate the DMA
 * areas (comms queues, command frames, sense buffers), initialize the
 * command pool and firmware comms, register the interrupt, config hook,
 * shutdown handler, and management device node.
 * FIX(review): the mfi_aen_setup() error check below used
 * "(error = mfi_aen_setup(sc, 0), 0) != 0" — the comma operator made the
 * condition constant-false and discarded the error.  The stray ", 0" is
 * removed so the error is actually checked.
 * NOTE(review): numerous error-path lines (returns/gotos) are missing from
 * this copy of the file; all surviving code other than the fix above is
 * left byte-identical.
 */
298 mfi_attach(struct mfi_softc *sc)
301 int error, commsz, framessz, sensesz;
302 int frames, unit, max_fw_sge;
304 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.00 \n");
306 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
307 sx_init(&sc->mfi_config_lock, "MFI config");
308 TAILQ_INIT(&sc->mfi_ld_tqh);
309 TAILQ_INIT(&sc->mfi_aen_pids);
310 TAILQ_INIT(&sc->mfi_cam_ccbq);
/* Select register access methods based on the chip generation. */
317 if (sc->mfi_flags & MFI_FLAGS_1064R) {
318 sc->mfi_enable_intr = mfi_enable_intr_xscale;
319 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
320 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
321 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
324 sc->mfi_enable_intr = mfi_enable_intr_ppc;
325 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
326 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
327 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
331 /* Before we get too far, see if the firmware is working */
332 if ((error = mfi_transition_firmware(sc)) != 0) {
333 device_printf(sc->mfi_dev, "Firmware not in READY state, "
334 "error %d\n", error);
339 * Get information needed for sizing the contiguous memory for the
340 * frame pool. Size down the sgl parameter since we know that
341 * we will never need more than what's required for MAXPHYS.
342 * It would be nice if these constants were available at runtime
343 * instead of compile time.
345 status = sc->mfi_read_fw_status(sc);
346 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
347 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
348 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
351 * Create the dma tag for data buffers. Used both for block I/O
352 * and for various internal data queries.
354 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
355 1, 0, /* algnmnt, boundary */
356 BUS_SPACE_MAXADDR, /* lowaddr */
357 BUS_SPACE_MAXADDR, /* highaddr */
358 NULL, NULL, /* filter, filterarg */
359 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
360 sc->mfi_max_sge, /* nsegments */
361 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
362 BUS_DMA_ALLOCNOW, /* flags */
363 busdma_lock_mutex, /* lockfunc */
364 &sc->mfi_io_lock, /* lockfuncarg */
365 &sc->mfi_buffer_dmat)) {
366 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
371 * Allocate DMA memory for the comms queues. Keep it under 4GB for
372 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
373 * entry, so the calculated size here will be will be 1 more than
374 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
376 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
377 sizeof(struct mfi_hwcomms);
378 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
379 1, 0, /* algnmnt, boundary */
380 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
381 BUS_SPACE_MAXADDR, /* highaddr */
382 NULL, NULL, /* filter, filterarg */
383 commsz, /* maxsize */
385 commsz, /* maxsegsize */
387 NULL, NULL, /* lockfunc, lockarg */
388 &sc->mfi_comms_dmat)) {
389 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
392 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
393 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
394 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
397 bzero(sc->mfi_comms, commsz);
398 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
399 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
402 * Allocate DMA memory for the command frames. Keep them in the
403 * lower 4GB for efficiency. Calculate the size of the commands at
404 * the same time; each command is one 64 byte frame plus a set of
405 * additional frames for holding sg lists or other data.
406 * The assumption here is that the SG list will start at the second
407 * frame and not use the unused bytes in the first frame. While this
408 * isn't technically correct, it simplifies the calculation and allows
409 * for command frames that might be larger than an mfi_io_frame.
411 if (sizeof(bus_addr_t) == 8) {
412 sc->mfi_sge_size = sizeof(struct mfi_sg64);
413 sc->mfi_flags |= MFI_FLAGS_SG64;
415 sc->mfi_sge_size = sizeof(struct mfi_sg32);
417 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
418 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
419 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
420 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
421 64, 0, /* algnmnt, boundary */
422 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
423 BUS_SPACE_MAXADDR, /* highaddr */
424 NULL, NULL, /* filter, filterarg */
425 framessz, /* maxsize */
427 framessz, /* maxsegsize */
429 NULL, NULL, /* lockfunc, lockarg */
430 &sc->mfi_frames_dmat)) {
431 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
434 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
435 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
436 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
439 bzero(sc->mfi_frames, framessz);
440 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
441 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
444 * Allocate DMA memory for the frame sense data. Keep them in the
445 * lower 4GB for efficiency
447 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
448 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
449 4, 0, /* algnmnt, boundary */
450 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
451 BUS_SPACE_MAXADDR, /* highaddr */
452 NULL, NULL, /* filter, filterarg */
453 sensesz, /* maxsize */
455 sensesz, /* maxsegsize */
457 NULL, NULL, /* lockfunc, lockarg */
458 &sc->mfi_sense_dmat)) {
459 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
462 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
463 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
464 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
467 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
468 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
470 if ((error = mfi_alloc_commands(sc)) != 0)
473 if ((error = mfi_comms_init(sc)) != 0)
476 if ((error = mfi_get_controller_info(sc)) != 0)
479 mtx_lock(&sc->mfi_io_lock);
480 if ((error = mfi_aen_setup(sc, 0)) != 0) {
481 mtx_unlock(&sc->mfi_io_lock);
484 mtx_unlock(&sc->mfi_io_lock);
487 * Set up the interrupt handler. XXX This should happen in
491 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
492 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
493 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
496 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
497 NULL, mfi_intr, sc, &sc->mfi_intr)) {
498 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
502 /* Register a config hook to probe the bus for arrays */
503 sc->mfi_ich.ich_func = mfi_startup;
504 sc->mfi_ich.ich_arg = sc;
505 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
506 device_printf(sc->mfi_dev, "Cannot establish configuration "
512 * Register a shutdown handler.
514 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
515 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
516 device_printf(sc->mfi_dev, "Warning: shutdown event "
517 "registration failed\n");
521 * Create the control device for doing management
523 unit = device_get_unit(sc->mfi_dev);
524 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
525 0640, "mfi%d", unit);
527 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
528 if (sc->mfi_cdev != NULL)
529 sc->mfi_cdev->si_drv1 = sc;
530 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
531 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
532 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
533 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
534 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
535 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
536 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
537 &sc->mfi_keep_deleted_volumes, 0,
538 "Don't detach the mfid device for a busy volume that is deleted");
540 device_add_child(sc->mfi_dev, "mfip", -1);
541 bus_generic_attach(sc->mfi_dev);
543 /* Start the timeout watchdog */
544 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
545 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
/*
 * Carve the preallocated frame and sense DMA areas into a pool of
 * mfi_command structures (one per firmware command slot, capped by the
 * hw.mfi.max_cmds tunable) and create a busdma map for each.
 * NOTE(review): several lines (locals, malloc flags, error handling for
 * bus_dmamap_create failure) are missing from this copy; left byte-identical.
 */
552 mfi_alloc_commands(struct mfi_softc *sc)
554 struct mfi_command *cm;
558 * XXX Should we allocate all the commands up front, or allocate on
559 * demand later like 'aac' does?
561 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
563 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
564 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
566 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
569 for (i = 0; i < ncmds; i++) {
570 cm = &sc->mfi_commands[i];
/* Each command gets a slice of the frames area and a matching bus address. */
571 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
572 sc->mfi_cmd_size * i);
573 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
574 sc->mfi_cmd_size * i;
/* The context field lets the interrupt handler map replies back to slots. */
575 cm->cm_frame->header.context = i;
576 cm->cm_sense = &sc->mfi_sense[i];
577 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
580 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
581 &cm->cm_dmamap) == 0)
/* A freshly initialized command is "released" onto the free queue. */
582 mfi_release_command(cm);
585 sc->mfi_total_cmds++;
/*
 * Reset a command structure to a pristine state and return it to the
 * free queue.  The frame header is cleared one 32-bit word at a time,
 * deliberately skipping the word holding the context field.
 */
592 mfi_release_command(struct mfi_command *cm)
594 struct mfi_frame_header *hdr;
598 * Zero out the important fields of the frame, but make sure the
599 * context field is preserved. For efficiency, handle the fields
600 * as 32 bit words. Clear out the first S/G entry too for safety.
602 hdr = &cm->cm_frame->header;
603 if (cm->cm_data != NULL && hdr->sg_count) {
604 cm->cm_sg->sg32[0].len = 0;
605 cm->cm_sg->sg32[0].addr = 0;
608 hdr_data = (uint32_t *)cm->cm_frame;
609 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
610 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
611 hdr_data[4] = 0; /* flags, timeout */
612 hdr_data[5] = 0; /* data_len */
614 cm->cm_extra_frames = 0;
616 cm->cm_complete = NULL;
617 cm->cm_private = NULL;
620 cm->cm_total_frame_size = 0;
622 mfi_enqueue_free(cm);
/*
 * Allocate and pre-fill a DCMD command frame.  If bufsize/bufp are given,
 * a zeroed data buffer is allocated and attached as cm_data/cm_private;
 * ownership of the buffer passes to the caller via *bufp on success.
 * Must be called with the io lock held.
 * NOTE(review): error-return lines and part of the buffer wiring are
 * missing from this copy; code left byte-identical.
 */
626 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
627 void **bufp, size_t bufsize)
629 struct mfi_command *cm;
630 struct mfi_dcmd_frame *dcmd;
633 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
635 cm = mfi_dequeue_free(sc);
639 if ((bufsize > 0) && (bufp != NULL)) {
641 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
643 mfi_release_command(cm);
652 dcmd = &cm->cm_frame->dcmd;
653 bzero(dcmd->mbox, MFI_MBOX_SIZE);
654 dcmd->header.cmd = MFI_CMD_DCMD;
655 dcmd->header.timeout = 0;
656 dcmd->header.flags = 0;
657 dcmd->header.data_len = bufsize;
658 dcmd->opcode = opcode;
659 cm->cm_sg = &dcmd->sgl;
660 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
663 cm->cm_private = buf;
664 cm->cm_len = bufsize;
/* Hand the freshly allocated buffer back to the caller if they had none. */
667 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the INIT frame that tells the firmware where the reply queue and
 * producer/consumer indices live.  The queue-info structure is stashed in
 * the frame area immediately after the init frame itself (the SG list
 * area is unused for MFI_CMD_INIT), and the command is issued polled.
 */
673 mfi_comms_init(struct mfi_softc *sc)
675 struct mfi_command *cm;
676 struct mfi_init_frame *init;
677 struct mfi_init_qinfo *qinfo;
680 mtx_lock(&sc->mfi_io_lock);
681 if ((cm = mfi_dequeue_free(sc)) == NULL)
685 * Abuse the SG list area of the frame to hold the init_qinfo
688 init = &cm->cm_frame->init;
689 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
691 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One extra reply-queue entry, matching the commsz sizing in mfi_attach(). */
692 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
693 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
694 offsetof(struct mfi_hwcomms, hw_reply_q);
695 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
696 offsetof(struct mfi_hwcomms, hw_pi);
697 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
698 offsetof(struct mfi_hwcomms, hw_ci);
700 init->header.cmd = MFI_CMD_INIT;
701 init->header.data_len = sizeof(struct mfi_init_qinfo);
702 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
704 cm->cm_flags = MFI_CMD_POLLED;
706 if ((error = mfi_mapcmd(sc, cm)) != 0) {
707 device_printf(sc->mfi_dev, "failed to send init command\n");
708 mtx_unlock(&sc->mfi_io_lock);
711 mfi_release_command(cm);
712 mtx_unlock(&sc->mfi_io_lock);
/*
 * Query the controller for its info structure (polled DCMD) and derive
 * the maximum I/O size.  On failure, falls back to a conservative value
 * computed from the SG limit.
 * NOTE(review): some error-path and cleanup lines are missing from this
 * copy; code left byte-identical.
 */
718 mfi_get_controller_info(struct mfi_softc *sc)
720 struct mfi_command *cm = NULL;
721 struct mfi_ctrl_info *ci = NULL;
722 uint32_t max_sectors_1, max_sectors_2;
725 mtx_lock(&sc->mfi_io_lock);
726 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
727 (void **)&ci, sizeof(*ci));
730 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
732 if ((error = mfi_mapcmd(sc, cm)) != 0) {
733 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: size max I/O from the SG segment limit instead. */
734 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
740 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
741 BUS_DMASYNC_POSTREAD);
742 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
744 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
745 max_sectors_2 = ci->max_request_size;
746 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
752 mfi_release_command(cm);
753 mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the firmware event-log state (sequence numbers) via a polled
 * DCMD.  The allocated *log_state buffer is owned by the caller, who
 * must free it with M_MFIBUF.
 */
758 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
760 struct mfi_command *cm = NULL;
763 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
764 (void **)log_state, sizeof(**log_state));
767 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
769 if ((error = mfi_mapcmd(sc, cm)) != 0) {
770 device_printf(sc->mfi_dev, "Failed to get log state\n");
774 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
775 BUS_DMASYNC_POSTREAD);
776 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
780 mfi_release_command(cm);
/*
 * Arrange for asynchronous event notification.  When starting from
 * sequence 0, first read the log state, replay any events that fired
 * between the last shutdown and now, and resume from the newest
 * sequence number; then register the AEN command with the firmware.
 */
786 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
788 struct mfi_evt_log_state *log_state = NULL;
789 union mfi_evt class_locale;
793 class_locale.members.reserved = 0;
794 class_locale.members.locale = mfi_event_locale;
795 class_locale.members.evt_class = mfi_event_class;
797 if (seq_start == 0) {
798 error = mfi_get_log_state(sc, &log_state);
801 free(log_state, M_MFIBUF);
806 * Walk through any events that fired since the last
809 mfi_parse_entries(sc, log_state->shutdown_seq_num,
810 log_state->newest_seq_num);
811 seq = log_state->newest_seq_num;
814 mfi_aen_register(sc, seq, class_locale.word);
815 free(log_state, M_MFIBUF);
/*
 * Queue a command and sleep (on the io lock) until the interrupt path
 * marks it completed.  A DCMD opcode of 0 (as issued by MegaCli) is
 * short-circuited to success without touching the hardware.
 */
821 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
824 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
825 cm->cm_complete = NULL;
829 * MegaCli can issue a DCMD of 0. In this case do nothing
830 * and return 0 to it as status
832 if (cm->cm_frame->dcmd.opcode == 0) {
833 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
835 return (cm->cm_error);
837 mfi_enqueue_ready(cm);
839 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
840 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
841 return (cm->cm_error);
/*
 * Tear down everything mfi_attach() set up, in reverse order: watchdog,
 * control device, command pool, interrupt, then the three DMA areas
 * (unload map, free memory, destroy tag — each guarded so partial
 * attaches can be unwound), and finally the locks.
 */
845 mfi_free(struct mfi_softc *sc)
847 struct mfi_command *cm;
850 callout_drain(&sc->mfi_watchdog_callout);
852 if (sc->mfi_cdev != NULL)
853 destroy_dev(sc->mfi_cdev);
855 if (sc->mfi_total_cmds != 0) {
856 for (i = 0; i < sc->mfi_total_cmds; i++) {
857 cm = &sc->mfi_commands[i];
858 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
860 free(sc->mfi_commands, M_MFIBUF);
864 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
865 if (sc->mfi_irq != NULL)
866 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
869 if (sc->mfi_sense_busaddr != 0)
870 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
871 if (sc->mfi_sense != NULL)
872 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
873 sc->mfi_sense_dmamap);
874 if (sc->mfi_sense_dmat != NULL)
875 bus_dma_tag_destroy(sc->mfi_sense_dmat);
877 if (sc->mfi_frames_busaddr != 0)
878 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
879 if (sc->mfi_frames != NULL)
880 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
881 sc->mfi_frames_dmamap);
882 if (sc->mfi_frames_dmat != NULL)
883 bus_dma_tag_destroy(sc->mfi_frames_dmat);
885 if (sc->mfi_comms_busaddr != 0)
886 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
887 if (sc->mfi_comms != NULL)
888 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
889 sc->mfi_comms_dmamap);
890 if (sc->mfi_comms_dmat != NULL)
891 bus_dma_tag_destroy(sc->mfi_comms_dmat);
893 if (sc->mfi_buffer_dmat != NULL)
894 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
895 if (sc->mfi_parent_dmat != NULL)
896 bus_dma_tag_destroy(sc->mfi_parent_dmat);
898 if (mtx_initialized(&sc->mfi_io_lock)) {
899 mtx_destroy(&sc->mfi_io_lock);
900 sx_destroy(&sc->mfi_config_lock);
/*
 * Config-intrhook callback run once interrupts are available: detach the
 * hook, enable controller interrupts, and probe for logical drives under
 * both the config sx lock and the io mutex.
 */
907 mfi_startup(void *arg)
909 struct mfi_softc *sc;
911 sc = (struct mfi_softc *)arg;
913 config_intrhook_disestablish(&sc->mfi_ich);
915 sc->mfi_enable_intr(sc);
916 sx_xlock(&sc->mfi_config_lock);
917 mtx_lock(&sc->mfi_io_lock);
919 mtx_unlock(&sc->mfi_io_lock);
920 sx_xunlock(&sc->mfi_config_lock);
/*
 * Interrupt handler (mfi_intr): claim/clear the interrupt, then walk the
 * reply queue between the firmware's producer index (hw_pi) and our
 * consumer index (hw_ci), completing each command identified by its
 * context, and finally publish the new consumer index.
 * NOTE(review): the "static void mfi_intr(void *arg)" header line and a
 * few loop braces are missing from this copy; code left byte-identical.
 */
926 struct mfi_softc *sc;
927 struct mfi_command *cm;
928 uint32_t pi, ci, context;
930 sc = (struct mfi_softc *)arg;
932 if (sc->mfi_check_clear_intr(sc))
935 pi = sc->mfi_comms->hw_pi;
936 ci = sc->mfi_comms->hw_ci;
937 mtx_lock(&sc->mfi_io_lock);
939 context = sc->mfi_comms->hw_reply_q[ci];
940 if (context < sc->mfi_max_fw_cmds) {
941 cm = &sc->mfi_commands[context];
944 mfi_complete(sc, cm);
/* Reply queue holds mfi_max_fw_cmds + 1 entries; wrap the consumer index. */
946 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
951 sc->mfi_comms->hw_ci = ci;
953 /* Give defered I/O a chance to run */
954 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
955 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
957 mtx_unlock(&sc->mfi_io_lock);
/*
 * Shutdown handler: abort the outstanding AEN command (if any), then send
 * a polled CTRL_SHUTDOWN DCMD so the firmware flushes its caches before
 * power-off.
 */
963 mfi_shutdown(struct mfi_softc *sc)
965 struct mfi_dcmd_frame *dcmd;
966 struct mfi_command *cm;
969 mtx_lock(&sc->mfi_io_lock);
970 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
972 mtx_unlock(&sc->mfi_io_lock);
976 if (sc->mfi_aen_cm != NULL)
977 mfi_abort(sc, sc->mfi_aen_cm);
979 dcmd = &cm->cm_frame->dcmd;
980 dcmd->header.flags = MFI_FRAME_DIR_NONE;
981 cm->cm_flags = MFI_CMD_POLLED;
984 if ((error = mfi_mapcmd(sc, cm)) != 0) {
985 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
988 mfi_release_command(cm);
989 mtx_unlock(&sc->mfi_io_lock);
/*
 * Enumerate the firmware's logical drives (LD_GET_LIST DCMD) and attach
 * an mfid child for each target not already on mfi_ld_tqh.  Requires
 * both the config sx lock and the io mutex.
 */
994 mfi_ldprobe(struct mfi_softc *sc)
996 struct mfi_frame_header *hdr;
997 struct mfi_command *cm = NULL;
998 struct mfi_ld_list *list = NULL;
1002 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1003 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1005 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1006 (void **)&list, sizeof(*list));
1010 cm->cm_flags = MFI_CMD_DATAIN;
1011 if (mfi_wait_command(sc, cm) != 0) {
1012 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1016 hdr = &cm->cm_frame->header;
1017 if (hdr->cmd_status != MFI_STAT_OK) {
1018 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Skip targets we already know about; add the rest. */
1023 for (i = 0; i < list->ld_count; i++) {
1024 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1025 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1028 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1033 free(list, M_MFIBUF);
1035 mfi_release_command(cm);
1041 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1042 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp as a short string.  Timestamps are seconds
 * since 00:00 Jan 1, 2000, except that values with bits 24-31 all set
 * encode seconds since boot in the low 24 bits.
 * Not reentrant: returns a pointer to static storage.
 * NOTE(review): the else branch and return were missing from this copy;
 * reconstructed to match the comment above the function.
 */
static char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}
1059 format_class(int8_t class)
1061 static char buffer[6];
1064 case MFI_EVT_CLASS_DEBUG:
1066 case MFI_EVT_CLASS_PROGRESS:
1067 return ("progress");
1068 case MFI_EVT_CLASS_INFO:
1070 case MFI_EVT_CLASS_WARNING:
1072 case MFI_EVT_CLASS_CRITICAL:
1074 case MFI_EVT_CLASS_FATAL:
1076 case MFI_EVT_CLASS_DEAD:
1079 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log one firmware event: sequence number, timestamp, locale, class,
 * and the firmware-supplied description string.
 */
1085 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1088 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1089 format_timestamp(detail->time), detail->evt_class.members.locale,
1090 format_class(detail->evt_class.members.evt_class), detail->description);
/*
 * Register (or re-register) the asynchronous event wait command.  If an
 * AEN is already outstanding and it covers the requested class/locale,
 * nothing is done; otherwise the old command's filter is merged with the
 * new one and the old command is aborted before issuing the replacement.
 */
1094 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1096 struct mfi_command *cm;
1097 struct mfi_dcmd_frame *dcmd;
1098 union mfi_evt current_aen, prior_aen;
1099 struct mfi_evt_detail *ed = NULL;
1102 current_aen.word = locale;
1103 if (sc->mfi_aen_cm != NULL) {
/* Recover the prior filter from mbox word 1 of the outstanding command. */
1105 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Already covered: prior class is at least as verbose and locale superset. */
1106 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1107 !((prior_aen.members.locale & current_aen.members.locale)
1108 ^current_aen.members.locale)) {
1111 prior_aen.members.locale |= current_aen.members.locale;
1112 if (prior_aen.members.evt_class
1113 < current_aen.members.evt_class)
1114 current_aen.members.evt_class =
1115 prior_aen.members.evt_class;
1116 mfi_abort(sc, sc->mfi_aen_cm);
1120 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1121 (void **)&ed, sizeof(*ed));
1126 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale filter. */
1127 ((uint32_t *)&dcmd->mbox)[0] = seq;
1128 ((uint32_t *)&dcmd->mbox)[1] = locale;
1129 cm->cm_flags = MFI_CMD_DATAIN;
1130 cm->cm_complete = mfi_aen_complete;
1132 sc->mfi_aen_cm = cm;
1134 mfi_enqueue_ready(cm);
/*
 * Completion handler for the AEN wait command.  Unless aborted, it wakes
 * pollers, decodes/logs the event, signals registered AEN listener
 * processes with SIGIO, then frees the event buffer, releases the
 * command, and re-arms event notification at the next sequence number.
 * NOTE(review): some lines (sc assignment, abort bookkeeping, braces)
 * are missing from this copy; code left byte-identical.
 */
1142 mfi_aen_complete(struct mfi_command *cm)
1144 struct mfi_frame_header *hdr;
1145 struct mfi_softc *sc;
1146 struct mfi_evt_detail *detail;
1147 struct mfi_aen *mfi_aen_entry, *tmp;
1148 int seq = 0, aborted = 0;
1151 hdr = &cm->cm_frame->header;
1153 if (sc->mfi_aen_cm == NULL)
1156 if (sc->mfi_aen_cm->cm_aen_abort ||
1157 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1158 sc->mfi_aen_cm->cm_aen_abort = 0;
1161 sc->mfi_aen_triggered = 1;
1162 if (sc->mfi_poll_waiting) {
1163 sc->mfi_poll_waiting = 0;
1164 selwakeup(&sc->mfi_select);
1166 detail = cm->cm_data;
1168 * XXX If this function is too expensive or is recursive, then
1169 * events should be put onto a queue and processed later.
1171 mfi_decode_evt(sc, detail);
1172 seq = detail->seq + 1;
/* Notify and drop each registered listener exactly once. */
1173 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1174 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1176 PROC_LOCK(mfi_aen_entry->p);
1177 kern_psignal(mfi_aen_entry->p, SIGIO);
1178 PROC_UNLOCK(mfi_aen_entry->p);
1179 free(mfi_aen_entry, M_MFIBUF);
1183 free(cm->cm_data, M_MFIBUF);
1184 sc->mfi_aen_cm = NULL;
1185 wakeup(&sc->mfi_aen_cm);
1186 mfi_release_command(cm);
1188 /* set it up again so the driver can catch more events */
1190 mfi_aen_setup(sc, seq);
1194 #define MAX_EVENTS 15
/*
 * Replay firmware event-log entries from start_seq up to (but not
 * including) stop_seq, fetching them MAX_EVENTS at a time with polled
 * EVENT_GET DCMDs and logging each via mfi_decode_evt().  The log is a
 * circular buffer, so the stop point may have a smaller sequence number
 * than the start point.
 * NOTE(review): several lines (goto labels, break/continue, loop braces,
 * data buffer wiring, final free) are missing from this copy; code left
 * byte-identical.
 */
1197 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1199 struct mfi_command *cm;
1200 struct mfi_dcmd_frame *dcmd;
1201 struct mfi_evt_list *el;
1202 union mfi_evt class_locale;
1203 int error, i, seq, size;
1205 class_locale.members.reserved = 0;
1206 class_locale.members.locale = mfi_event_locale;
1207 class_locale.members.evt_class = mfi_event_class;
1209 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1211 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1215 for (seq = start_seq;;) {
1216 if ((cm = mfi_dequeue_free(sc)) == NULL) {
/* Build the EVENT_GET DCMD by hand (no data buffer allocation needed). */
1221 dcmd = &cm->cm_frame->dcmd;
1222 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1223 dcmd->header.cmd = MFI_CMD_DCMD;
1224 dcmd->header.timeout = 0;
1225 dcmd->header.data_len = size;
1226 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1227 ((uint32_t *)&dcmd->mbox)[0] = seq;
1228 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1229 cm->cm_sg = &dcmd->sgl;
1230 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1231 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1235 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1236 device_printf(sc->mfi_dev,
1237 "Failed to get controller entries\n");
1238 mfi_release_command(cm);
1242 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1243 BUS_DMASYNC_POSTREAD);
1244 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we have walked past the newest entry: done. */
1246 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1247 mfi_release_command(cm);
1250 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1251 device_printf(sc->mfi_dev,
1252 "Error %d fetching controller entries\n",
1253 dcmd->header.cmd_status);
1254 mfi_release_command(cm);
1257 mfi_release_command(cm);
1259 for (i = 0; i < el->count; i++) {
1261 * If this event is newer than 'stop_seq' then
1262 * break out of the loop. Note that the log
1263 * is a circular buffer so we have to handle
1264 * the case that our stop point is earlier in
1265 * the buffer than our start point.
1267 if (el->event[i].seq >= stop_seq) {
1268 if (start_seq <= stop_seq)
1270 else if (el->event[i].seq < start_seq)
1273 mfi_decode_evt(sc, &el->event[i]);
/* Continue the next fetch just past the last entry we received. */
1275 seq = el->event[el->count - 1].seq + 1;
/*
 * Query the firmware for info about logical drive 'id'
 * (MFI_DCMD_LD_GET_INFO) and, on success, hand the result to
 * mfi_add_ld_complete() to attach a disk device.  Caller must hold
 * mfi_io_lock.  ld_info ownership passes to the completion path on success;
 * it is freed here on the visible failure paths.
 */
1283 mfi_add_ld(struct mfi_softc *sc, int id)
1285 struct mfi_command *cm;
1286 struct mfi_dcmd_frame *dcmd = NULL;
1287 struct mfi_ld_info *ld_info = NULL;
1290 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1292 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1293 (void **)&ld_info, sizeof(*ld_info));
1295 device_printf(sc->mfi_dev,
1296 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1298 free(ld_info, M_MFIBUF);
1301 cm->cm_flags = MFI_CMD_DATAIN;
1302 dcmd = &cm->cm_frame->dcmd;
1304 if (mfi_wait_command(sc, cm) != 0) {
1305 device_printf(sc->mfi_dev,
1306 "Failed to get logical drive: %d\n", id);
1307 free(ld_info, M_MFIBUF);
1311 mfi_add_ld_complete(cm);
/*
 * Completion handler for mfi_add_ld(): if the LD_GET_INFO command
 * succeeded, create and attach an "mfid" child device carrying the
 * mfi_ld_info as its ivars.  The io lock is dropped around the newbus
 * calls (device_add_child/bus_generic_attach must not be made with it
 * held) and retaken before returning.
 */
1316 mfi_add_ld_complete(struct mfi_command *cm)
1318 struct mfi_frame_header *hdr;
1319 struct mfi_ld_info *ld_info;
1320 struct mfi_softc *sc;
1324 hdr = &cm->cm_frame->header;
1325 ld_info = cm->cm_private;
1327 if (hdr->cmd_status != MFI_STAT_OK) {
	/* Firmware rejected the query; discard the info buffer. */
1328 free(ld_info, M_MFIBUF);
1329 mfi_release_command(cm);
1332 mfi_release_command(cm);
1334 mtx_unlock(&sc->mfi_io_lock);
1336 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1337 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1338 free(ld_info, M_MFIBUF);
1340 mtx_lock(&sc->mfi_io_lock);
	/* ld_info ownership transfers to the child device via ivars. */
1344 device_set_ivars(child, ld_info);
1345 device_set_desc(child, "MFI Logical Disk");
1346 bus_generic_attach(sc->mfi_dev);
1348 mtx_lock(&sc->mfi_io_lock);
/*
 * Build an LD read/write command for the next bio on the driver's bio
 * queue.  Returns a prepared mfi_command, or (per the visible early-exit
 * paths) nothing usable when no free command or no pending bio exists.
 * The bio is stored in cm_private for completion via mfi_bio_complete().
 */
1351 static struct mfi_command *
1352 mfi_bio_command(struct mfi_softc *sc)
1354 struct mfi_io_frame *io;
1355 struct mfi_command *cm;
1357 int flags, blkcount;
1359 if ((cm = mfi_dequeue_free(sc)) == NULL)
1362 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
	/* No work after all; give the command slot back. */
1363 mfi_release_command(cm);
1367 io = &cm->cm_frame->io;
	/* Only the low two bits distinguish READ/WRITE here. */
1368 switch (bio->bio_cmd & 0x03) {
1370 io->header.cmd = MFI_CMD_LD_READ;
1371 flags = MFI_CMD_DATAIN;
1374 io->header.cmd = MFI_CMD_LD_WRITE;
1375 flags = MFI_CMD_DATAOUT;
1378 panic("Invalid bio command");
1381 /* Cheat with the sector length to avoid a non-constant division */
1382 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	/* Target LD id was stashed in bio_driver1 by the disk layer. */
1383 io->header.target_id = (uintptr_t)bio->bio_driver1;
1384 io->header.timeout = 0;
1385 io->header.flags = 0;
1386 io->header.sense_len = MFI_SENSE_LEN;
1387 io->header.data_len = blkcount;
1388 io->sense_addr_lo = cm->cm_sense_busaddr;
1389 io->sense_addr_hi = 0;
	/* 64-bit LBA split across two 32-bit frame fields. */
1390 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1391 io->lba_lo = bio->bio_pblkno & 0xffffffff;
1392 cm->cm_complete = mfi_bio_complete;
1393 cm->cm_private = bio;
1394 cm->cm_data = bio->bio_data;
1395 cm->cm_len = bio->bio_bcount;
1396 cm->cm_sg = &io->sgl;
1397 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1398 cm->cm_flags = flags;
/*
 * Completion handler for bio-sourced I/O: translate the frame status into
 * bio error state, release the command, and pass the bio back to the disk
 * layer.
 */
1403 mfi_bio_complete(struct mfi_command *cm)
1406 struct mfi_frame_header *hdr;
1407 struct mfi_softc *sc;
1409 bio = cm->cm_private;
1410 hdr = &cm->cm_frame->header;
	/* Either a firmware-level or SCSI-level failure marks the bio bad. */
1413 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
1414 bio->bio_flags |= BIO_ERROR;
1415 bio->bio_error = EIO;
1416 device_printf(sc->mfi_dev, "I/O error, status= %d "
1417 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1418 mfi_print_sense(cm->cm_sc, cm->cm_sense);
1419 } else if (cm->cm_error != 0) {
	/* Driver-side error (e.g. DMA mapping) with no firmware status. */
1420 bio->bio_flags |= BIO_ERROR;
1423 mfi_release_command(cm);
1424 mfi_disk_complete(bio);
/*
 * Pump the start queues: issue ready commands, then CAM pass-through work,
 * then queued bios, until resources run out or there is no more work.
 * Commands that fail to map are put back on the ready queue.
 */
1428 mfi_startio(struct mfi_softc *sc)
1430 struct mfi_command *cm;
1431 struct ccb_hdr *ccbh;
1434 /* Don't bother if we're short on resources */
1435 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1438 /* Try a command that has already been prepared */
1439 cm = mfi_dequeue_ready(sc);
	/* Next preference: CAM pass-through CCBs, built by mfi_cam_start. */
1442 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1443 cm = sc->mfi_cam_start(ccbh);
1446 /* Nope, so look for work on the bioq */
1448 cm = mfi_bio_command(sc);
1450 /* No work available, so exit */
1454 /* Send the command to the controller */
1455 if (mfi_mapcmd(sc, cm) != 0) {
1456 mfi_requeue_ready(cm);
/*
 * Map a command's data buffer for DMA (if any) and send it to the
 * controller.  Commands with data go through bus_dmamap_load(), whose
 * callback (mfi_data_cb) fills the SG list and issues the frame; data-less
 * commands are sent directly.  EINPROGRESS from a deferred (non-polled)
 * load freezes the queue until resources free up.  Caller must hold
 * mfi_io_lock.
 */
1463 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1467 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1469 if (cm->cm_data != NULL) {
	/* Polled commands cannot wait for DMA resources. */
1470 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1471 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1472 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1473 if (error == EINPROGRESS) {
	/* Callback will run later; stop issuing until then. */
1474 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1478 error = mfi_send_frame(sc, cm);
/*
 * bus_dma load callback: build the command's scatter/gather list (32- or
 * 64-bit entries depending on MFI_FLAGS_SG64), set DMA sync/direction
 * flags, account for extra frames consumed by the SG list, and send the
 * frame.  STP (SATA pass-through) commands carry cm_stp_len bytes in a
 * dedicated first SG entry ahead of the data segments.
 */
1485 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1487 struct mfi_frame_header *hdr;
1488 struct mfi_command *cm;
1490 struct mfi_softc *sc;
1491 int i, j, first, dir;
1493 cm = (struct mfi_command *)arg;
1495 hdr = &cm->cm_frame->header;
	/* Propagate a load failure through the normal completion path. */
1499 printf("error %d in callback\n", error);
1500 cm->cm_error = error;
1501 mfi_complete(sc, cm);
	/* STP: first SG entry covers the pass-through header bytes. */
1506 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
1507 first = cm->cm_stp_len;
1508 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1509 sgl->sg32[j].addr = segs[0].ds_addr;
1510 sgl->sg32[j++].len = first;
1512 sgl->sg64[j].addr = segs[0].ds_addr;
1513 sgl->sg64[j++].len = first;
	/* Remaining segments, offset by 'first' where applicable. */
1517 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1518 for (i = 0; i < nsegs; i++) {
1519 sgl->sg32[j].addr = segs[i].ds_addr + first;
1520 sgl->sg32[j++].len = segs[i].ds_len - first;
1524 for (i = 0; i < nsegs; i++) {
1525 sgl->sg64[j].addr = segs[i].ds_addr + first;
1526 sgl->sg64[j++].len = segs[i].ds_len - first;
	/* Tell the firmware the SG entries are 64-bit. */
1529 hdr->flags |= MFI_FRAME_SGL64;
1534 if (cm->cm_flags & MFI_CMD_DATAIN) {
1535 dir |= BUS_DMASYNC_PREREAD;
1536 hdr->flags |= MFI_FRAME_DIR_READ;
1538 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1539 dir |= BUS_DMASYNC_PREWRITE;
1540 hdr->flags |= MFI_FRAME_DIR_WRITE;
1542 if (cm->cm_frame->header.cmd == MFI_CMD_STP)
1543 dir |= BUS_DMASYNC_PREWRITE;
1544 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1545 cm->cm_flags |= MFI_CMD_MAPPED;
1548 * Instead of calculating the total number of frames in the
1549 * compound frame, it's already assumed that there will be at
1550 * least 1 frame, so don't compensate for the modulo of the
1551 * following division.
1553 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
1554 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1556 mfi_send_frame(sc, cm);
/*
 * Hand a fully-built command to the hardware via sc->mfi_issue_cmd.
 * Non-polled commands are timestamped and placed on the busy queue for
 * interrupt-driven completion; polled commands are busy-waited here until
 * cmd_status leaves MFI_STAT_INVALID_STATUS or the poll timeout expires.
 */
1562 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1564 struct mfi_frame_header *hdr;
	/* Poll budget in milliseconds. */
1565 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1567 hdr = &cm->cm_frame->header;
1569 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1570 cm->cm_timestamp = time_uptime;
1571 mfi_enqueue_busy(cm);
	/* Polled: sentinel status lets us detect completion by polling. */
1573 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1574 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1578 * The bus address of the command is aligned on a 64 byte boundary,
1579 * leaving the least 6 bits as zero. For whatever reason, the
1580 * hardware wants the address shifted right by three, leaving just
1581 * 3 zero bits. These three bits are then used as a prefetching
1582 * hint for the hardware to predict how many frames need to be
1583 * fetched across the bus. If a command has more than 8 frames
1584 * then the 3 bits are set to 0x7 and the firmware uses other
1585 * information in the command to determine the total amount to fetch.
1586 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1587 * is enough for both 32bit and 64bit systems.
1589 if (cm->cm_extra_frames > 7)
1590 cm->cm_extra_frames = 7;
1592 sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames);
1594 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1597 /* This is a polled command, so busy-wait for it to complete. */
1598 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
	/* Status never changed within the budget: report a timeout. */
1605 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1606 device_printf(sc->mfi_dev, "Frame %p timed out "
1607 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Common completion path: undo the DMA mapping (post-sync + unload) if the
 * command was mapped, mark it completed, and invoke its cm_complete
 * callback when one is set.
 */
1615 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1619 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
	/* STP commands are synced for read as well as any DATAIN. */
1621 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
1622 (cm->cm_frame->header.cmd == MFI_CMD_STP))
1623 dir |= BUS_DMASYNC_POSTREAD;
1624 if (cm->cm_flags & MFI_CMD_DATAOUT)
1625 dir |= BUS_DMASYNC_POSTWRITE;
1627 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1628 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1629 cm->cm_flags &= ~MFI_CMD_MAPPED;
1632 cm->cm_flags |= MFI_CMD_COMPLETED;
1634 if (cm->cm_complete != NULL)
1635 cm->cm_complete(cm);
/*
 * Issue a polled MFI_CMD_ABORT against 'cm_abort' (identified to the
 * firmware by its frame context and bus address).  The visible tail waits
 * up to ~5 intervals for the outstanding AEN command to drain, sleeping on
 * &sc->mfi_aen_cm.  Caller must hold mfi_io_lock.
 */
1641 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1643 struct mfi_command *cm;
1644 struct mfi_abort_frame *abort;
1647 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1649 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1653 abort = &cm->cm_frame->abort;
1654 abort->header.cmd = MFI_CMD_ABORT;
1655 abort->header.flags = 0;
	/* Identify the victim command by context + frame bus address. */
1656 abort->abort_context = cm_abort->cm_frame->header.context;
1657 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1658 abort->abort_mfi_addr_hi = 0;
1660 cm->cm_flags = MFI_CMD_POLLED;
	/* Flag the AEN command so its completion knows it was aborted. */
1662 sc->mfi_aen_cm->cm_aen_abort = 1;
1664 mfi_release_command(cm);
1666 while (i < 5 && sc->mfi_aen_cm != NULL) {
1667 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
/*
 * Write 'len' bytes from 'virt' to LBA 'lba' of logical drive 'id' using a
 * polled MFI_CMD_LD_WRITE — the crash-dump path, so no interrupts and no
 * queuing.  DMA map is synced/unloaded by hand since the command is polled.
 */
1675 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1677 struct mfi_command *cm;
1678 struct mfi_io_frame *io;
1681 if ((cm = mfi_dequeue_free(sc)) == NULL)
1684 io = &cm->cm_frame->io;
1685 io->header.cmd = MFI_CMD_LD_WRITE;
1686 io->header.target_id = id;
1687 io->header.timeout = 0;
1688 io->header.flags = 0;
1689 io->header.sense_len = MFI_SENSE_LEN;
	/* data_len is in sectors, rounded up. */
1690 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1691 io->sense_addr_lo = cm->cm_sense_busaddr;
1692 io->sense_addr_hi = 0;
1693 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1694 io->lba_lo = lba & 0xffffffff;
1697 cm->cm_sg = &io->sgl;
1698 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1699 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1701 error = mfi_mapcmd(sc, cm);
1702 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1703 BUS_DMASYNC_POSTWRITE);
1704 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1705 mfi_release_command(cm);
/*
 * Character-device open: refuse while the driver is detaching, otherwise
 * mark the device open (MFI_FLAGS_OPEN) under the io lock.
 */
1711 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
1713 struct mfi_softc *sc;
1718 mtx_lock(&sc->mfi_io_lock);
1719 if (sc->mfi_detaching)
1722 sc->mfi_flags |= MFI_FLAGS_OPEN;
1725 mtx_unlock(&sc->mfi_io_lock);
/*
 * Character-device close: clear the open flag and drop any AEN
 * registrations belonging to the closing process so it no longer receives
 * SIGIO event notifications.
 */
1731 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
1733 struct mfi_softc *sc;
1734 struct mfi_aen *mfi_aen_entry, *tmp;
1738 mtx_lock(&sc->mfi_io_lock);
1739 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
	/* SAFE variant: entries may be removed while iterating. */
1741 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1742 if (mfi_aen_entry->p == curproc) {
1743 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1745 free(mfi_aen_entry, M_MFIBUF);
1748 mtx_unlock(&sc->mfi_io_lock);
/*
 * Take the configuration sx lock for DCMD opcodes that change the array
 * configuration (LD delete, config add/clear).  The return value is the
 * 'locked' token consumed by mfi_config_unlock().
 */
1753 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
1757 case MFI_DCMD_LD_DELETE:
1758 case MFI_DCMD_CFG_ADD:
1759 case MFI_DCMD_CFG_CLEAR:
1760 sx_xlock(&sc->mfi_config_lock);
/* Release the configuration lock if mfi_config_lock() reported it taken. */
1768 mfi_config_unlock(struct mfi_softc *sc, int locked)
1772 sx_xunlock(&sc->mfi_config_lock);
1775 /* Perform pre-issue checks on commands from userland and possibly veto them. */
/*
 * Vet a userland command before it is issued.  For LD delete, the target
 * disk must exist and be disabled; for config clear, every disk must be
 * disabled — on failure the already-disabled disks are re-enabled.
 * Caller must hold mfi_io_lock.
 */
1777 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
1779 struct mfi_disk *ld, *ld2;
1782 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1784 switch (cm->cm_frame->dcmd.opcode) {
1785 case MFI_DCMD_LD_DELETE:
	/* mbox[0] carries the target LD id. */
1786 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1787 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
1793 error = mfi_disk_disable(ld);
1795 case MFI_DCMD_CFG_CLEAR:
1796 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1797 error = mfi_disk_disable(ld);
	/* Roll back: re-enable disks disabled before the failure. */
1802 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
1805 mfi_disk_enable(ld2);
1815 /* Perform post-issue checks on commands from userland. */
/*
 * Post-issue follow-up for userland commands: on successful LD delete /
 * config clear, detach the corresponding child device(s) (dropping
 * mfi_io_lock around newbus calls); on failure, re-enable the disks that
 * mfi_check_command_pre() disabled.
 */
1817 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
1819 struct mfi_disk *ld, *ldn;
1821 switch (cm->cm_frame->dcmd.opcode) {
1822 case MFI_DCMD_LD_DELETE:
1823 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1824 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
1827 KASSERT(ld != NULL, ("volume dissappeared"));
1828 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
1829 mtx_unlock(&sc->mfi_io_lock);
1831 device_delete_child(sc->mfi_dev, ld->ld_dev);
1833 mtx_lock(&sc->mfi_io_lock);
	/* Delete failed: restore the disk disabled in the pre-check. */
1835 mfi_disk_enable(ld);
1837 case MFI_DCMD_CFG_CLEAR:
1838 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
1839 mtx_unlock(&sc->mfi_io_lock);
	/* SAFE variant: children are removed while iterating. */
1841 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
1842 device_delete_child(sc->mfi_dev, ld->ld_dev);
1845 mtx_lock(&sc->mfi_io_lock);
1847 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
1848 mfi_disk_enable(ld);
1851 case MFI_DCMD_CFG_ADD:
1854 case MFI_DCMD_CFG_FOREIGN_IMPORT:
/*
 * Execute a userland pass-through DCMD (MFIIO_PASSTHRU path): copy in the
 * optional data buffer, run the frame through the normal command path with
 * pre/post checks, then copy the frame and data back out.  The frame
 * context is saved/restored because the user's frame copy clobbers it.
 */
1861 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
1863 struct mfi_command *cm;
1864 struct mfi_dcmd_frame *dcmd;
1865 void *ioc_buf = NULL;
1867 int error = 0, locked;
1870 if (ioc->buf_size > 0) {
1871 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
1872 if (ioc_buf == NULL) {
1875 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
1877 device_printf(sc->mfi_dev, "failed to copyin\n");
1878 free(ioc_buf, M_MFIBUF);
1883 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
1885 mtx_lock(&sc->mfi_io_lock);
	/* Block (interruptibly by timeout) until a free command slot exists. */
1886 while ((cm = mfi_dequeue_free(sc)) == NULL)
1887 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
1889 /* Save context for later */
1890 context = cm->cm_frame->header.context;
1892 dcmd = &cm->cm_frame->dcmd;
1893 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
1895 cm->cm_sg = &dcmd->sgl;
1896 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1897 cm->cm_data = ioc_buf;
1898 cm->cm_len = ioc->buf_size;
1900 /* restore context */
1901 cm->cm_frame->header.context = context;
1903 /* Cheat since we don't know if we're writing or reading */
1904 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
1906 error = mfi_check_command_pre(sc, cm);
1910 error = mfi_wait_command(sc, cm);
1912 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
	/* Return the completed frame (status etc.) to the caller. */
1915 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
1916 mfi_check_command_post(sc, cm);
1918 mfi_release_command(cm);
1919 mtx_unlock(&sc->mfi_io_lock);
1920 mfi_config_unlock(sc, locked);
1921 if (ioc->buf_size > 0)
1922 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
1924 free(ioc_buf, M_MFIBUF);
/*
 * PTRIN converts a user-supplied (possibly 32-bit) pointer value into a
 * kernel void*.  The two definitions are alternatives selected by a
 * preprocessor conditional not visible in this excerpt (presumably
 * 32-bit-compat vs. native — confirm against the full source).
 */
1929 #define PTRIN(p) ((void *)(uintptr_t)(p))
1931 #define PTRIN(p) (p)
/*
 * Main character-device ioctl dispatcher.  Handles driver statistics
 * queries, MFIIO_QUERY_DISK, raw firmware command pass-through (native and
 * 32-bit compat layouts), AEN registration, the Linux ioctl shims (which
 * re-dispatch to mfi_linux_ioctl_int on the addressed adapter), and
 * MFIIO_PASSTHRU/MFIIO_PASSTHRU32.  NOTE(review): this excerpt omits many
 * original lines (case labels, error returns, frees) — verify control flow
 * against the full source before relying on it.
 */
1935 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
1937 struct mfi_softc *sc;
1938 union mfi_statrequest *ms;
1939 struct mfi_ioc_packet *ioc;
1941 struct mfi_ioc_packet32 *ioc32;
1943 struct mfi_ioc_aen *aen;
1944 struct mfi_command *cm = NULL;
1946 union mfi_sense_ptr sense_ptr;
1947 uint8_t *data = NULL, *temp, *addr;
1950 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
1952 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
1953 struct mfi_ioc_passthru iop_swab;
	/* Statistics request: copy the selected queue stats back to the user. */
1962 ms = (union mfi_statrequest *)arg;
1963 switch (ms->ms_item) {
1968 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1969 sizeof(struct mfi_qstat));
1976 case MFIIO_QUERY_DISK:
1978 struct mfi_query_disk *qd;
1979 struct mfi_disk *ld;
1981 qd = (struct mfi_query_disk *)arg;
1982 mtx_lock(&sc->mfi_io_lock);
1983 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1984 if (ld->ld_id == qd->array_id)
1989 mtx_unlock(&sc->mfi_io_lock);
1993 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
1995 bzero(qd->devname, SPECNAMELEN + 1);
1996 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
1997 mtx_unlock(&sc->mfi_io_lock);
	/* Raw frame pass-through: possibly redirect to another adapter. */
2005 devclass_t devclass;
2006 ioc = (struct mfi_ioc_packet *)arg;
2009 adapter = ioc->mfi_adapter_no;
2010 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2011 devclass = devclass_find("mfi");
2012 sc = devclass_get_softc(devclass, adapter);
2014 mtx_lock(&sc->mfi_io_lock);
2015 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2016 mtx_unlock(&sc->mfi_io_lock);
2019 mtx_unlock(&sc->mfi_io_lock);
2023 * save off original context since copying from user
2024 * will clobber some data
2026 context = cm->cm_frame->header.context;
2028 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2029 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2030 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2031 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2032 if (ioc->mfi_sge_count) {
2034 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
	/* Derive data direction from the user-supplied frame flags. */
2037 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2038 cm->cm_flags |= MFI_CMD_DATAIN;
2039 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2040 cm->cm_flags |= MFI_CMD_DATAOUT;
2041 /* Legacy app shim */
2042 if (cm->cm_flags == 0)
2043 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2044 cm->cm_len = cm->cm_frame->header.data_len;
	/* STP commands carry an extra header whose length comes from SG[0]. */
2045 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2047 if (cmd == MFI_CMD) {
2050 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
2053 /* 32bit on 64bit */
2054 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2055 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
2058 cm->cm_len += cm->cm_stp_len;
2061 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2062 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
2064 if (cm->cm_data == NULL) {
2065 device_printf(sc->mfi_dev, "Malloc failed\n");
2072 /* restore header context */
2073 cm->cm_frame->header.context = context;
	/* Gather outbound user data (native or 32-bit SG layout). */
2076 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
2077 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
2078 for (i = 0; i < ioc->mfi_sge_count; i++) {
2080 if (cmd == MFI_CMD) {
2083 addr = ioc->mfi_sgl[i].iov_base;
2084 len = ioc->mfi_sgl[i].iov_len;
2087 /* 32bit on 64bit */
2088 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2089 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
2090 len = ioc32->mfi_sgl[i].iov_len;
2093 error = copyin(addr, temp, len);
2095 device_printf(sc->mfi_dev,
2096 "Copy in failed\n");
2103 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2104 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
	/* Point pass-through sense at the command's own sense buffer. */
2106 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2107 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2108 cm->cm_frame->pass.sense_addr_hi = 0;
2111 mtx_lock(&sc->mfi_io_lock);
2112 error = mfi_check_command_pre(sc, cm);
2114 mtx_unlock(&sc->mfi_io_lock);
2118 if ((error = mfi_wait_command(sc, cm)) != 0) {
2119 device_printf(sc->mfi_dev,
2120 "Controller polled failed\n");
2121 mtx_unlock(&sc->mfi_io_lock);
2125 mfi_check_command_post(sc, cm);
2126 mtx_unlock(&sc->mfi_io_lock);
	/* Scatter inbound data back to the user's SG list. */
2129 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2130 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
2131 for (i = 0; i < ioc->mfi_sge_count; i++) {
2133 if (cmd == MFI_CMD) {
2136 addr = ioc->mfi_sgl[i].iov_base;
2137 len = ioc->mfi_sgl[i].iov_len;
2140 /* 32bit on 64bit */
2141 ioc32 = (struct mfi_ioc_packet32 *)ioc;
2142 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
2143 len = ioc32->mfi_sgl[i].iov_len;
2146 error = copyout(temp, addr, len);
2148 device_printf(sc->mfi_dev,
2149 "Copy out failed\n");
2156 if (ioc->mfi_sense_len) {
2157 /* get user-space sense ptr then copy out sense */
2158 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
2159 &sense_ptr.sense_ptr_data[0],
2160 sizeof(sense_ptr.sense_ptr_data));
2162 if (cmd != MFI_CMD) {
2164 * not 64bit native so zero out any address
2166 sense_ptr.addr.high = 0;
2169 error = copyout(cm->cm_sense, sense_ptr.user_space,
2170 ioc->mfi_sense_len);
2172 device_printf(sc->mfi_dev,
2173 "Copy out failed\n");
	/* Reflect the firmware status into the user's frame header. */
2178 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2180 mfi_config_unlock(sc, locked);
2182 free(data, M_MFIBUF);
2184 mtx_lock(&sc->mfi_io_lock);
2185 mfi_release_command(cm);
2186 mtx_unlock(&sc->mfi_io_lock);
	/* AEN registration on behalf of the calling process. */
2192 aen = (struct mfi_ioc_aen *)arg;
2193 error = mfi_aen_register(sc, aen->aen_seq_num,
2194 aen->aen_class_locale);
2197 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2199 devclass_t devclass;
2200 struct mfi_linux_ioc_packet l_ioc;
2203 devclass = devclass_find("mfi");
2204 if (devclass == NULL)
2207 error = copyin(arg, &l_ioc, sizeof(l_ioc));
2210 adapter = l_ioc.lioc_adapter_no;
2211 sc = devclass_get_softc(devclass, adapter);
	/* Re-dispatch on the adapter the packet actually addressed. */
2214 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2215 cmd, arg, flag, td));
2218 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2220 devclass_t devclass;
2221 struct mfi_linux_ioc_aen l_aen;
2224 devclass = devclass_find("mfi");
2225 if (devclass == NULL)
2228 error = copyin(arg, &l_aen, sizeof(l_aen));
2231 adapter = l_aen.laen_adapter_no;
2232 sc = devclass_get_softc(devclass, adapter);
2235 return (mfi_linux_ioctl_int(sc->mfi_cdev,
2236 cmd, arg, flag, td));
	/* 32-bit passthru: swab into the native struct, then fall through. */
2240 case MFIIO_PASSTHRU32:
2241 iop_swab.ioc_frame = iop32->ioc_frame;
2242 iop_swab.buf_size = iop32->buf_size;
2243 iop_swab.buf = PTRIN(iop32->buf);
2247 case MFIIO_PASSTHRU:
2248 error = mfi_user_command(sc, iop);
2250 if (cmd == MFIIO_PASSTHRU32)
2251 iop32->ioc_frame = iop_swab.ioc_frame;
2255 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Linux-compatibility ioctl backend (megaraid_sas shim).  Mirrors the
 * native raw pass-through path in mfi_ioctl(): copy in the Linux packet,
 * build and run the frame, copy data/sense/status back out.  Also handles
 * the Linux AEN-registration ioctl.  NOTE(review): lines are missing from
 * this excerpt (error returns, frees, case exits) — verify against the
 * full source.
 */
2264 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2266 struct mfi_softc *sc;
2267 struct mfi_linux_ioc_packet l_ioc;
2268 struct mfi_linux_ioc_aen l_aen;
2269 struct mfi_command *cm = NULL;
2270 struct mfi_aen *mfi_aen_entry;
2271 union mfi_sense_ptr sense_ptr;
2273 uint8_t *data = NULL, *temp;
2280 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2281 error = copyin(arg, &l_ioc, sizeof(l_ioc));
	/* Reject SG counts beyond the fixed compat array. */
2285 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2289 mtx_lock(&sc->mfi_io_lock);
2290 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2291 mtx_unlock(&sc->mfi_io_lock);
2294 mtx_unlock(&sc->mfi_io_lock);
2298 * save off original context since copying from user
2299 * will clobber some data
2301 context = cm->cm_frame->header.context;
2303 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2304 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
2305 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2306 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
2307 if (l_ioc.lioc_sge_count)
2309 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
	/* Data direction from the user frame's flags. */
2311 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2312 cm->cm_flags |= MFI_CMD_DATAIN;
2313 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2314 cm->cm_flags |= MFI_CMD_DATAOUT;
2315 cm->cm_len = cm->cm_frame->header.data_len;
2317 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
2318 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
2320 if (cm->cm_data == NULL) {
2321 device_printf(sc->mfi_dev, "Malloc failed\n");
2328 /* restore header context */
2329 cm->cm_frame->header.context = context;
	/* Gather outbound data from the Linux-layout SG list. */
2332 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2333 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2334 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
2336 l_ioc.lioc_sgl[i].iov_len);
2338 device_printf(sc->mfi_dev,
2339 "Copy in failed\n");
2342 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2346 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2347 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2349 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
2350 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
2351 cm->cm_frame->pass.sense_addr_hi = 0;
2354 mtx_lock(&sc->mfi_io_lock);
2355 error = mfi_check_command_pre(sc, cm);
2357 mtx_unlock(&sc->mfi_io_lock);
2361 if ((error = mfi_wait_command(sc, cm)) != 0) {
2362 device_printf(sc->mfi_dev,
2363 "Controller polled failed\n");
2364 mtx_unlock(&sc->mfi_io_lock);
2368 mfi_check_command_post(sc, cm);
2369 mtx_unlock(&sc->mfi_io_lock);
	/* Scatter inbound data back to the user's SG list. */
2372 if (cm->cm_flags & MFI_CMD_DATAIN) {
2373 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2374 error = copyout(temp,
2375 PTRIN(l_ioc.lioc_sgl[i].iov_base),
2376 l_ioc.lioc_sgl[i].iov_len);
2378 device_printf(sc->mfi_dev,
2379 "Copy out failed\n");
2382 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2386 if (l_ioc.lioc_sense_len) {
2387 /* get user-space sense ptr then copy out sense */
2388 bcopy(&((struct mfi_linux_ioc_packet*)arg)
2389 ->lioc_frame.raw[l_ioc.lioc_sense_off],
2390 &sense_ptr.sense_ptr_data[0],
2391 sizeof(sense_ptr.sense_ptr_data));
2394 * only 32bit Linux support so zero out any
2395 * address over 32bit
2397 sense_ptr.addr.high = 0;
2399 error = copyout(cm->cm_sense, sense_ptr.user_space,
2400 l_ioc.lioc_sense_len);
2402 device_printf(sc->mfi_dev,
2403 "Copy out failed\n");
	/* Write the command status directly into the user's packet. */
2408 error = copyout(&cm->cm_frame->header.cmd_status,
2409 &((struct mfi_linux_ioc_packet*)arg)
2410 ->lioc_frame.hdr.cmd_status,
2413 device_printf(sc->mfi_dev,
2414 "Copy out failed\n");
2419 mfi_config_unlock(sc, locked);
2421 free(data, M_MFIBUF);
2423 mtx_lock(&sc->mfi_io_lock);
2424 mfi_release_command(cm);
2425 mtx_unlock(&sc->mfi_io_lock);
2429 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2430 error = copyin(arg, &l_aen, sizeof(l_aen));
2433 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
	/* Register the calling process for SIGIO on AEN delivery. */
2434 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
2436 mtx_lock(&sc->mfi_io_lock);
2437 if (mfi_aen_entry != NULL) {
2438 mfi_aen_entry->p = curproc;
2439 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
2442 error = mfi_aen_register(sc, l_aen.laen_seq_num,
2443 l_aen.laen_class_locale);
	/* Registration failed: undo the list insertion. */
2446 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2448 free(mfi_aen_entry, M_MFIBUF);
2450 mtx_unlock(&sc->mfi_io_lock);
2454 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * poll(2) handler for the control device: report readable when an AEN has
 * fired (consuming the trigger), otherwise record the thread for wakeup
 * via selwakeup() when the next AEN arrives.
 */
2463 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
2465 struct mfi_softc *sc;
2470 if (poll_events & (POLLIN | POLLRDNORM)) {
2471 if (sc->mfi_aen_triggered != 0) {
2472 revents |= poll_events & (POLLIN | POLLRDNORM);
	/* One-shot: clear the trigger once reported. */
2473 sc->mfi_aen_triggered = 0;
2475 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2481 if (poll_events & (POLLIN | POLLRDNORM)) {
2482 sc->mfi_poll_waiting = 1;
2483 selrecord(td, &sc->mfi_select);
	/*
	 * Body of a diagnostic dump routine (function header not visible in
	 * this excerpt; presumably mfi_dump_all — confirm against the full
	 * source).  Iterates every mfi adapter instance and reports busy
	 * commands that have exceeded MFI_CMD_TIMEOUT seconds.
	 */
2494 struct mfi_softc *sc;
2495 struct mfi_command *cm;
2501 dc = devclass_find("mfi");
2503 printf("No mfi dev class\n");
	/* Walk softc instances by unit number until none remain. */
2507 for (i = 0; ; i++) {
2508 sc = devclass_get_softc(dc, i);
2511 device_printf(sc->mfi_dev, "Dumping\n\n");
	/* Commands timestamped before 'deadline' have timed out. */
2513 deadline = time_uptime - MFI_CMD_TIMEOUT;
2514 mtx_lock(&sc->mfi_io_lock);
2515 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2516 if (cm->cm_timestamp < deadline) {
2517 device_printf(sc->mfi_dev,
2518 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2519 (int)(time_uptime - cm->cm_timestamp));
2530 mtx_unlock(&sc->mfi_io_lock);
2537 mfi_timeout(void *data)
2539 struct mfi_softc *sc = (struct mfi_softc *)data;
2540 struct mfi_command *cm;
2544 deadline = time_uptime - MFI_CMD_TIMEOUT;
2545 mtx_lock(&sc->mfi_io_lock);
2546 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2547 if (sc->mfi_aen_cm == cm)
2549 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
2550 device_printf(sc->mfi_dev,
2551 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2552 (int)(time_uptime - cm->cm_timestamp));
2554 MFI_VALIDATE_CMD(sc, cm);
2564 mtx_unlock(&sc->mfi_io_lock);
2566 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,