2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
56 #include "opt_compat.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/kernel.h>
65 #include <sys/selinfo.h>
68 #include <sys/eventhandler.h>
70 #include <sys/bus_dma.h>
72 #include <sys/ioccom.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
79 #include <machine/bus.h>
80 #include <machine/resource.h>
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
88 static int mfi_alloc_commands(struct mfi_softc *);
89 static int mfi_comms_init(struct mfi_softc *);
90 static int mfi_get_controller_info(struct mfi_softc *);
91 static int mfi_get_log_state(struct mfi_softc *,
92 struct mfi_evt_log_state **);
93 static int mfi_parse_entries(struct mfi_softc *, int, int);
94 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void mfi_startup(void *arg);
96 static void mfi_intr(void *arg);
97 static void mfi_ldprobe(struct mfi_softc *sc);
98 static void mfi_syspdprobe(struct mfi_softc *sc);
99 static void mfi_handle_evt(void *context, int pending);
100 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void mfi_aen_complete(struct mfi_command *);
102 static int mfi_add_ld(struct mfi_softc *sc, int);
103 static void mfi_add_ld_complete(struct mfi_command *);
104 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
107 static void mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
110 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void mfi_timeout(void *);
115 static int mfi_user_command(struct mfi_softc *,
116 struct mfi_ioc_passthru *);
117 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
125 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
136 0, "event message locale");
138 static int mfi_event_class = MFI_EVT_CLASS_INFO;
139 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
140 0, "event message class");
142 static int mfi_max_cmds = 128;
143 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
144 0, "Max commands limit (-1 = controller limit)");
146 static int mfi_detect_jbod_change = 1;
147 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
148 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
150 int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
151 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
152 &mfi_polled_cmd_timeout, 0,
153 "Polled command timeout - used for firmware flash etc (in seconds)");
155 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
157 0, "Command timeout (in seconds)");
159 /* Management interface */
160 static d_open_t mfi_open;
161 static d_close_t mfi_close;
162 static d_ioctl_t mfi_ioctl;
163 static d_poll_t mfi_poll;
165 static struct cdevsw mfi_cdevsw = {
166 .d_version = D_VERSION,
169 .d_close = mfi_close,
170 .d_ioctl = mfi_ioctl,
175 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
177 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
178 struct mfi_skinny_dma_info mfi_skinny;
181 mfi_enable_intr_xscale(struct mfi_softc *sc)
183 MFI_WRITE4(sc, MFI_OMSK, 0x01);
187 mfi_enable_intr_ppc(struct mfi_softc *sc)
189 if (sc->mfi_flags & MFI_FLAGS_1078) {
190 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
191 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
193 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
194 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
195 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
197 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
198 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
203 mfi_read_fw_status_xscale(struct mfi_softc *sc)
205 return MFI_READ4(sc, MFI_OMSG0);
209 mfi_read_fw_status_ppc(struct mfi_softc *sc)
211 return MFI_READ4(sc, MFI_OSP0);
215 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
219 status = MFI_READ4(sc, MFI_OSTS);
220 if ((status & MFI_OSTS_INTR_VALID) == 0)
223 MFI_WRITE4(sc, MFI_OSTS, status);
228 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
232 status = MFI_READ4(sc, MFI_OSTS);
233 if (sc->mfi_flags & MFI_FLAGS_1078) {
234 if (!(status & MFI_1078_RM)) {
238 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
239 if (!(status & MFI_GEN2_RM)) {
243 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
244 if (!(status & MFI_SKINNY_RM)) {
248 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
249 MFI_WRITE4(sc, MFI_OSTS, status);
251 MFI_WRITE4(sc, MFI_ODCR0, status);
256 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
258 MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
262 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
264 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
265 MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
266 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
268 MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
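/*
 * Worked example (illustrative): for a frame at bus address 0x1000 with
 * frame_cnt = 2, the xscale path writes (0x1000 >> 3) | 2 = 0x202 and
 * the ppc/skinny path writes (0x1000 | (2 << 1)) | 1 = 0x1005. Command
 * frames are 64-byte aligned (see the frame DMA tag in mfi_attach), so
 * the low address bits are free to carry the extra-frame count and,
 * presumably, a descriptor-valid flag in bit 0.
 */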
273 mfi_transition_firmware(struct mfi_softc *sc)
275 uint32_t fw_state, cur_state;
277 uint32_t cur_abs_reg_val = 0;
278 uint32_t prev_abs_reg_val = 0;
280 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
281 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
282 while (fw_state != MFI_FWSTATE_READY) {
284 device_printf(sc->mfi_dev, "Waiting for firmware to "
286 cur_state = fw_state;
288 case MFI_FWSTATE_FAULT:
289 device_printf(sc->mfi_dev, "Firmware fault\n");
291 case MFI_FWSTATE_WAIT_HANDSHAKE:
292 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
293 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
295 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
296 max_wait = MFI_RESET_WAIT_TIME;
298 case MFI_FWSTATE_OPERATIONAL:
299 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
300 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
302 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
303 max_wait = MFI_RESET_WAIT_TIME;
305 case MFI_FWSTATE_UNDEFINED:
306 case MFI_FWSTATE_BB_INIT:
307 max_wait = MFI_RESET_WAIT_TIME;
309 case MFI_FWSTATE_FW_INIT_2:
310 max_wait = MFI_RESET_WAIT_TIME;
312 case MFI_FWSTATE_FW_INIT:
313 case MFI_FWSTATE_FLUSH_CACHE:
314 max_wait = MFI_RESET_WAIT_TIME;
316 case MFI_FWSTATE_DEVICE_SCAN:
317 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
318 prev_abs_reg_val = cur_abs_reg_val;
320 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
321 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
322 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
324 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
325 max_wait = MFI_RESET_WAIT_TIME;
328 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
332 for (i = 0; i < (max_wait * 10); i++) {
333 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
334 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
335 if (fw_state == cur_state)
340 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
341 /* Check the device scanning progress */
342 if (prev_abs_reg_val != cur_abs_reg_val) {
346 if (fw_state == cur_state) {
347 device_printf(sc->mfi_dev, "Firmware stuck in state "
356 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
361 *addr = segs[0].ds_addr;
366 mfi_attach(struct mfi_softc *sc)
369 int error, commsz, framessz, sensesz;
370 int frames, unit, max_fw_sge, max_fw_cmds;
371 uint32_t tb_mem_size = 0;
376 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
379 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
380 sx_init(&sc->mfi_config_lock, "MFI config");
381 TAILQ_INIT(&sc->mfi_ld_tqh);
382 TAILQ_INIT(&sc->mfi_syspd_tqh);
383 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
384 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
385 TAILQ_INIT(&sc->mfi_evt_queue);
386 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
387 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
388 TAILQ_INIT(&sc->mfi_aen_pids);
389 TAILQ_INIT(&sc->mfi_cam_ccbq);
397 sc->last_seq_num = 0;
398 sc->disableOnlineCtrlReset = 1;
399 sc->issuepend_done = 1;
400 sc->hw_crit_error = 0;
402 if (sc->mfi_flags & MFI_FLAGS_1064R) {
403 sc->mfi_enable_intr = mfi_enable_intr_xscale;
404 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
405 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
406 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
407 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
408 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
409 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
410 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
411 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
412 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
413 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
415 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
417 sc->mfi_enable_intr = mfi_enable_intr_ppc;
418 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
419 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
420 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
424 /* Before we get too far, see if the firmware is working */
425 if ((error = mfi_transition_firmware(sc)) != 0) {
426 device_printf(sc->mfi_dev, "Firmware not in READY state, "
427 "error %d\n", error);
431 /* Start: LSIP200113393 */
432 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
433 1, 0, /* algnmnt, boundary */
434 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
435 BUS_SPACE_MAXADDR, /* highaddr */
436 NULL, NULL, /* filter, filterarg */
437 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
439 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
441 NULL, NULL, /* lockfunc, lockarg */
442 &sc->verbuf_h_dmat)) {
443 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
446 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
447 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
448 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
451 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
452 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
453 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
454 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
455 /* End: LSIP200113393 */
458 * Get information needed for sizing the contiguous memory for the
459 * frame pool. Size down the sgl parameter since we know that
460 * we will never need more than what's required for MAXPHYS.
461 * It would be nice if these constants were available at runtime
462 * instead of compile time.
464 status = sc->mfi_read_fw_status(sc);
465 max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
466 if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
467 device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
468 max_fw_cmds, mfi_max_cmds);
469 sc->mfi_max_fw_cmds = mfi_max_cmds;
471 sc->mfi_max_fw_cmds = max_fw_cmds;
473 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
474 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
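/*
 * Worked example (illustrative, assuming MFI_MAXPHYS is 128 KB and
 * PAGE_SIZE is 4 KB): (MFI_MAXPHYS / PAGE_SIZE) + 1 = 33, so even a
 * firmware reporting a larger max_fw_sge is clamped to 33 SG entries.
 */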
476 /* ThunderBolt Support get the contiguous memory */
478 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
479 mfi_tbolt_init_globals(sc);
480 device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
481 "MaxSgl = %d, state = %#x\n", max_fw_cmds,
482 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
483 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
485 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
486 1, 0, /* algnmnt, boundary */
487 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
488 BUS_SPACE_MAXADDR, /* highaddr */
489 NULL, NULL, /* filter, filterarg */
490 tb_mem_size, /* maxsize */
492 tb_mem_size, /* maxsegsize */
494 NULL, NULL, /* lockfunc, lockarg */
496 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
499 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
500 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
501 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
504 bzero(sc->request_message_pool, tb_mem_size);
505 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
506 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
508 /* For ThunderBolt memory init */
509 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
510 0x100, 0, /* alignmnt, boundary */
511 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
512 BUS_SPACE_MAXADDR, /* highaddr */
513 NULL, NULL, /* filter, filterarg */
514 MFI_FRAME_SIZE, /* maxsize */
516 MFI_FRAME_SIZE, /* maxsegsize */
518 NULL, NULL, /* lockfunc, lockarg */
519 &sc->mfi_tb_init_dmat)) {
520 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
523 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
524 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
525 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
528 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
529 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
530 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
531 &sc->mfi_tb_init_busaddr, 0);
532 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
534 device_printf(sc->mfi_dev,
535 "Thunderbolt pool preparation error\n");
540 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
541 we are taking it different from what we have allocated for Request
542 and reply descriptors to avoid confusion later
544 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
545 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
546 1, 0, /* algnmnt, boundary */
547 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
548 BUS_SPACE_MAXADDR, /* highaddr */
549 NULL, NULL, /* filter, filterarg */
550 tb_mem_size, /* maxsize */
552 tb_mem_size, /* maxsegsize */
554 NULL, NULL, /* lockfunc, lockarg */
555 &sc->mfi_tb_ioc_init_dmat)) {
556 device_printf(sc->mfi_dev,
557 "Cannot allocate comms DMA tag\n");
560 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
561 (void **)&sc->mfi_tb_ioc_init_desc,
562 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
563 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
566 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
567 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
568 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
569 &sc->mfi_tb_ioc_init_busaddr, 0);
572 * Create the dma tag for data buffers. Used both for block I/O
573 * and for various internal data queries.
575 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
576 1, 0, /* algnmnt, boundary */
577 BUS_SPACE_MAXADDR, /* lowaddr */
578 BUS_SPACE_MAXADDR, /* highaddr */
579 NULL, NULL, /* filter, filterarg */
580 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
581 sc->mfi_max_sge, /* nsegments */
582 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
583 BUS_DMA_ALLOCNOW, /* flags */
584 busdma_lock_mutex, /* lockfunc */
585 &sc->mfi_io_lock, /* lockfuncarg */
586 &sc->mfi_buffer_dmat)) {
587 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
592 * Allocate DMA memory for the comms queues. Keep it under 4GB for
593 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
594 * entry, so the calculated size here will be 1 more than
595 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
597 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
598 sizeof(struct mfi_hwcomms);
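/*
 * Worked example (illustrative): with mfi_max_fw_cmds = 128 and 4-byte
 * reply slots, commsz = 4 * 128 + sizeof(struct mfi_hwcomms). Because
 * mfi_hwcomms itself holds one reply slot, the reply queue effectively
 * has 129 entries, matching rq_entries = mfi_max_fw_cmds + 1 as
 * programmed in mfi_comms_init().
 */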
599 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
600 1, 0, /* algnmnt, boundary */
601 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
602 BUS_SPACE_MAXADDR, /* highaddr */
603 NULL, NULL, /* filter, filterarg */
604 commsz, /* maxsize */
606 commsz, /* maxsegsize */
608 NULL, NULL, /* lockfunc, lockarg */
609 &sc->mfi_comms_dmat)) {
610 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
613 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
614 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
615 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
618 bzero(sc->mfi_comms, commsz);
619 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
620 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
622 * Allocate DMA memory for the command frames. Keep them in the
623 * lower 4GB for efficiency. Calculate the size of the commands at
624 * the same time; each command is one 64 byte frame plus a set of
625 * additional frames for holding sg lists or other data.
626 * The assumption here is that the SG list will start at the second
627 * frame and not use the unused bytes in the first frame. While this
628 * isn't technically correct, it simplifies the calculation and allows
629 * for command frames that might be larger than an mfi_io_frame.
631 if (sizeof(bus_addr_t) == 8) {
632 sc->mfi_sge_size = sizeof(struct mfi_sg64);
633 sc->mfi_flags |= MFI_FLAGS_SG64;
635 sc->mfi_sge_size = sizeof(struct mfi_sg32);
637 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
638 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
639 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
640 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
641 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
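/*
 * Worked example (illustrative, assuming sizeof(struct mfi_sg32) == 8
 * and mfi_max_sge = 33): frames = (8 * 33 - 1) / 64 + 2 = 6, so
 * mfi_cmd_size = 6 * 64 = 384 bytes per command and framessz scales
 * that by mfi_max_fw_cmds.
 */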
642 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
643 64, 0, /* algnmnt, boundary */
644 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
645 BUS_SPACE_MAXADDR, /* highaddr */
646 NULL, NULL, /* filter, filterarg */
647 framessz, /* maxsize */
649 framessz, /* maxsegsize */
651 NULL, NULL, /* lockfunc, lockarg */
652 &sc->mfi_frames_dmat)) {
653 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
656 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
657 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
658 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
661 bzero(sc->mfi_frames, framessz);
662 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
663 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
665 * Allocate DMA memory for the frame sense data. Keep them in the
666 * lower 4GB for efficiency
668 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
669 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
670 4, 0, /* algnmnt, boundary */
671 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
672 BUS_SPACE_MAXADDR, /* highaddr */
673 NULL, NULL, /* filter, filterarg */
674 sensesz, /* maxsize */
676 sensesz, /* maxsegsize */
678 NULL, NULL, /* lockfunc, lockarg */
679 &sc->mfi_sense_dmat)) {
680 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
683 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
684 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
685 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
688 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
689 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
690 if ((error = mfi_alloc_commands(sc)) != 0)
693 /* Before moving the FW to operational state, check whether
694 * host memory is required by the FW or not
697 /* ThunderBolt MFI_IOC2 INIT */
698 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
699 sc->mfi_disable_intr(sc);
700 mtx_lock(&sc->mfi_io_lock);
701 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
702 device_printf(sc->mfi_dev,
703 "TB Init has failed with error %d\n",error);
704 mtx_unlock(&sc->mfi_io_lock);
707 mtx_unlock(&sc->mfi_io_lock);
709 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
711 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
712 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
714 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
717 sc->mfi_intr_ptr = mfi_intr_tbolt;
718 sc->mfi_enable_intr(sc);
720 if ((error = mfi_comms_init(sc)) != 0)
723 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
724 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
725 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
728 sc->mfi_intr_ptr = mfi_intr;
729 sc->mfi_enable_intr(sc);
731 if ((error = mfi_get_controller_info(sc)) != 0)
733 sc->disableOnlineCtrlReset = 0;
735 /* Register a config hook to probe the bus for arrays */
736 sc->mfi_ich.ich_func = mfi_startup;
737 sc->mfi_ich.ich_arg = sc;
738 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
739 device_printf(sc->mfi_dev, "Cannot establish configuration "
743 mtx_lock(&sc->mfi_io_lock);
744 if ((error = mfi_aen_setup(sc, 0)) != 0) {
745 mtx_unlock(&sc->mfi_io_lock);
748 mtx_unlock(&sc->mfi_io_lock);
751 * Register a shutdown handler.
753 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
754 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
755 device_printf(sc->mfi_dev, "Warning: shutdown event "
756 "registration failed\n");
760 * Create the control device for doing management
762 unit = device_get_unit(sc->mfi_dev);
763 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
764 0640, "mfi%d", unit);
766 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
767 if (sc->mfi_cdev != NULL)
768 sc->mfi_cdev->si_drv1 = sc;
769 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
770 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
771 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
772 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
773 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
774 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
775 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
776 &sc->mfi_keep_deleted_volumes, 0,
777 "Don't detach the mfid device for a busy volume that is deleted");
779 device_add_child(sc->mfi_dev, "mfip", -1);
780 bus_generic_attach(sc->mfi_dev);
782 /* Start the timeout watchdog */
783 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
784 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
787 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
788 mtx_lock(&sc->mfi_io_lock);
789 mfi_tbolt_sync_map_info(sc);
790 mtx_unlock(&sc->mfi_io_lock);
797 mfi_alloc_commands(struct mfi_softc *sc)
799 struct mfi_command *cm;
803 * XXX Should we allocate all the commands up front, or allocate on
804 * demand later like 'aac' does?
806 sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
807 sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
809 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
810 cm = &sc->mfi_commands[i];
811 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
812 sc->mfi_cmd_size * i);
813 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
814 sc->mfi_cmd_size * i;
815 cm->cm_frame->header.context = i;
816 cm->cm_sense = &sc->mfi_sense[i];
817 cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
820 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
821 &cm->cm_dmamap) == 0) {
822 mtx_lock(&sc->mfi_io_lock);
823 mfi_release_command(cm);
824 mtx_unlock(&sc->mfi_io_lock);
826 device_printf(sc->mfi_dev, "Failed to allocate %d "
827 "command blocks, only allocated %d\n",
828 sc->mfi_max_fw_cmds, i);
829 for (j = 0; j < i; j++) {
830 cm = &sc->mfi_commands[j];
831 bus_dmamap_destroy(sc->mfi_buffer_dmat,
834 free(sc->mfi_commands, M_MFIBUF);
835 sc->mfi_commands = NULL;
845 mfi_release_command(struct mfi_command *cm)
847 struct mfi_frame_header *hdr;
850 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
853 * Zero out the important fields of the frame, but make sure the
854 * context field is preserved. For efficiency, handle the fields
855 * as 32 bit words. Clear out the first S/G entry too for safety.
857 hdr = &cm->cm_frame->header;
858 if (cm->cm_data != NULL && hdr->sg_count) {
859 cm->cm_sg->sg32[0].len = 0;
860 cm->cm_sg->sg32[0].addr = 0;
864 * Command may be on other queues e.g. busy queue depending on the
865 * flow of a previous call to mfi_mapcmd, so ensure it's dequeued
868 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
870 if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
871 mfi_remove_ready(cm);
873 /* We're not expecting it to be on any other queue but check */
874 if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
875 panic("Command %p is still on another queue, flags = %#x",
880 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
881 mfi_tbolt_return_cmd(cm->cm_sc,
882 cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
886 hdr_data = (uint32_t *)cm->cm_frame;
887 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
888 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
889 hdr_data[4] = 0; /* flags, timeout */
890 hdr_data[5] = 0; /* data_len */
892 cm->cm_extra_frames = 0;
894 cm->cm_complete = NULL;
895 cm->cm_private = NULL;
898 cm->cm_total_frame_size = 0;
899 cm->retry_for_fw_reset = 0;
901 mfi_enqueue_free(cm);
905 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
906 uint32_t opcode, void **bufp, size_t bufsize)
908 struct mfi_command *cm;
909 struct mfi_dcmd_frame *dcmd;
911 uint32_t context = 0;
913 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
915 cm = mfi_dequeue_free(sc);
919 /* Zero out the MFI frame */
920 context = cm->cm_frame->header.context;
921 bzero(cm->cm_frame, sizeof(union mfi_frame));
922 cm->cm_frame->header.context = context;
924 if ((bufsize > 0) && (bufp != NULL)) {
926 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
928 mfi_release_command(cm);
937 dcmd = &cm->cm_frame->dcmd;
938 bzero(dcmd->mbox, MFI_MBOX_SIZE);
939 dcmd->header.cmd = MFI_CMD_DCMD;
940 dcmd->header.timeout = 0;
941 dcmd->header.flags = 0;
942 dcmd->header.data_len = bufsize;
943 dcmd->header.scsi_status = 0;
944 dcmd->opcode = opcode;
945 cm->cm_sg = &dcmd->sgl;
946 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
949 cm->cm_private = buf;
950 cm->cm_len = bufsize;
953 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
954 *bufp = buf;
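/*
 * Usage sketch (illustrative; mirrors callers like mfi_get_log_state()
 * below, with error handling and buffer release omitted):
 *
 *	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
 *	    (void **)&info, sizeof(*info));
 *	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
 *	error = mfi_mapcmd(sc, cm);
 *
 * Callers must hold mfi_io_lock, per the mtx_assert() above.
 */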
959 mfi_comms_init(struct mfi_softc *sc)
961 struct mfi_command *cm;
962 struct mfi_init_frame *init;
963 struct mfi_init_qinfo *qinfo;
965 uint32_t context = 0;
967 mtx_lock(&sc->mfi_io_lock);
968 if ((cm = mfi_dequeue_free(sc)) == NULL) {
969 mtx_unlock(&sc->mfi_io_lock);
973 /* Zero out the MFI frame */
974 context = cm->cm_frame->header.context;
975 bzero(cm->cm_frame, sizeof(union mfi_frame));
976 cm->cm_frame->header.context = context;
979 * Abuse the SG list area of the frame to hold the init_qinfo
982 init = &cm->cm_frame->init;
983 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
985 bzero(qinfo, sizeof(struct mfi_init_qinfo));
986 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
987 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
988 offsetof(struct mfi_hwcomms, hw_reply_q);
989 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
990 offsetof(struct mfi_hwcomms, hw_pi);
991 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
992 offsetof(struct mfi_hwcomms, hw_ci);
994 init->header.cmd = MFI_CMD_INIT;
995 init->header.data_len = sizeof(struct mfi_init_qinfo);
996 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
998 cm->cm_flags = MFI_CMD_POLLED;
1000 if ((error = mfi_mapcmd(sc, cm)) != 0)
1001 device_printf(sc->mfi_dev, "failed to send init command\n");
1002 mfi_release_command(cm);
1003 mtx_unlock(&sc->mfi_io_lock);
1009 mfi_get_controller_info(struct mfi_softc *sc)
1011 struct mfi_command *cm = NULL;
1012 struct mfi_ctrl_info *ci = NULL;
1013 uint32_t max_sectors_1, max_sectors_2;
1016 mtx_lock(&sc->mfi_io_lock);
1017 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1018 (void **)&ci, sizeof(*ci));
1021 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1023 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1024 device_printf(sc->mfi_dev, "Failed to get controller info\n");
1025 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1031 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1032 BUS_DMASYNC_POSTREAD);
1033 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1035 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1036 max_sectors_2 = ci->max_request_size;
1037 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1038 sc->disableOnlineCtrlReset =
1039 ci->properties.OnOffProperties.disableOnlineCtrlReset;
1045 mfi_release_command(cm);
1046 mtx_unlock(&sc->mfi_io_lock);
1051 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1053 struct mfi_command *cm = NULL;
1056 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1057 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1058 (void **)log_state, sizeof(**log_state));
1061 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1063 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1064 device_printf(sc->mfi_dev, "Failed to get log state\n");
1068 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1069 BUS_DMASYNC_POSTREAD);
1070 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1074 mfi_release_command(cm);
1080 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1082 struct mfi_evt_log_state *log_state = NULL;
1083 union mfi_evt class_locale;
1087 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1089 class_locale.members.reserved = 0;
1090 class_locale.members.locale = mfi_event_locale;
1091 class_locale.members.evt_class = mfi_event_class;
1093 if (seq_start == 0) {
1094 if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1096 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1099 * Walk through any events that fired since the last
1102 if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1103 log_state->newest_seq_num)) != 0)
1105 seq = log_state->newest_seq_num;
1108 error = mfi_aen_register(sc, seq, class_locale.word);
1110 free(log_state, M_MFIBUF);
1116 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1119 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1120 cm->cm_complete = NULL;
1123 * MegaCli can issue a DCMD of 0. In this case do nothing
1124 * and return 0 to it as status
1126 if (cm->cm_frame->dcmd.opcode == 0) {
1127 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1129 return (cm->cm_error);
1131 mfi_enqueue_ready(cm);
1133 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1134 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1135 return (cm->cm_error);
1139 mfi_free(struct mfi_softc *sc)
1141 struct mfi_command *cm;
1144 callout_drain(&sc->mfi_watchdog_callout);
1146 if (sc->mfi_cdev != NULL)
1147 destroy_dev(sc->mfi_cdev);
1149 if (sc->mfi_commands != NULL) {
1150 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1151 cm = &sc->mfi_commands[i];
1152 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1154 free(sc->mfi_commands, M_MFIBUF);
1155 sc->mfi_commands = NULL;
1159 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1160 if (sc->mfi_irq != NULL)
1161 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1164 if (sc->mfi_sense_busaddr != 0)
1165 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1166 if (sc->mfi_sense != NULL)
1167 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1168 sc->mfi_sense_dmamap);
1169 if (sc->mfi_sense_dmat != NULL)
1170 bus_dma_tag_destroy(sc->mfi_sense_dmat);
1172 if (sc->mfi_frames_busaddr != 0)
1173 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1174 if (sc->mfi_frames != NULL)
1175 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1176 sc->mfi_frames_dmamap);
1177 if (sc->mfi_frames_dmat != NULL)
1178 bus_dma_tag_destroy(sc->mfi_frames_dmat);
1180 if (sc->mfi_comms_busaddr != 0)
1181 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1182 if (sc->mfi_comms != NULL)
1183 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1184 sc->mfi_comms_dmamap);
1185 if (sc->mfi_comms_dmat != NULL)
1186 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1188 /* ThunderBolt contiguous memory free here */
1189 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1190 if (sc->mfi_tb_busaddr != 0)
1191 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1192 if (sc->request_message_pool != NULL)
1193 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1195 if (sc->mfi_tb_dmat != NULL)
1196 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1198 /* Version buffer memory free */
1199 /* Start LSIP200113393 */
1200 if (sc->verbuf_h_busaddr != 0)
1201 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1202 if (sc->verbuf != NULL)
1203 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1204 sc->verbuf_h_dmamap);
1205 if (sc->verbuf_h_dmat != NULL)
1206 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1208 /* End LSIP200113393 */
1209 /* ThunderBolt INIT packet memory Free */
1210 if (sc->mfi_tb_init_busaddr != 0)
1211 bus_dmamap_unload(sc->mfi_tb_init_dmat,
1212 sc->mfi_tb_init_dmamap);
1213 if (sc->mfi_tb_init != NULL)
1214 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1215 sc->mfi_tb_init_dmamap);
1216 if (sc->mfi_tb_init_dmat != NULL)
1217 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1219 /* ThunderBolt IOC Init Desc memory free here */
1220 if (sc->mfi_tb_ioc_init_busaddr != 0)
1221 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1222 sc->mfi_tb_ioc_init_dmamap);
1223 if (sc->mfi_tb_ioc_init_desc != NULL)
1224 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1225 sc->mfi_tb_ioc_init_desc,
1226 sc->mfi_tb_ioc_init_dmamap);
1227 if (sc->mfi_tb_ioc_init_dmat != NULL)
1228 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1229 if (sc->mfi_cmd_pool_tbolt != NULL) {
1230 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1231 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1232 free(sc->mfi_cmd_pool_tbolt[i],
1234 sc->mfi_cmd_pool_tbolt[i] = NULL;
1237 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1238 sc->mfi_cmd_pool_tbolt = NULL;
1240 if (sc->request_desc_pool != NULL) {
1241 free(sc->request_desc_pool, M_MFIBUF);
1242 sc->request_desc_pool = NULL;
1245 if (sc->mfi_buffer_dmat != NULL)
1246 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1247 if (sc->mfi_parent_dmat != NULL)
1248 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1250 if (mtx_initialized(&sc->mfi_io_lock)) {
1251 mtx_destroy(&sc->mfi_io_lock);
1252 sx_destroy(&sc->mfi_config_lock);
1259 mfi_startup(void *arg)
1261 struct mfi_softc *sc;
1263 sc = (struct mfi_softc *)arg;
1265 config_intrhook_disestablish(&sc->mfi_ich);
1267 sc->mfi_enable_intr(sc);
1268 sx_xlock(&sc->mfi_config_lock);
1269 mtx_lock(&sc->mfi_io_lock);
1271 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1273 mtx_unlock(&sc->mfi_io_lock);
1274 sx_xunlock(&sc->mfi_config_lock);
1280 struct mfi_softc *sc;
1281 struct mfi_command *cm;
1282 uint32_t pi, ci, context;
1284 sc = (struct mfi_softc *)arg;
1286 if (sc->mfi_check_clear_intr(sc))
1290 pi = sc->mfi_comms->hw_pi;
1291 ci = sc->mfi_comms->hw_ci;
1292 mtx_lock(&sc->mfi_io_lock);
1294 context = sc->mfi_comms->hw_reply_q[ci];
1295 if (context < sc->mfi_max_fw_cmds) {
1296 cm = &sc->mfi_commands[context];
1297 mfi_remove_busy(cm);
1299 mfi_complete(sc, cm);
1301 if (++ci == (sc->mfi_max_fw_cmds + 1))
1305 sc->mfi_comms->hw_ci = ci;
1307 /* Give deferred I/O a chance to run */
1308 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1310 mtx_unlock(&sc->mfi_io_lock);
1313 * Dummy read to flush the bus; this ensures that the indexes are up
1314 * to date. Restart processing if more commands have come in.
1316 (void)sc->mfi_read_fw_status(sc);
1317 if (pi != sc->mfi_comms->hw_pi)
1324 mfi_shutdown(struct mfi_softc *sc)
1326 struct mfi_dcmd_frame *dcmd;
1327 struct mfi_command *cm;
1331 if (sc->mfi_aen_cm != NULL) {
1332 sc->cm_aen_abort = 1;
1333 mfi_abort(sc, &sc->mfi_aen_cm);
1336 if (sc->mfi_map_sync_cm != NULL) {
1337 sc->cm_map_abort = 1;
1338 mfi_abort(sc, &sc->mfi_map_sync_cm);
1341 mtx_lock(&sc->mfi_io_lock);
1342 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1344 mtx_unlock(&sc->mfi_io_lock);
1348 dcmd = &cm->cm_frame->dcmd;
1349 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1350 cm->cm_flags = MFI_CMD_POLLED;
1353 if ((error = mfi_mapcmd(sc, cm)) != 0)
1354 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1356 mfi_release_command(cm);
1357 mtx_unlock(&sc->mfi_io_lock);
1362 mfi_syspdprobe(struct mfi_softc *sc)
1364 struct mfi_frame_header *hdr;
1365 struct mfi_command *cm = NULL;
1366 struct mfi_pd_list *pdlist = NULL;
1367 struct mfi_system_pd *syspd, *tmp;
1368 struct mfi_system_pending *syspd_pend;
1369 int error, i, found;
1371 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1372 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1373 /* Add SYSTEM PDs */
1374 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1375 (void **)&pdlist, sizeof(*pdlist));
1377 device_printf(sc->mfi_dev,
1378 "Error while forming SYSTEM PD list\n");
1382 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1383 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1384 cm->cm_frame->dcmd.mbox[1] = 0;
1385 if (mfi_mapcmd(sc, cm) != 0) {
1386 device_printf(sc->mfi_dev,
1387 "Failed to get syspd device listing\n");
1390 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1391 BUS_DMASYNC_POSTREAD);
1392 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1393 hdr = &cm->cm_frame->header;
1394 if (hdr->cmd_status != MFI_STAT_OK) {
1395 device_printf(sc->mfi_dev,
1396 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1399 /* Get each PD and add it to the system */
1400 for (i = 0; i < pdlist->count; i++) {
1401 if (pdlist->addr[i].device_id ==
1402 pdlist->addr[i].encl_device_id)
1405 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1406 if (syspd->pd_id == pdlist->addr[i].device_id)
1409 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1410 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1414 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1416 /* Delete SYSPDs whose state has changed */
1417 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1419 for (i = 0; i < pdlist->count; i++) {
1420 if (syspd->pd_id == pdlist->addr[i].device_id) {
1427 mtx_unlock(&sc->mfi_io_lock);
1429 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1431 mtx_lock(&sc->mfi_io_lock);
1436 free(pdlist, M_MFIBUF);
1438 mfi_release_command(cm);
1444 mfi_ldprobe(struct mfi_softc *sc)
1446 struct mfi_frame_header *hdr;
1447 struct mfi_command *cm = NULL;
1448 struct mfi_ld_list *list = NULL;
1449 struct mfi_disk *ld;
1450 struct mfi_disk_pending *ld_pend;
1453 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1454 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1456 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1457 (void **)&list, sizeof(*list));
1461 cm->cm_flags = MFI_CMD_DATAIN;
1462 if (mfi_wait_command(sc, cm) != 0) {
1463 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1467 hdr = &cm->cm_frame->header;
1468 if (hdr->cmd_status != MFI_STAT_OK) {
1469 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1474 for (i = 0; i < list->ld_count; i++) {
1475 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1476 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1479 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1480 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1483 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1488 free(list, M_MFIBUF);
1490 mfi_release_command(cm);
1496 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1497 * the bits in 24-31 are all set, then it is the number of seconds since
1498 * boot.
1501 format_timestamp(uint32_t timestamp)
1503 static char buffer[32];
1505 if ((timestamp & 0xff000000) == 0xff000000)
1506 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1509 snprintf(buffer, sizeof(buffer), "%us", timestamp);
1514 format_class(int8_t class)
1516 static char buffer[6];
1519 case MFI_EVT_CLASS_DEBUG:
1521 case MFI_EVT_CLASS_PROGRESS:
1522 return ("progress");
1523 case MFI_EVT_CLASS_INFO:
1525 case MFI_EVT_CLASS_WARNING:
1527 case MFI_EVT_CLASS_CRITICAL:
1529 case MFI_EVT_CLASS_FATAL:
1531 case MFI_EVT_CLASS_DEAD:
1534 snprintf(buffer, sizeof(buffer), "%d", class);
1540 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1542 struct mfi_system_pd *syspd = NULL;
1544 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1545 format_timestamp(detail->time), detail->evt_class.members.locale,
1546 format_class(detail->evt_class.members.evt_class),
1547 detail->description);
1549 /* Don't act on old AENs or while shutting down */
1550 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1553 switch (detail->arg_type) {
1554 case MR_EVT_ARGS_NONE:
1555 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1556 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1557 if (mfi_detect_jbod_change) {
1559 * Probe for new SYSPD's and Delete
1562 sx_xlock(&sc->mfi_config_lock);
1563 mtx_lock(&sc->mfi_io_lock);
1565 mtx_unlock(&sc->mfi_io_lock);
1566 sx_xunlock(&sc->mfi_config_lock);
1570 case MR_EVT_ARGS_LD_STATE:
1571 /* At load time the driver reads all the events starting
1572 * from the one that was logged after shutdown. Avoid
1575 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1577 struct mfi_disk *ld;
1578 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1580 detail->args.ld_state.ld.target_id)
1584 Fix: for kernel panics when SSCD is removed
1585 KASSERT(ld != NULL, ("volume dissappeared"));
1589 device_delete_child(sc->mfi_dev, ld->ld_dev);
1594 case MR_EVT_ARGS_PD:
1595 if (detail->code == MR_EVT_PD_REMOVED) {
1596 if (mfi_detect_jbod_change) {
1598 * If the removed device is a SYSPD then
1601 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1604 detail->args.pd.device_id) {
1606 device_delete_child(
1615 if (detail->code == MR_EVT_PD_INSERTED) {
1616 if (mfi_detect_jbod_change) {
1617 /* Probe for new SYSPD's */
1618 sx_xlock(&sc->mfi_config_lock);
1619 mtx_lock(&sc->mfi_io_lock);
1621 mtx_unlock(&sc->mfi_io_lock);
1622 sx_xunlock(&sc->mfi_config_lock);
1625 if (sc->mfi_cam_rescan_cb != NULL &&
1626 (detail->code == MR_EVT_PD_INSERTED ||
1627 detail->code == MR_EVT_PD_REMOVED)) {
1628 sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1635 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1637 struct mfi_evt_queue_elm *elm;
1639 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1640 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1643 memcpy(&elm->detail, detail, sizeof(*detail));
1644 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1645 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1649 mfi_handle_evt(void *context, int pending)
1651 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1652 struct mfi_softc *sc;
1653 struct mfi_evt_queue_elm *elm;
1657 mtx_lock(&sc->mfi_io_lock);
1658 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1659 mtx_unlock(&sc->mfi_io_lock);
1660 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1661 TAILQ_REMOVE(&queue, elm, link);
1662 mfi_decode_evt(sc, &elm->detail);
1663 free(elm, M_MFIBUF);
1668 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1670 struct mfi_command *cm;
1671 struct mfi_dcmd_frame *dcmd;
1672 union mfi_evt current_aen, prior_aen;
1673 struct mfi_evt_detail *ed = NULL;
1676 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1678 current_aen.word = locale;
1679 if (sc->mfi_aen_cm != NULL) {
1681 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1682 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1683 !((prior_aen.members.locale & current_aen.members.locale)
1684 ^current_aen.members.locale)) {
1687 prior_aen.members.locale |= current_aen.members.locale;
1688 if (prior_aen.members.evt_class
1689 < current_aen.members.evt_class)
1690 current_aen.members.evt_class =
1691 prior_aen.members.evt_class;
1692 mfi_abort(sc, &sc->mfi_aen_cm);
1696 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1697 (void **)&ed, sizeof(*ed));
1701 dcmd = &cm->cm_frame->dcmd;
1702 ((uint32_t *)&dcmd->mbox)[0] = seq;
1703 ((uint32_t *)&dcmd->mbox)[1] = locale;
1704 cm->cm_flags = MFI_CMD_DATAIN;
1705 cm->cm_complete = mfi_aen_complete;
1707 sc->last_seq_num = seq;
1708 sc->mfi_aen_cm = cm;
1710 mfi_enqueue_ready(cm);
1718 mfi_aen_complete(struct mfi_command *cm)
1720 struct mfi_frame_header *hdr;
1721 struct mfi_softc *sc;
1722 struct mfi_evt_detail *detail;
1723 struct mfi_aen *mfi_aen_entry, *tmp;
1724 int seq = 0, aborted = 0;
1727 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1729 if (sc->mfi_aen_cm == NULL)
1732 hdr = &cm->cm_frame->header;
1734 if (sc->cm_aen_abort ||
1735 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1736 sc->cm_aen_abort = 0;
1739 sc->mfi_aen_triggered = 1;
1740 if (sc->mfi_poll_waiting) {
1741 sc->mfi_poll_waiting = 0;
1742 selwakeup(&sc->mfi_select);
1744 detail = cm->cm_data;
1745 mfi_queue_evt(sc, detail);
1746 seq = detail->seq + 1;
1747 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1749 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1751 PROC_LOCK(mfi_aen_entry->p);
1752 kern_psignal(mfi_aen_entry->p, SIGIO);
1753 PROC_UNLOCK(mfi_aen_entry->p);
1754 free(mfi_aen_entry, M_MFIBUF);
1758 free(cm->cm_data, M_MFIBUF);
1759 wakeup(&sc->mfi_aen_cm);
1760 sc->mfi_aen_cm = NULL;
1761 mfi_release_command(cm);
1763 /* set it up again so the driver can catch more events */
1765 mfi_aen_setup(sc, seq);
1768 #define MAX_EVENTS 15
1771 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1773 struct mfi_command *cm;
1774 struct mfi_dcmd_frame *dcmd;
1775 struct mfi_evt_list *el;
1776 union mfi_evt class_locale;
1777 int error, i, seq, size;
1779 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1781 class_locale.members.reserved = 0;
1782 class_locale.members.locale = mfi_event_locale;
1783 class_locale.members.evt_class = mfi_event_class;
1785 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1787 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1791 for (seq = start_seq;;) {
1792 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1797 dcmd = &cm->cm_frame->dcmd;
1798 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1799 dcmd->header.cmd = MFI_CMD_DCMD;
1800 dcmd->header.timeout = 0;
1801 dcmd->header.data_len = size;
1802 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1803 ((uint32_t *)&dcmd->mbox)[0] = seq;
1804 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1805 cm->cm_sg = &dcmd->sgl;
1806 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1807 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1811 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1812 device_printf(sc->mfi_dev,
1813 "Failed to get controller entries\n");
1814 mfi_release_command(cm);
1818 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1819 BUS_DMASYNC_POSTREAD);
1820 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1822 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1823 mfi_release_command(cm);
1826 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1827 device_printf(sc->mfi_dev,
1828 "Error %d fetching controller entries\n",
1829 dcmd->header.cmd_status);
1830 mfi_release_command(cm);
1834 mfi_release_command(cm);
1836 for (i = 0; i < el->count; i++) {
1838 * If this event is newer than 'stop_seq' then
1839 * break out of the loop. Note that the log
1840 * is a circular buffer so we have to handle
1841 * the case that our stop point is earlier in
1842 * the buffer than our start point.
1844 if (el->event[i].seq >= stop_seq) {
1845 if (start_seq <= stop_seq)
1847 else if (el->event[i].seq < start_seq)
1850 mfi_queue_evt(sc, &el->event[i]);
1852 seq = el->event[el->count - 1].seq + 1;
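/*
 * Wrap-around example (illustrative): with start_seq = 1000 and
 * stop_seq = 50 the log has wrapped, so an entry with seq = 1500
 * (>= stop_seq but not < start_seq) is still parsed, while a
 * post-wrap entry with seq = 60 satisfies both tests and ends the walk.
 */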
1860 mfi_add_ld(struct mfi_softc *sc, int id)
1862 struct mfi_command *cm;
1863 struct mfi_dcmd_frame *dcmd = NULL;
1864 struct mfi_ld_info *ld_info = NULL;
1865 struct mfi_disk_pending *ld_pend;
1868 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1870 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1871 if (ld_pend != NULL) {
1872 ld_pend->ld_id = id;
1873 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1876 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1877 (void **)&ld_info, sizeof(*ld_info));
1879 device_printf(sc->mfi_dev,
1880 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1882 free(ld_info, M_MFIBUF);
1885 cm->cm_flags = MFI_CMD_DATAIN;
1886 dcmd = &cm->cm_frame->dcmd;
1888 if (mfi_wait_command(sc, cm) != 0) {
1889 device_printf(sc->mfi_dev,
1890 "Failed to get logical drive: %d\n", id);
1891 free(ld_info, M_MFIBUF);
1894 if (ld_info->ld_config.params.isSSCD != 1)
1895 mfi_add_ld_complete(cm);
1897 mfi_release_command(cm);
1898 if (ld_info) /* for SSCD drives, ld_info is freed here */
1899 free(ld_info, M_MFIBUF);
1905 mfi_add_ld_complete(struct mfi_command *cm)
1907 struct mfi_frame_header *hdr;
1908 struct mfi_ld_info *ld_info;
1909 struct mfi_softc *sc;
1913 hdr = &cm->cm_frame->header;
1914 ld_info = cm->cm_private;
1916 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1917 free(ld_info, M_MFIBUF);
1918 wakeup(&sc->mfi_map_sync_cm);
1919 mfi_release_command(cm);
1922 wakeup(&sc->mfi_map_sync_cm);
1923 mfi_release_command(cm);
1925 mtx_unlock(&sc->mfi_io_lock);
1927 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1928 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1929 free(ld_info, M_MFIBUF);
1931 mtx_lock(&sc->mfi_io_lock);
1935 device_set_ivars(child, ld_info);
1936 device_set_desc(child, "MFI Logical Disk");
1937 bus_generic_attach(sc->mfi_dev);
1939 mtx_lock(&sc->mfi_io_lock);
1942 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1944 struct mfi_command *cm;
1945 struct mfi_dcmd_frame *dcmd = NULL;
1946 struct mfi_pd_info *pd_info = NULL;
1947 struct mfi_system_pending *syspd_pend;
1950 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1952 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1953 if (syspd_pend != NULL) {
1954 syspd_pend->pd_id = id;
1955 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1958 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1959 (void **)&pd_info, sizeof(*pd_info));
1961 device_printf(sc->mfi_dev,
1962 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1965 free(pd_info, M_MFIBUF);
1968 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1969 dcmd = &cm->cm_frame->dcmd;
1971 dcmd->header.scsi_status = 0;
1972 dcmd->header.pad0 = 0;
1973 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1974 device_printf(sc->mfi_dev,
1975 "Failed to get physical drive info %d\n", id);
1976 free(pd_info, M_MFIBUF);
1977 mfi_release_command(cm);
1980 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1981 BUS_DMASYNC_POSTREAD);
1982 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1983 mfi_add_sys_pd_complete(cm);
1988 mfi_add_sys_pd_complete(struct mfi_command *cm)
1990 struct mfi_frame_header *hdr;
1991 struct mfi_pd_info *pd_info;
1992 struct mfi_softc *sc;
1996 hdr = &cm->cm_frame->header;
1997 pd_info = cm->cm_private;
1999 if (hdr->cmd_status != MFI_STAT_OK) {
2000 free(pd_info, M_MFIBUF);
2001 mfi_release_command(cm);
2004 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2005 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2006 pd_info->ref.v.device_id);
2007 free(pd_info, M_MFIBUF);
2008 mfi_release_command(cm);
2011 mfi_release_command(cm);
2013 mtx_unlock(&sc->mfi_io_lock);
2015 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2016 device_printf(sc->mfi_dev, "Failed to add system pd\n");
2017 free(pd_info, M_MFIBUF);
2019 mtx_lock(&sc->mfi_io_lock);
2023 device_set_ivars(child, pd_info);
2024 device_set_desc(child, "MFI System PD");
2025 bus_generic_attach(sc->mfi_dev);
2027 mtx_lock(&sc->mfi_io_lock);
2030 static struct mfi_command *
2031 mfi_bio_command(struct mfi_softc *sc)
2034 struct mfi_command *cm = NULL;
2036 /* Reserve two commands to avoid starvation of IOCTLs */
2037 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2040 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2043 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2044 cm = mfi_build_ldio(sc, bio);
2045 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2046 cm = mfi_build_syspdio(sc, bio);
2049 mfi_enqueue_bio(sc, bio);
/*
 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
 */
static int
mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
{
    int cdb_len;

    if (((lba & 0x1fffff) == lba)
     && ((block_count & 0xff) == block_count)
     && (byte2 == 0)) {
        /* We can fit in a 6 byte cdb */
        struct scsi_rw_6 *scsi_cmd;

        scsi_cmd = (struct scsi_rw_6 *)cdb;
        scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
        scsi_ulto3b(lba, scsi_cmd->addr);
        scsi_cmd->length = block_count & 0xff;
        scsi_cmd->control = 0;
        cdb_len = sizeof(*scsi_cmd);
    } else if (((block_count & 0xffff) == block_count) &&
        ((lba & 0xffffffff) == lba)) {
        /* Need a 10 byte CDB */
        struct scsi_rw_10 *scsi_cmd;

        scsi_cmd = (struct scsi_rw_10 *)cdb;
        scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
        scsi_cmd->byte2 = byte2;
        scsi_ulto4b(lba, scsi_cmd->addr);
        scsi_cmd->reserved = 0;
        scsi_ulto2b(block_count, scsi_cmd->length);
        scsi_cmd->control = 0;
        cdb_len = sizeof(*scsi_cmd);
    } else if (((block_count & 0xffffffff) == block_count) &&
        ((lba & 0xffffffff) == lba)) {
        /* Block count is too big for a 10 byte CDB, use a 12 byte CDB */
        struct scsi_rw_12 *scsi_cmd;

        scsi_cmd = (struct scsi_rw_12 *)cdb;
        scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
        scsi_cmd->byte2 = byte2;
        scsi_ulto4b(lba, scsi_cmd->addr);
        scsi_cmd->reserved = 0;
        scsi_ulto4b(block_count, scsi_cmd->length);
        scsi_cmd->control = 0;
        cdb_len = sizeof(*scsi_cmd);
    } else {
        /*
         * 16 byte CDB.  We'll only get here if the LBA is larger
         * than 32 bits, or if the block count is larger than
         * 32 bits.
         */
        struct scsi_rw_16 *scsi_cmd;

        scsi_cmd = (struct scsi_rw_16 *)cdb;
        scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
        scsi_cmd->byte2 = byte2;
        scsi_u64to8b(lba, scsi_cmd->addr);
        scsi_cmd->reserved = 0;
        scsi_ulto4b(block_count, scsi_cmd->length);
        scsi_cmd->control = 0;
        cdb_len = sizeof(*scsi_cmd);
    }

    return (cdb_len);
}
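/*
 * Worked example of the CDB size selection above (illustrative only):
 * an LBA of 0x100000 (below 2^21) with a block count of 8 fits the
 * 6 byte CDB; the same count at LBA 0x200000 needs the 10 byte form;
 * a block count of 0x10000 forces the 12 byte form; and any LBA at or
 * above 2^32 falls through to the 16 byte form.
 */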
extern char *unmapped_buf;

static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
    struct mfi_command *cm;
    struct mfi_pass_frame *pass;
    uint32_t context = 0;
    int flags = 0, blkcount = 0, readop;
    uint8_t cdb_len;

    mtx_assert(&sc->mfi_io_lock, MA_OWNED);

    if ((cm = mfi_dequeue_free(sc)) == NULL)
        return (NULL);

    /* Zero out the MFI frame */
    context = cm->cm_frame->header.context;
    bzero(cm->cm_frame, sizeof(union mfi_frame));
    cm->cm_frame->header.context = context;
    pass = &cm->cm_frame->pass;
    bzero(pass->cdb, 16);
    pass->header.cmd = MFI_CMD_PD_SCSI_IO;
    switch (bio->bio_cmd & 0x03) {
    case BIO_READ:
        flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
        readop = 1;
        break;
    case BIO_WRITE:
        flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
        readop = 0;
        break;
    default:
        /* TODO: what about BIO_DELETE??? */
        panic("Unsupported bio command %x\n", bio->bio_cmd);
    }

    /* Cheat with the sector length to avoid a non-constant division */
    blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
    /* Fill the LBA and Transfer length in CDB */
    cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
        pass->cdb);
    pass->header.target_id = (uintptr_t)bio->bio_driver1;
    pass->header.lun_id = 0;
    pass->header.timeout = 0;
    pass->header.flags = 0;
    pass->header.scsi_status = 0;
    pass->header.sense_len = MFI_SENSE_LEN;
    pass->header.data_len = bio->bio_bcount;
    pass->header.cdb_len = cdb_len;
    pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
    pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
    cm->cm_complete = mfi_bio_complete;
    cm->cm_private = bio;
    cm->cm_data = unmapped_buf;
    cm->cm_len = bio->bio_bcount;
    cm->cm_sg = &pass->sgl;
    cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
    cm->cm_flags = flags;

    return (cm);
}
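/*
 * The blkcount computation above is plain round-up division.  For
 * example (illustrative), with MFI_SECTOR_LEN of 512 a 4096-byte bio
 * yields a block count of 8, while a 4097-byte bio rounds up to 9.
 */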
static struct mfi_command *
mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
{
    struct mfi_io_frame *io;
    struct mfi_command *cm;
    int flags;
    int blkcount;
    uint32_t context = 0;

    mtx_assert(&sc->mfi_io_lock, MA_OWNED);

    if ((cm = mfi_dequeue_free(sc)) == NULL)
        return (NULL);

    /* Zero out the MFI frame */
    context = cm->cm_frame->header.context;
    bzero(cm->cm_frame, sizeof(union mfi_frame));
    cm->cm_frame->header.context = context;
    io = &cm->cm_frame->io;
    switch (bio->bio_cmd & 0x03) {
    case BIO_READ:
        io->header.cmd = MFI_CMD_LD_READ;
        flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
        break;
    case BIO_WRITE:
        io->header.cmd = MFI_CMD_LD_WRITE;
        flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
        break;
    default:
        /* TODO: what about BIO_DELETE??? */
        panic("Unsupported bio command %x\n", bio->bio_cmd);
    }

    /* Cheat with the sector length to avoid a non-constant division */
    blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
    io->header.target_id = (uintptr_t)bio->bio_driver1;
    io->header.timeout = 0;
    io->header.flags = 0;
    io->header.scsi_status = 0;
    io->header.sense_len = MFI_SENSE_LEN;
    io->header.data_len = blkcount;
    io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
    io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
    io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
    io->lba_lo = bio->bio_pblkno & 0xffffffff;
    cm->cm_complete = mfi_bio_complete;
    cm->cm_private = bio;
    cm->cm_data = unmapped_buf;
    cm->cm_len = bio->bio_bcount;
    cm->cm_sg = &io->sgl;
    cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
    cm->cm_flags = flags;

    return (cm);
}
static void
mfi_bio_complete(struct mfi_command *cm)
{
    struct bio *bio;
    struct mfi_frame_header *hdr;
    struct mfi_softc *sc;

    bio = cm->cm_private;
    hdr = &cm->cm_frame->header;
    sc = cm->cm_sc;

    if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
        bio->bio_flags |= BIO_ERROR;
        bio->bio_error = EIO;
        device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
            "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
        mfi_print_sense(cm->cm_sc, cm->cm_sense);
    } else if (cm->cm_error != 0) {
        bio->bio_flags |= BIO_ERROR;
        bio->bio_error = cm->cm_error;
        device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
            cm, cm->cm_error);
    }

    mfi_release_command(cm);
    mfi_disk_complete(bio);
}
void
mfi_startio(struct mfi_softc *sc)
{
    struct mfi_command *cm;
    struct ccb_hdr *ccbh;

    for (;;) {
        /* Don't bother if we're short on resources */
        if (sc->mfi_flags & MFI_FLAGS_QFRZN)
            break;

        /* Try a command that has already been prepared */
        cm = mfi_dequeue_ready(sc);

        if (cm == NULL) {
            if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
                cm = sc->mfi_cam_start(ccbh);
        }

        /* Nope, so look for work on the bioq */
        if (cm == NULL)
            cm = mfi_bio_command(sc);

        /* No work available, so exit */
        if (cm == NULL)
            break;

        /* Send the command to the controller */
        if (mfi_mapcmd(sc, cm) != 0) {
            device_printf(sc->mfi_dev, "Failed to startio\n");
            mfi_requeue_ready(cm);
            break;
        }
    }
}
int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
    int error, polled;

    mtx_assert(&sc->mfi_io_lock, MA_OWNED);

    if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
        polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
        if (cm->cm_flags & MFI_CMD_CCB)
            error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
                cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
                polled);
        else if (cm->cm_flags & MFI_CMD_BIO)
            error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
                cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
                polled);
        else
            error = bus_dmamap_load(sc->mfi_buffer_dmat,
                cm->cm_dmamap, cm->cm_data, cm->cm_len,
                mfi_data_cb, cm, polled);
        if (error == EINPROGRESS) {
            sc->mfi_flags |= MFI_FLAGS_QFRZN;
            return (0);
        }
    } else {
        error = mfi_send_frame(sc, cm);
    }

    return (error);
}
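/*
 * When the deferred bus_dmamap_load path returns EINPROGRESS above, the
 * mapping callback has not run yet: the queue is frozen with
 * MFI_FLAGS_QFRZN and mfi_data_cb becomes responsible for building the
 * scatter/gather list and actually issuing the frame once segments are
 * available.
 */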
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct mfi_frame_header *hdr;
    struct mfi_command *cm;
    union mfi_sgl *sgl;
    struct mfi_softc *sc;
    int i, j, first, dir;
    int sge_size, locked;

    cm = (struct mfi_command *)arg;
    sc = cm->cm_sc;
    hdr = &cm->cm_frame->header;
    sgl = cm->cm_sg;

    /*
     * We need to check if we have the lock as this is an async
     * callback, so even though our caller mfi_mapcmd asserts
     * it has the lock, there is no guarantee that it hasn't been
     * dropped if bus_dmamap_load returned prior to our
     * completion.
     */
    if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
        mtx_lock(&sc->mfi_io_lock);

    if (error) {
        printf("error %d in callback\n", error);
        cm->cm_error = error;
        mfi_complete(sc, cm);
        goto out;
    }
    /* Use IEEE sgl only for IO's on a SKINNY controller
     * For other commands on a SKINNY controller use either
     * sg32 or sg64 based on the sizeof(bus_addr_t).
     * Also calculate the total frame size based on the type
     * of SGL used.
     */
    if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
        (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
        (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
        (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
        for (i = 0; i < nsegs; i++) {
            sgl->sg_skinny[i].addr = segs[i].ds_addr;
            sgl->sg_skinny[i].len = segs[i].ds_len;
            sgl->sg_skinny[i].flag = 0;
        }
        hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
        sge_size = sizeof(struct mfi_sg_skinny);
        hdr->sg_count = nsegs;
    } else {
        j = 0;
        if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
            first = cm->cm_stp_len;
            if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
                sgl->sg32[j].addr = segs[0].ds_addr;
                sgl->sg32[j++].len = first;
            } else {
                sgl->sg64[j].addr = segs[0].ds_addr;
                sgl->sg64[j++].len = first;
            }
        } else
            first = 0;
        if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
            for (i = 0; i < nsegs; i++) {
                sgl->sg32[j].addr = segs[i].ds_addr + first;
                sgl->sg32[j++].len = segs[i].ds_len - first;
                first = 0;
            }
        } else {
            for (i = 0; i < nsegs; i++) {
                sgl->sg64[j].addr = segs[i].ds_addr + first;
                sgl->sg64[j++].len = segs[i].ds_len - first;
                first = 0;
            }
            hdr->flags |= MFI_FRAME_SGL64;
        }
        hdr->sg_count = j;
        sge_size = sc->mfi_sge_size;
    }

    dir = 0;
    if (cm->cm_flags & MFI_CMD_DATAIN) {
        dir |= BUS_DMASYNC_PREREAD;
        hdr->flags |= MFI_FRAME_DIR_READ;
    }
    if (cm->cm_flags & MFI_CMD_DATAOUT) {
        dir |= BUS_DMASYNC_PREWRITE;
        hdr->flags |= MFI_FRAME_DIR_WRITE;
    }
    bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
    cm->cm_flags |= MFI_CMD_MAPPED;

    /*
     * Instead of calculating the total number of frames in the
     * compound frame, it's already assumed that there will be at
     * least 1 frame, so don't compensate for the modulo of the
     * following division.
     */
    cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
    cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

    if ((error = mfi_send_frame(sc, cm)) != 0) {
        printf("error %d in callback from mfi_send_frame\n", error);
        cm->cm_error = error;
        mfi_complete(sc, cm);
    }

out:
    /* leave the lock in the state we found it */
    if (locked == 0)
        mtx_unlock(&sc->mfi_io_lock);
}
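/*
 * Frame accounting example for the division above (illustrative): with
 * MFI_FRAME_SIZE of 64 bytes, a command whose total frame size grows to
 * 192 bytes needs (192 - 1) / 64 = 2 extra frames beyond the first.
 */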
static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
    int error;

    mtx_assert(&sc->mfi_io_lock, MA_OWNED);

    if (sc->MFA_enabled)
        error = mfi_tbolt_send_frame(sc, cm);
    else
        error = mfi_std_send_frame(sc, cm);

    if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
        mfi_remove_busy(cm);

    return (error);
}
static int
mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
    struct mfi_frame_header *hdr;
    int tm = mfi_polled_cmd_timeout * 1000;

    hdr = &cm->cm_frame->header;

    if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
        cm->cm_timestamp = time_uptime;
        mfi_enqueue_busy(cm);
    } else {
        hdr->cmd_status = MFI_STAT_INVALID_STATUS;
        hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
    }

    /*
     * The bus address of the command is aligned on a 64 byte boundary,
     * leaving the least significant 6 bits as zero.  For whatever
     * reason, the hardware wants the address shifted right by three,
     * leaving just 3 zero bits.  These three bits are then used as a
     * prefetching hint for the hardware to predict how many frames
     * need to be fetched across the bus.  If a command has more than
     * 8 frames then the 3 bits are set to 0x7 and the firmware uses
     * other information in the command to determine the total amount
     * to fetch.  However, FreeBSD doesn't support I/O larger than
     * 128K, so 8 frames is enough for both 32bit and 64bit systems.
     */
    if (cm->cm_extra_frames > 7)
        cm->cm_extra_frames = 7;

    sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);

    if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
        return (0);

    /* This is a polled command, so busy-wait for it to complete. */
    while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
        DELAY(1000);
        tm -= 1;
        if (tm <= 0)
            break;
    }

    if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
        device_printf(sc->mfi_dev, "Frame %p timed out "
            "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
        return (ETIMEDOUT);
    }

    return (0);
}
void
mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
{
    int dir;

    mtx_assert(&sc->mfi_io_lock, MA_OWNED);

    if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
        dir = 0;
        if ((cm->cm_flags & MFI_CMD_DATAIN) ||
            (cm->cm_frame->header.cmd == MFI_CMD_STP))
            dir |= BUS_DMASYNC_POSTREAD;
        if (cm->cm_flags & MFI_CMD_DATAOUT)
            dir |= BUS_DMASYNC_POSTWRITE;

        bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
        bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
        cm->cm_flags &= ~MFI_CMD_MAPPED;
    }

    cm->cm_flags |= MFI_CMD_COMPLETED;

    if (cm->cm_complete != NULL)
        cm->cm_complete(cm);
    else
        wakeup(cm);
}
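/*
 * Synchronous callers (mfi_wait_command) sleep on the command itself,
 * so the wakeup() above is what unblocks them once MFI_CMD_COMPLETED
 * is set; asynchronous commands are finished through cm_complete.
 */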
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
{
    struct mfi_command *cm;
    struct mfi_abort_frame *abort;
    int i = 0, error;
    uint32_t context = 0;

    mtx_lock(&sc->mfi_io_lock);
    if ((cm = mfi_dequeue_free(sc)) == NULL) {
        mtx_unlock(&sc->mfi_io_lock);
        return (EBUSY);
    }

    /* Zero out the MFI frame */
    context = cm->cm_frame->header.context;
    bzero(cm->cm_frame, sizeof(union mfi_frame));
    cm->cm_frame->header.context = context;

    abort = &cm->cm_frame->abort;
    abort->header.cmd = MFI_CMD_ABORT;
    abort->header.flags = 0;
    abort->header.scsi_status = 0;
    abort->abort_context = (*cm_abort)->cm_frame->header.context;
    abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
    abort->abort_mfi_addr_hi =
        (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);

    cm->cm_flags = MFI_CMD_POLLED;

    if ((error = mfi_mapcmd(sc, cm)) != 0)
        device_printf(sc->mfi_dev, "failed to abort command\n");
    mfi_release_command(cm);

    mtx_unlock(&sc->mfi_io_lock);
    while (i < 5 && *cm_abort != NULL) {
        tsleep(cm_abort, 0, "mfiabort",
            5 * hz);
        i++;
    }
    if (*cm_abort != NULL) {
        /* Force a complete if command didn't abort */
        mtx_lock(&sc->mfi_io_lock);
        (*cm_abort)->cm_complete(*cm_abort);
        mtx_unlock(&sc->mfi_io_lock);
    }

    return (error);
}
int
mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
    struct mfi_command *cm;
    struct mfi_io_frame *io;
    int error;
    uint32_t context = 0;

    if ((cm = mfi_dequeue_free(sc)) == NULL)
        return (EBUSY);

    /* Zero out the MFI frame */
    context = cm->cm_frame->header.context;
    bzero(cm->cm_frame, sizeof(union mfi_frame));
    cm->cm_frame->header.context = context;

    io = &cm->cm_frame->io;
    io->header.cmd = MFI_CMD_LD_WRITE;
    io->header.target_id = id;
    io->header.timeout = 0;
    io->header.flags = 0;
    io->header.scsi_status = 0;
    io->header.sense_len = MFI_SENSE_LEN;
    io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
    io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
    io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
    io->lba_hi = (lba & 0xffffffff00000000) >> 32;
    io->lba_lo = lba & 0xffffffff;
    cm->cm_data = virt;
    cm->cm_len = len;
    cm->cm_sg = &io->sgl;
    cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
    cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

    if ((error = mfi_mapcmd(sc, cm)) != 0)
        device_printf(sc->mfi_dev, "failed dump blocks\n");
    bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
        BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
    mfi_release_command(cm);

    return (error);
}
int
mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
    struct mfi_command *cm;
    struct mfi_pass_frame *pass;
    int error, readop, cdb_len;
    uint32_t blkcount;

    if ((cm = mfi_dequeue_free(sc)) == NULL)
        return (EBUSY);

    pass = &cm->cm_frame->pass;
    bzero(pass->cdb, 16);
    pass->header.cmd = MFI_CMD_PD_SCSI_IO;
    readop = 0;

    blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
    cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
    pass->header.target_id = id;
    pass->header.timeout = 0;
    pass->header.flags = 0;
    pass->header.scsi_status = 0;
    pass->header.sense_len = MFI_SENSE_LEN;
    pass->header.data_len = len;
    pass->header.cdb_len = cdb_len;
    pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
    pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
    cm->cm_data = virt;
    cm->cm_len = len;
    cm->cm_sg = &pass->sgl;
    cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
    cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;

    if ((error = mfi_mapcmd(sc, cm)) != 0)
        device_printf(sc->mfi_dev, "failed dump blocks\n");
    bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
        BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
    mfi_release_command(cm);

    return (error);
}
static int
mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
    struct mfi_softc *sc;
    int error;

    sc = dev->si_drv1;

    mtx_lock(&sc->mfi_io_lock);
    if (sc->mfi_detaching)
        error = ENXIO;
    else {
        sc->mfi_flags |= MFI_FLAGS_OPEN;
        error = 0;
    }
    mtx_unlock(&sc->mfi_io_lock);

    return (error);
}

static int
mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
    struct mfi_softc *sc;
    struct mfi_aen *mfi_aen_entry, *tmp;

    sc = dev->si_drv1;

    mtx_lock(&sc->mfi_io_lock);
    sc->mfi_flags &= ~MFI_FLAGS_OPEN;

    TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
        if (mfi_aen_entry->p == curproc) {
            TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
                aen_link);
            free(mfi_aen_entry, M_MFIBUF);
        }
    }
    mtx_unlock(&sc->mfi_io_lock);
    return (0);
}
static int
mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
{
    switch (opcode) {
    case MFI_DCMD_LD_DELETE:
    case MFI_DCMD_CFG_ADD:
    case MFI_DCMD_CFG_CLEAR:
    case MFI_DCMD_CFG_FOREIGN_IMPORT:
        sx_xlock(&sc->mfi_config_lock);
        return (1);
    default:
        return (0);
    }
}

static void
mfi_config_unlock(struct mfi_softc *sc, int locked)
{
    if (locked)
        sx_xunlock(&sc->mfi_config_lock);
}
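/*
 * Usage pattern for the pair above: callers save the return value of
 * mfi_config_lock() and hand it back to mfi_config_unlock(), e.g.
 *
 *	locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
 *	...
 *	mfi_config_unlock(sc, locked);
 *
 * so only opcodes that actually took the config sx lock release it.
 */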
/*
 * Perform pre-issue checks on commands from userland and possibly veto
 * them.
 */
static int
mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
{
    struct mfi_disk *ld, *ld2;
    int error;
    struct mfi_system_pd *syspd = NULL;
    uint16_t syspd_id;
    uint16_t *mbox;

    mtx_assert(&sc->mfi_io_lock, MA_OWNED);
    error = 0;
    switch (cm->cm_frame->dcmd.opcode) {
    case MFI_DCMD_LD_DELETE:
        TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
            if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
                break;
        }
        if (ld == NULL)
            error = ENOENT;
        else
            error = mfi_disk_disable(ld);
        break;
    case MFI_DCMD_CFG_CLEAR:
        TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
            error = mfi_disk_disable(ld);
            if (error)
                break;
        }
        if (error) {
            TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
                if (ld2 == ld)
                    break;
                mfi_disk_enable(ld2);
            }
        }
        break;
    case MFI_DCMD_PD_STATE_SET:
        mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
        syspd_id = mbox[0];
        if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
            TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
                if (syspd->pd_id == syspd_id)
                    break;
            }
        } else
            break;
        if (syspd != NULL)
            error = mfi_syspd_disable(syspd);
        break;
    default:
        break;
    }
    return (error);
}
/* Perform post-issue checks on commands from userland. */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
    struct mfi_disk *ld, *ldn;
    struct mfi_system_pd *syspd = NULL;
    uint16_t syspd_id;
    uint16_t *mbox;

    switch (cm->cm_frame->dcmd.opcode) {
    case MFI_DCMD_LD_DELETE:
        TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
            if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
                break;
        }
        KASSERT(ld != NULL, ("volume disappeared"));
        if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
            mtx_unlock(&sc->mfi_io_lock);
            mtx_lock(&Giant);
            device_delete_child(sc->mfi_dev, ld->ld_dev);
            mtx_unlock(&Giant);
            mtx_lock(&sc->mfi_io_lock);
        } else
            mfi_disk_enable(ld);
        break;
    case MFI_DCMD_CFG_CLEAR:
        if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
            mtx_unlock(&sc->mfi_io_lock);
            mtx_lock(&Giant);
            TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
                device_delete_child(sc->mfi_dev, ld->ld_dev);
            }
            mtx_unlock(&Giant);
            mtx_lock(&sc->mfi_io_lock);
        } else {
            TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
                mfi_disk_enable(ld);
        }
        break;
    case MFI_DCMD_CFG_ADD:
        mfi_ldprobe(sc);
        break;
    case MFI_DCMD_CFG_FOREIGN_IMPORT:
        mfi_ldprobe(sc);
        break;
    case MFI_DCMD_PD_STATE_SET:
        mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
        syspd_id = mbox[0];
        if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
            TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
                if (syspd->pd_id == syspd_id)
                    break;
            }
        } else
            break;
        /* If the transition fails then enable the syspd again */
        if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
            mfi_syspd_enable(syspd);
        break;
    }
}
static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
{
    struct mfi_config_data *conf_data;
    struct mfi_command *ld_cm = NULL;
    struct mfi_ld_info *ld_info = NULL;
    struct mfi_ld_config *ld;
    char *p;
    int error = 0;

    conf_data = (struct mfi_config_data *)cm->cm_data;

    if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
        p = (char *)conf_data->array;
        p += conf_data->array_size * conf_data->array_count;
        ld = (struct mfi_ld_config *)p;
        if (ld->params.isSSCD == 1)
            error = 1;
    } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
        error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
            (void **)&ld_info, sizeof(*ld_info));
        if (error) {
            device_printf(sc->mfi_dev, "Failed to allocate "
                "MFI_DCMD_LD_GET_INFO %d\n", error);
            if (ld_info)
                free(ld_info, M_MFIBUF);
            return 0;
        }
        ld_cm->cm_flags = MFI_CMD_DATAIN;
        ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
        ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
        if (mfi_wait_command(sc, ld_cm) != 0) {
            device_printf(sc->mfi_dev, "failed to get log drv\n");
            mfi_release_command(ld_cm);
            free(ld_info, M_MFIBUF);
            return 0;
        }

        if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
            free(ld_info, M_MFIBUF);
            mfi_release_command(ld_cm);
            return 0;
        } else
            ld_info = (struct mfi_ld_info *)ld_cm->cm_private;

        if (ld_info->ld_config.params.isSSCD == 1)
            error = 1;

        mfi_release_command(ld_cm);
        free(ld_info, M_MFIBUF);
    }
    return error;
}
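/*
 * A non-zero return here is consumed as "skip_pre_post" in mfi_ioctl():
 * for SSCD (CacheCade) volumes the usual mfi_check_command_pre/_post
 * disk enable/disable dance is bypassed.
 */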
static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
{
    struct mfi_ioc_packet *ioc;
    struct megasas_sge *kern_sge;
    int sge_size, error, i;

    ioc = (struct mfi_ioc_packet *)arg;

    memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
    kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
        ioc->mfi_sgl_off);
    cm->cm_frame->header.sg_count = ioc->mfi_sge_count;

    if (sizeof(bus_addr_t) == 8) {
        cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
        cm->cm_extra_frames = 2;
        sge_size = sizeof(struct mfi_sg64);
    } else {
        cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
        sge_size = sizeof(struct mfi_sg32);
    }

    cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
    for (i = 0; i < ioc->mfi_sge_count; i++) {
        if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
            1, 0,			/* algnmnt, boundary */
            BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
            BUS_SPACE_MAXADDR,		/* highaddr */
            NULL, NULL,			/* filter, filterarg */
            ioc->mfi_sgl[i].iov_len,	/* maxsize */
            2,				/* nsegments */
            ioc->mfi_sgl[i].iov_len,	/* maxsegsize */
            BUS_DMA_ALLOCNOW,		/* flags */
            NULL, NULL,			/* lockfunc, lockarg */
            &sc->mfi_kbuff_arr_dmat[i])) {
            device_printf(sc->mfi_dev,
                "Cannot allocate mfi_kbuff_arr_dmat tag\n");
            return (ENOMEM);
        }

        if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
            (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
            &sc->mfi_kbuff_arr_dmamap[i])) {
            device_printf(sc->mfi_dev,
                "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
            return (ENOMEM);
        }

        bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
            sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
            ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
            &sc->mfi_kbuff_arr_busaddr[i], 0);

        if (!sc->kbuff_arr[i]) {
            device_printf(sc->mfi_dev,
                "Could not allocate memory for kbuff_arr info\n");
            return (-1);
        }
        kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
        kern_sge[i].length = ioc->mfi_sgl[i].iov_len;

        if (sizeof(bus_addr_t) == 8) {
            cm->cm_frame->stp.sgl.sg64[i].addr =
                kern_sge[i].phys_addr;
            cm->cm_frame->stp.sgl.sg64[i].len =
                ioc->mfi_sgl[i].iov_len;
        } else {
            cm->cm_frame->stp.sgl.sg32[i].addr =
                kern_sge[i].phys_addr;
            cm->cm_frame->stp.sgl.sg32[i].len =
                ioc->mfi_sgl[i].iov_len;
        }

        error = copyin(ioc->mfi_sgl[i].iov_base,
            sc->kbuff_arr[i],
            ioc->mfi_sgl[i].iov_len);
        if (error != 0) {
            device_printf(sc->mfi_dev, "Copy in failed\n");
            return error;
        }
    }

    cm->cm_flags |= MFI_CMD_MAPPED;
    return 0;
}
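/*
 * The per-segment DMA tags, maps and bounce buffers allocated above are
 * not torn down here; the MFI_CMD_STP cleanup at the end of mfi_ioctl()
 * unloads, frees and destroys them after the command completes.
 */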
static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
    struct mfi_command *cm;
    struct mfi_dcmd_frame *dcmd;
    void *ioc_buf = NULL;
    uint32_t context;
    int error = 0, locked;

    if (ioc->buf_size > 0) {
        if (ioc->buf_size > 1024 * 1024)
            return (ENOMEM);
        ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
        error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
        if (error) {
            device_printf(sc->mfi_dev, "failed to copyin\n");
            free(ioc_buf, M_MFIBUF);
            return (error);
        }
    }

    locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);

    mtx_lock(&sc->mfi_io_lock);
    while ((cm = mfi_dequeue_free(sc)) == NULL)
        msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);

    /* Save context for later */
    context = cm->cm_frame->header.context;

    dcmd = &cm->cm_frame->dcmd;
    bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));

    cm->cm_sg = &dcmd->sgl;
    cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
    cm->cm_data = ioc_buf;
    cm->cm_len = ioc->buf_size;

    /* restore context */
    cm->cm_frame->header.context = context;

    /* Cheat since we don't know if we're writing or reading */
    cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;

    error = mfi_check_command_pre(sc, cm);
    if (error)
        goto out;

    error = mfi_wait_command(sc, cm);
    if (error) {
        device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
        goto out;
    }
    bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
    mfi_check_command_post(sc, cm);
out:
    mfi_release_command(cm);
    mtx_unlock(&sc->mfi_io_lock);
    mfi_config_unlock(sc, locked);
    if (ioc->buf_size > 0)
        error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
    if (ioc_buf != NULL)
        free(ioc_buf, M_MFIBUF);
    return (error);
}
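/*
 * Userland reaches this path through the MFIIO_PASSTHRU ioctl; a
 * minimal sketch (illustrative, error handling omitted):
 *
 *	struct mfi_ioc_passthru iop = { 0 };
 *	iop.ioc_frame.opcode = opcode;	// desired DCMD opcode
 *	iop.buf = buf;
 *	iop.buf_size = sizeof(buf);
 *	ioctl(fd, MFIIO_PASSTHRU, &iop);
 */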
#define PTRIN(p) ((void *)(uintptr_t)(p))

static int
mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
    struct mfi_softc *sc;
    union mfi_statrequest *ms;
    struct mfi_ioc_packet *ioc;
#ifdef COMPAT_FREEBSD32
    struct mfi_ioc_packet32 *ioc32;
#endif
    struct mfi_ioc_aen *aen;
    struct mfi_command *cm = NULL;
    uint32_t context = 0;
    union mfi_sense_ptr sense_ptr;
    uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
    size_t len;
    int i, res;
    struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
#ifdef COMPAT_FREEBSD32
    struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
    struct mfi_ioc_passthru iop_swab;
#endif
    int error, locked;

    sc = dev->si_drv1;
    error = 0;

    if (sc->hw_crit_error)
        return (EBUSY);

    if (sc->issuepend_done == 0)
        return (EBUSY);

    switch (cmd) {
    case MFIIO_STATS:
        ms = (union mfi_statrequest *)arg;
        switch (ms->ms_item) {
        case MFIQ_FREE:
        case MFIQ_BIO:
        case MFIQ_READY:
        case MFIQ_BUSY:
            bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
                sizeof(struct mfi_qstat));
            break;
        default:
            error = ENOIOCTL;
            break;
        }
        break;
    case MFIIO_QUERY_DISK:
    {
        struct mfi_query_disk *qd;
        struct mfi_disk *ld;

        qd = (struct mfi_query_disk *)arg;
        mtx_lock(&sc->mfi_io_lock);
        TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
            if (ld->ld_id == qd->array_id)
                break;
        }
        if (ld == NULL) {
            qd->present = 0;
            mtx_unlock(&sc->mfi_io_lock);
            return (0);
        }
        qd->present = 1;
        if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
            qd->open = 1;
        bzero(qd->devname, SPECNAMELEN + 1);
        snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
        mtx_unlock(&sc->mfi_io_lock);
        break;
    }
    case MFI_CMD:
#ifdef COMPAT_FREEBSD32
    case MFI_CMD32:
#endif
        {
        devclass_t devclass;
        int adapter;

        ioc = (struct mfi_ioc_packet *)arg;
        adapter = ioc->mfi_adapter_no;
        if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
            devclass = devclass_find("mfi");
            sc = devclass_get_softc(devclass, adapter);
        }
        mtx_lock(&sc->mfi_io_lock);
        if ((cm = mfi_dequeue_free(sc)) == NULL) {
            mtx_unlock(&sc->mfi_io_lock);
            return (EBUSY);
        }
        mtx_unlock(&sc->mfi_io_lock);
        locked = 0;

        /*
         * save off original context since copying from user
         * will clobber some data
         */
        context = cm->cm_frame->header.context;
        cm->cm_frame->header.context = cm->cm_index;

        bcopy(ioc->mfi_frame.raw, cm->cm_frame,
            2 * MEGAMFI_FRAME_SIZE);
        cm->cm_total_frame_size = (sizeof(union mfi_sgl)
            * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
        cm->cm_frame->header.scsi_status = 0;
        cm->cm_frame->header.pad0 = 0;
        if (ioc->mfi_sge_count) {
            cm->cm_sg =
                (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
        }
        cm->cm_flags = 0;
        if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
            cm->cm_flags |= MFI_CMD_DATAIN;
        if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
            cm->cm_flags |= MFI_CMD_DATAOUT;
        /* Legacy app shim */
        if (cm->cm_flags == 0)
            cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
        cm->cm_len = cm->cm_frame->header.data_len;
        if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
#ifdef COMPAT_FREEBSD32
            if (cmd == MFI_CMD) {
#endif
                /* Native */
                cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
#ifdef COMPAT_FREEBSD32
            } else {
                /* 32bit on 64bit */
                ioc32 = (struct mfi_ioc_packet32 *)ioc;
                cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
            }
#endif
            cm->cm_len += cm->cm_stp_len;
        }
        if (cm->cm_len &&
            (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
            cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
                M_WAITOK | M_ZERO);
            if (cm->cm_data == NULL) {
                device_printf(sc->mfi_dev, "Malloc failed\n");
                goto out;
            }
        } else {
            cm->cm_data = 0;
        }

        /* restore header context */
        cm->cm_frame->header.context = context;

        if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
            res = mfi_stp_cmd(sc, cm, arg);
            if (res != 0)
                goto out;
        } else {
            temp = data;
            if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
                (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
                for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
                    if (cmd == MFI_CMD) {
#endif
                        /* Native */
                        addr = ioc->mfi_sgl[i].iov_base;
                        len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
                    } else {
                        /* 32bit on 64bit */
                        ioc32 = (struct mfi_ioc_packet32 *)ioc;
                        addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
                        len = ioc32->mfi_sgl[i].iov_len;
                    }
#endif
                    error = copyin(addr, temp, len);
                    if (error != 0) {
                        device_printf(sc->mfi_dev,
                            "Copy in failed\n");
                        goto out;
                    }
                    temp = &temp[len];
                }
            }
        }

        if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
            locked = mfi_config_lock(sc,
                cm->cm_frame->dcmd.opcode);

        if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
            cm->cm_frame->pass.sense_addr_lo =
                (uint32_t)cm->cm_sense_busaddr;
            cm->cm_frame->pass.sense_addr_hi =
                (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
        }
        mtx_lock(&sc->mfi_io_lock);
        skip_pre_post = mfi_check_for_sscd(sc, cm);
        if (!skip_pre_post) {
            error = mfi_check_command_pre(sc, cm);
            if (error) {
                mtx_unlock(&sc->mfi_io_lock);
                goto out;
            }
        }
        if ((error = mfi_wait_command(sc, cm)) != 0) {
            device_printf(sc->mfi_dev,
                "Controller polled failed\n");
            mtx_unlock(&sc->mfi_io_lock);
            goto out;
        }
        if (!skip_pre_post) {
            mfi_check_command_post(sc, cm);
        }
        mtx_unlock(&sc->mfi_io_lock);

        if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
            temp = data;
            if ((cm->cm_flags & MFI_CMD_DATAIN) ||
                (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
                for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
                    if (cmd == MFI_CMD) {
#endif
                        /* Native */
                        addr = ioc->mfi_sgl[i].iov_base;
                        len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
                    } else {
                        /* 32bit on 64bit */
                        ioc32 = (struct mfi_ioc_packet32 *)ioc;
                        addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
                        len = ioc32->mfi_sgl[i].iov_len;
                    }
#endif
                    error = copyout(temp, addr, len);
                    if (error != 0) {
                        device_printf(sc->mfi_dev,
                            "Copy out failed\n");
                        goto out;
                    }
                    temp = &temp[len];
                }
            }
        }

        if (ioc->mfi_sense_len) {
            /* get user-space sense ptr then copy out sense */
            bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
                &sense_ptr.sense_ptr_data[0],
                sizeof(sense_ptr.sense_ptr_data));
#ifdef COMPAT_FREEBSD32
            if (cmd != MFI_CMD) {
                /*
                 * not 64bit native so zero out any address
                 * over 32bit
                 */
                sense_ptr.addr.high = 0;
            }
#endif
            error = copyout(cm->cm_sense, sense_ptr.user_space,
                ioc->mfi_sense_len);
            if (error != 0) {
                device_printf(sc->mfi_dev,
                    "Copy out failed\n");
                goto out;
            }
        }

        ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
        mfi_config_unlock(sc, locked);
        if (data)
            free(data, M_MFIBUF);
        if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
            for (i = 0; i < 2; i++) {
                if (sc->kbuff_arr[i]) {
                    if (sc->mfi_kbuff_arr_busaddr[i] != 0)
                        bus_dmamap_unload(
                            sc->mfi_kbuff_arr_dmat[i],
                            sc->mfi_kbuff_arr_dmamap[i]);
                    if (sc->kbuff_arr[i] != NULL)
                        bus_dmamem_free(
                            sc->mfi_kbuff_arr_dmat[i],
                            sc->kbuff_arr[i],
                            sc->mfi_kbuff_arr_dmamap[i]);
                    if (sc->mfi_kbuff_arr_dmat[i] != NULL)
                        bus_dma_tag_destroy(
                            sc->mfi_kbuff_arr_dmat[i]);
                }
            }
        }
        if (cm) {
            mtx_lock(&sc->mfi_io_lock);
            mfi_release_command(cm);
            mtx_unlock(&sc->mfi_io_lock);
        }
        break;
        }
    case MFI_SET_AEN:
        aen = (struct mfi_ioc_aen *)arg;
        mtx_lock(&sc->mfi_io_lock);
        error = mfi_aen_register(sc, aen->aen_seq_num,
            aen->aen_class_locale);
        mtx_unlock(&sc->mfi_io_lock);
        break;
    case MFI_LINUX_CMD_2:	/* Firmware Linux ioctl shim */
        {
        devclass_t devclass;
        struct mfi_linux_ioc_packet l_ioc;
        int adapter;

        devclass = devclass_find("mfi");
        if (devclass == NULL)
            return (ENOENT);

        error = copyin(arg, &l_ioc, sizeof(l_ioc));
        if (error)
            return (error);
        adapter = l_ioc.lioc_adapter_no;
        sc = devclass_get_softc(devclass, adapter);
        if (sc == NULL)
            return (ENOENT);
        return (mfi_linux_ioctl_int(sc->mfi_cdev,
            cmd, arg, flag, td));
        }
    case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
        {
        devclass_t devclass;
        struct mfi_linux_ioc_aen l_aen;
        int adapter;

        devclass = devclass_find("mfi");
        if (devclass == NULL)
            return (ENOENT);

        error = copyin(arg, &l_aen, sizeof(l_aen));
        if (error)
            return (error);
        adapter = l_aen.laen_adapter_no;
        sc = devclass_get_softc(devclass, adapter);
        if (sc == NULL)
            return (ENOENT);
        return (mfi_linux_ioctl_int(sc->mfi_cdev,
            cmd, arg, flag, td));
        }
#ifdef COMPAT_FREEBSD32
    case MFIIO_PASSTHRU32:
        if (!SV_CURPROC_FLAG(SV_ILP32)) {
            error = ENOTTY;
            break;
        }
        iop_swab.ioc_frame = iop32->ioc_frame;
        iop_swab.buf_size = iop32->buf_size;
        iop_swab.buf = PTRIN(iop32->buf);
        iop = &iop_swab;
        /* FALLTHROUGH */
#endif
    case MFIIO_PASSTHRU:
        error = mfi_user_command(sc, iop);
#ifdef COMPAT_FREEBSD32
        if (cmd == MFIIO_PASSTHRU32)
            iop32->ioc_frame = iop_swab.ioc_frame;
#endif
        break;
    default:
        device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
        error = ENOENT;
        break;
    }

    return (error);
}
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
    struct mfi_softc *sc;
    struct mfi_linux_ioc_packet l_ioc;
    struct mfi_linux_ioc_aen l_aen;
    struct mfi_command *cm = NULL;
    struct mfi_aen *mfi_aen_entry;
    union mfi_sense_ptr sense_ptr;
    uint32_t context = 0;
    uint8_t *data = NULL, *temp;
    int i;
    int error, locked;

    sc = dev->si_drv1;
    error = 0;
    switch (cmd) {
    case MFI_LINUX_CMD_2:	/* Firmware Linux ioctl shim */
        error = copyin(arg, &l_ioc, sizeof(l_ioc));
        if (error != 0)
            return (error);

        if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
            return (EINVAL);
        }

        mtx_lock(&sc->mfi_io_lock);
        if ((cm = mfi_dequeue_free(sc)) == NULL) {
            mtx_unlock(&sc->mfi_io_lock);
            return (EBUSY);
        }
        mtx_unlock(&sc->mfi_io_lock);
        locked = 0;

        /*
         * save off original context since copying from user
         * will clobber some data
         */
        context = cm->cm_frame->header.context;

        bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
            2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
        cm->cm_total_frame_size = (sizeof(union mfi_sgl)
            * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
        cm->cm_frame->header.scsi_status = 0;
        cm->cm_frame->header.pad0 = 0;
        if (l_ioc.lioc_sge_count)
            cm->cm_sg =
                (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
        cm->cm_flags = 0;
        if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
            cm->cm_flags |= MFI_CMD_DATAIN;
        if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
            cm->cm_flags |= MFI_CMD_DATAOUT;
        cm->cm_len = cm->cm_frame->header.data_len;
        if (cm->cm_len &&
            (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
            cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
                M_WAITOK | M_ZERO);
            if (cm->cm_data == NULL) {
                device_printf(sc->mfi_dev, "Malloc failed\n");
                goto out;
            }
        } else {
            cm->cm_data = 0;
        }

        /* restore header context */
        cm->cm_frame->header.context = context;

        temp = data;
        if (cm->cm_flags & MFI_CMD_DATAOUT) {
            for (i = 0; i < l_ioc.lioc_sge_count; i++) {
                error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
                    temp,
                    l_ioc.lioc_sgl[i].iov_len);
                if (error != 0) {
                    device_printf(sc->mfi_dev,
                        "Copy in failed\n");
                    goto out;
                }
                temp = &temp[l_ioc.lioc_sgl[i].iov_len];
            }
        }

        if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
            locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

        if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
            cm->cm_frame->pass.sense_addr_lo =
                (uint32_t)cm->cm_sense_busaddr;
            cm->cm_frame->pass.sense_addr_hi =
                (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
        }

        mtx_lock(&sc->mfi_io_lock);
        error = mfi_check_command_pre(sc, cm);
        if (error) {
            mtx_unlock(&sc->mfi_io_lock);
            goto out;
        }

        if ((error = mfi_wait_command(sc, cm)) != 0) {
            device_printf(sc->mfi_dev,
                "Controller polled failed\n");
            mtx_unlock(&sc->mfi_io_lock);
            goto out;
        }

        mfi_check_command_post(sc, cm);
        mtx_unlock(&sc->mfi_io_lock);

        temp = data;
        if (cm->cm_flags & MFI_CMD_DATAIN) {
            for (i = 0; i < l_ioc.lioc_sge_count; i++) {
                error = copyout(temp,
                    PTRIN(l_ioc.lioc_sgl[i].iov_base),
                    l_ioc.lioc_sgl[i].iov_len);
                if (error != 0) {
                    device_printf(sc->mfi_dev,
                        "Copy out failed\n");
                    goto out;
                }
                temp = &temp[l_ioc.lioc_sgl[i].iov_len];
            }
        }

        if (l_ioc.lioc_sense_len) {
            /* get user-space sense ptr then copy out sense */
            bcopy(&((struct mfi_linux_ioc_packet*)arg)
                ->lioc_frame.raw[l_ioc.lioc_sense_off],
                &sense_ptr.sense_ptr_data[0],
                sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
            /*
             * only 32bit Linux support so zero out any
             * address over 32bit
             */
            sense_ptr.addr.high = 0;
#endif
            error = copyout(cm->cm_sense, sense_ptr.user_space,
                l_ioc.lioc_sense_len);
            if (error != 0) {
                device_printf(sc->mfi_dev,
                    "Copy out failed\n");
                goto out;
            }
        }

        error = copyout(&cm->cm_frame->header.cmd_status,
            &((struct mfi_linux_ioc_packet*)arg)
            ->lioc_frame.hdr.cmd_status,
            1);
        if (error != 0) {
            device_printf(sc->mfi_dev,
                "Copy out failed\n");
            goto out;
        }

out:
        mfi_config_unlock(sc, locked);
        if (data)
            free(data, M_MFIBUF);
        if (cm) {
            mtx_lock(&sc->mfi_io_lock);
            mfi_release_command(cm);
            mtx_unlock(&sc->mfi_io_lock);
        }

        return (error);
    case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
        error = copyin(arg, &l_aen, sizeof(l_aen));
        if (error != 0)
            return (error);
        printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
        mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
            M_WAITOK);
        mtx_lock(&sc->mfi_io_lock);
        if (mfi_aen_entry != NULL) {
            mfi_aen_entry->p = curproc;
            TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
                aen_link);
        }
        error = mfi_aen_register(sc, l_aen.laen_seq_num,
            l_aen.laen_class_locale);

        if (error != 0) {
            TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
                aen_link);
            free(mfi_aen_entry, M_MFIBUF);
        }
        mtx_unlock(&sc->mfi_io_lock);

        break;
    default:
        device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
        error = ENOENT;
        break;
    }

    return (error);
}
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
    struct mfi_softc *sc;
    int revents = 0;

    sc = dev->si_drv1;

    if (poll_events & (POLLIN | POLLRDNORM)) {
        if (sc->mfi_aen_triggered != 0) {
            revents |= poll_events & (POLLIN | POLLRDNORM);
            sc->mfi_aen_triggered = 0;
        }
        if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
            revents |= POLLERR;
        }
    }

    if (revents == 0) {
        if (poll_events & (POLLIN | POLLRDNORM)) {
            sc->mfi_poll_waiting = 1;
            selrecord(td, &sc->mfi_select);
        }
    }

    return revents;
}
static void
mfi_dump_all(void)
{
    struct mfi_softc *sc;
    struct mfi_command *cm;
    devclass_t dc;
    time_t deadline;
    int timedout;
    int i;

    dc = devclass_find("mfi");
    if (dc == NULL) {
        printf("No mfi dev class\n");
        return;
    }

    for (i = 0; ; i++) {
        sc = devclass_get_softc(dc, i);
        if (sc == NULL)
            break;
        device_printf(sc->mfi_dev, "Dumping\n\n");
        timedout = 0;
        deadline = time_uptime - mfi_cmd_timeout;
        mtx_lock(&sc->mfi_io_lock);
        TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
            if (cm->cm_timestamp <= deadline) {
                device_printf(sc->mfi_dev,
                    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
                    cm, (int)(time_uptime - cm->cm_timestamp));
                MFI_PRINT_CMD(cm);
                timedout++;
            }
        }
        mtx_unlock(&sc->mfi_io_lock);
    }

    return;
}
static void
mfi_timeout(void *data)
{
    struct mfi_softc *sc = (struct mfi_softc *)data;
    struct mfi_command *cm, *tmp;
    time_t deadline;
    int timedout = 0;

    deadline = time_uptime - mfi_cmd_timeout;
    if (sc->adpreset == 0) {
        if (!mfi_tbolt_reset(sc)) {
            callout_reset(&sc->mfi_watchdog_callout,
                mfi_cmd_timeout * hz, mfi_timeout, sc);
            return;
        }
    }
    mtx_lock(&sc->mfi_io_lock);
    TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
        if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
            continue;
        if (cm->cm_timestamp <= deadline) {
            if (sc->adpreset != 0 && sc->issuepend_done == 0) {
                cm->cm_timestamp = time_uptime;
            } else {
                device_printf(sc->mfi_dev,
                    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
                    cm, (int)(time_uptime - cm->cm_timestamp));
                MFI_PRINT_CMD(cm);
                MFI_VALIDATE_CMD(sc, cm);
                /*
                 * While commands can get stuck forever we do
                 * not fail them as there is no way to tell if
                 * the controller has actually processed them
                 * or not.
                 *
                 * In addition it's very likely that force
                 * failing a command here would cause a panic,
                 * e.g. in UFS.
                 */
                timedout++;
            }
        }
    }
    mtx_unlock(&sc->mfi_io_lock);

    callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
        mfi_timeout, sc);