/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
    0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
    &mfi_polled_cmd_timeout, 0,
    "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
    0, "Command timeout (in seconds)");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;
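
/*
 * Register-level accessors.  The 1064R (xscale) and the 1078/GEN2/Skinny
 * (ppc) controller families speak the same mailbox protocol through
 * different register layouts, so mfi_attach() fills the softc with the
 * appropriate set of function pointers from the routines below.
 */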
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}
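
/*
 * Post a command to the inbound queue port.  Frames are 64-byte aligned,
 * so the xscale interface passes the frame address shifted right three
 * bits with the frame count OR'd into the freed low bits; the ppc/skinny
 * interface instead ORs in the frame count shifted left by one and sets
 * bit 0 as a valid flag.
 */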
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
	}
}
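
/*
 * Walk the firmware through its init handshake until it reports READY.
 * Each pass nudges the current state (clearing handshakes, acking boot
 * messages) and then polls the status register in 100ms steps for up to
 * max_wait seconds before declaring the firmware stuck.
 */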
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt Support get the contiguous memory */

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* alignmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return 0;
		}

		/*
		 * Allocate DMA memory mapping for the MPI2 IOC Init descriptor.
		 * We keep it separate from what we have allocated for the
		 * request and reply descriptors to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
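	/*
	 * One 64-byte frame carries the command header; the worst-case S/G
	 * list is rounded up to whole additional frames.
	 */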
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/* Before moving the FW to operational state, check whether
	 * hostmemory is required by the FW or not
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return error;
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
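
	/*
	 * Each command gets a fixed slice of the preallocated frame and
	 * sense DMA regions; the frame's context field records the index
	 * so completions in mfi_intr() can map a reply back to its
	 * mfi_command.
	 */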
	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i - 1);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * Command may be on other queues e.g. busy queue depending on the
	 * flow of a previous call to mfi_mapcmd, so ensure it's dequeued
	 * properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* ThunderBolt commands return their extra frames to the tbolt pool */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}
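
/*
 * Build a generic DCMD frame.  If bufp points to a NULL pointer, a data
 * buffer of the requested size is allocated on the caller's behalf; the
 * caller is responsible for releasing the command and freeing the buffer
 * when done.
 */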
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;

	return (0);
}

static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}
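
/* Tear down everything allocated in mfi_attach(), in roughly reverse order. */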
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}

	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;
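
	/*
	 * The firmware posts completed command contexts in the hw_reply_q
	 * ring between the consumer index (hw_ci, ours) and the producer
	 * index (hw_pi, the firmware's).  Drain the ring and hand each
	 * context back to its command.
	 */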
restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

void
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return;
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			printf("DELETE\n");
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * SYSPD's whose state has changed.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * During load time the driver reads all the events starting
		 * from the one that has been logged after shutdown.  Avoid
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			Fix: for kernel panics when SSCD is removed
			KASSERT(ld != NULL, ("volume disappeared"));
			*/
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it.
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	current_aen.word = locale;
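	/*
	 * If an AEN registration is already outstanding and its class and
	 * locale already cover the request, there is nothing to do.
	 * Otherwise widen the class/locale to cover both and abort the old
	 * command so it can be re-issued below.
	 */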
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mfi_abort(sc, &sc->mfi_aen_cm);
		}
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

out:
	return (error);
}

static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_aen_cm == NULL)
		return;

	hdr = &cm->cm_frame->header;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_aen_cm);
	sc->mfi_aen_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_aen_setup(sc, seq);
}
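
/* Number of event entries fetched per MFI_DCMD_CTRL_EVENT_GET request. */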
#define MAX_EVENTS 15

static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
	    * (MAX_EVENTS - 1);
	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			free(el, M_MFIBUF);
			return (EBUSY);
		}

		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			break;
		}

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			mfi_release_command(cm);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			mfi_release_command(cm);
			error = EIO;
			break;
		}
		mfi_release_command(cm);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			mfi_queue_evt(sc, &el->event[i]);
		}
		seq = el->event[el->count - 1].seq + 1;
	}

	free(el, M_MFIBUF);
	return (error);
}

static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_ld_info *ld_info = NULL;
	struct mfi_disk_pending *ld_pend;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (ld_pend != NULL) {
		ld_pend->ld_id = id;
		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
	    (void **)&ld_info, sizeof(*ld_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
		if (ld_info)
			free(ld_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get logical drive: %d\n", id);
		free(ld_info, M_MFIBUF);
		return (0);
	}
	if (ld_info->ld_config.params.isSSCD != 1)
		mfi_add_ld_complete(cm);
	else {
		mfi_release_command(cm);
		if (ld_info)		/* SSCD drives ld_info free here */
			free(ld_info, M_MFIBUF);
	}
	return (0);
}

static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
		free(ld_info, M_MFIBUF);
		wakeup(&sc->mfi_map_sync_cm);
		mfi_release_command(cm);
		return;
	}
	wakeup(&sc->mfi_map_sync_cm);
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}

static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_pd_info *pd_info = NULL;
	struct mfi_system_pending *syspd_pend;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (syspd_pend != NULL) {
		syspd_pend->pd_id = id;
		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
	    (void **)&pd_info, sizeof(*pd_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
		    error);
		if (pd_info)
			free(pd_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	dcmd->header.scsi_status = 0;
	dcmd->header.pad0 = 0;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get physical drive info %d\n", id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return (error);
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_add_sys_pd_complete(cm);
	return (0);
}

static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		free(pd_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}
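
/*
 * Pull the next bio off the queue and turn it into a command.  The type
 * tag stashed in bio_driver2 selects between native logical-disk I/O
 * frames and SCSI passthrough frames for system PDs.
 */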
static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
	struct bio *bio;
	struct mfi_command *cm = NULL;

	/* reserving two commands to avoid starvation for IOCTL */
	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
		return (NULL);
	}
	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
		return (NULL);
	}
	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
		cm = mfi_build_ldio(sc, bio);
	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
		cm = mfi_build_syspdio(sc, bio);
	}
	if (!cm)
		mfi_enqueue_bio(sc, bio);
	return cm;
}
/*
 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
 */
static int
mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
{
	int cdb_len;

	if (((lba & 0x1fffff) == lba)
	    && ((block_count & 0xff) == block_count)
	    && (byte2 == 0)) {
		/* We can fit in a 6 byte cdb */
		struct scsi_rw_6 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_6 *)cdb;
		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
		scsi_ulto3b(lba, scsi_cmd->addr);
		scsi_cmd->length = block_count & 0xff;
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
		/* Need a 10 byte CDB */
		struct scsi_rw_10 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_10 *)cdb;
		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto2b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffffffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
		/* Block count is too big for a 10 byte CDB, use a 12 byte CDB */
		struct scsi_rw_12 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_12 *)cdb;
		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto4b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else {
		/*
		 * 16 byte CDB.  We'll only get here if the LBA is larger
		 * than 2^32.
		 */
		struct scsi_rw_16 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_16 *)cdb;
		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
		scsi_cmd->byte2 = byte2;
		scsi_u64to8b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto4b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	}

	return cdb_len;
}
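/*
 * Illustrative use of mfi_build_cdb() (a sketch, not part of the
 * original driver, kept compiled out): an LBA that fits in 32 bits with
 * a block count that fits in 16 bits selects the 10 byte CDB form.
 */
#if 0
	uint8_t cdb[16];
	int cdb_len;

	cdb_len = mfi_build_cdb(1 /* read */, 0, 0x12345678ULL, 4, cdb);
	/* cdb_len is now sizeof(struct scsi_rw_10), i.e. 10 bytes */
#endif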
static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	uint32_t context = 0;
	int flags = 0, blkcount = 0, readop;
	uint8_t cdb_len;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	switch (bio->bio_cmd & 0x03) {
	case BIO_READ:
		flags = MFI_CMD_DATAIN;
		readop = 1;
		break;
	case BIO_WRITE:
		flags = MFI_CMD_DATAOUT;
		readop = 0;
		break;
	default:
		/* TODO: what about BIO_DELETE??? */
		panic("Unsupported bio command %x\n", bio->bio_cmd);
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
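	/*
	 * Example of the round-up: a 4097 byte bio with MFI_SECTOR_LEN
	 * of 512 yields (4097 + 511) / 512 = 9 sectors.
	 */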
	/* Fill the LBA and transfer length into the CDB */
	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
	    pass->cdb);
	pass->header.target_id = (uintptr_t)bio->bio_driver1;
	pass->header.lun_id = 0;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = bio->bio_bcount;
	pass->header.cdb_len = cdb_len;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bio->bio_data;
	cm->cm_len = bio->bio_bcount;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = flags;

	return (cm);
}
static struct mfi_command *
mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_io_frame *io;
	struct mfi_command *cm;
	int flags;
	uint32_t blkcount;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	io = &cm->cm_frame->io;
	switch (bio->bio_cmd & 0x03) {
	case BIO_READ:
		io->header.cmd = MFI_CMD_LD_READ;
		flags = MFI_CMD_DATAIN;
		break;
	case BIO_WRITE:
		io->header.cmd = MFI_CMD_LD_WRITE;
		flags = MFI_CMD_DATAOUT;
		break;
	default:
		/* TODO: what about BIO_DELETE??? */
		panic("Unsupported bio command %x\n", bio->bio_cmd);
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	io->header.target_id = (uintptr_t)bio->bio_driver1;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	io->header.data_len = blkcount;
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
	io->lba_lo = bio->bio_pblkno & 0xffffffff;
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bio->bio_data;
	cm->cm_len = bio->bio_bcount;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = flags;

	return (cm);
}
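/*
 * Contrast with mfi_build_syspdio() above: an LD I/O frame carries the
 * LBA in the frame itself and counts data_len in sectors, while the
 * system-PD path wraps the request in a SCSI pass-through frame with a
 * CDB and a data_len in bytes.
 */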
static void
mfi_bio_complete(struct mfi_command *cm)
{
	struct bio *bio;
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;

	bio = cm->cm_private;
	hdr = &cm->cm_frame->header;
	sc = cm->cm_sc;

	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
		bio->bio_flags |= BIO_ERROR;
		bio->bio_error = EIO;
		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
		mfi_print_sense(cm->cm_sc, cm->cm_sense);
	} else if (cm->cm_error != 0) {
		bio->bio_flags |= BIO_ERROR;
		bio->bio_error = cm->cm_error;
		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
		    cm, cm->cm_error);
	}

	mfi_release_command(cm);
	mfi_disk_complete(bio);
}
void
mfi_startio(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct ccb_hdr *ccbh;

	for (;;) {
		/* Don't bother if we're short on resources */
		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
			break;

		/* Try a command that has already been prepared */
		cm = mfi_dequeue_ready(sc);
		if (cm == NULL) {
			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
				cm = sc->mfi_cam_start(ccbh);
		}

		/* Nope, so look for work on the bioq */
		if (cm == NULL)
			cm = mfi_bio_command(sc);

		/* No work available, so exit */
		if (cm == NULL)
			break;

		/* Send the command to the controller */
		if (mfi_mapcmd(sc, cm) != 0) {
			device_printf(sc->mfi_dev, "Failed to startio\n");
			mfi_requeue_ready(cm);
			break;
		}
	}
}
int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error, polled;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
		if (cm->cm_flags & MFI_CMD_CCB)
			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
			    polled);
		else
			error = bus_dmamap_load(sc->mfi_buffer_dmat,
			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
			    mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		error = mfi_send_frame(sc, cm);
	}

	return (error);
}
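/*
 * Busdma callback for data-carrying commands.  It can run synchronously
 * from bus_dmamap_load()/bus_dmamap_load_ccb() or, when the load was
 * deferred with EINPROGRESS above, later from the busdma deferral
 * queue, which is why it re-checks lock ownership below.
 */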
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, j, first, dir;
	int sge_size, locked;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/*
	 * We need to check if we have the lock as this is an async
	 * callback, so even though our caller mfi_mapcmd asserts
	 * it has the lock, there is no guarantee that it hasn't been
	 * dropped if bus_dmamap_load returned prior to our
	 * completion.
	 */
	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
		mtx_lock(&sc->mfi_io_lock);

	if (error) {
		printf("error %d in callback\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		goto out;
	}
	/*
	 * Use the IEEE sgl only for I/Os on a SKINNY controller.
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64 based on the sizeof(bus_addr_t).
	 * Also calculate the total frame size based on the type
	 * of SGL used.
	 */
	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg_skinny[i].addr = segs[i].ds_addr;
			sgl->sg_skinny[i].len = segs[i].ds_len;
			sgl->sg_skinny[i].flag = 0;
		}
		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		sge_size = sizeof(struct mfi_sg_skinny);
		hdr->sg_count = nsegs;
	} else {
		j = 0;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			first = cm->cm_stp_len;
			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
				sgl->sg32[j].addr = segs[0].ds_addr;
				sgl->sg32[j++].len = first;
			} else {
				sgl->sg64[j].addr = segs[0].ds_addr;
				sgl->sg64[j++].len = first;
			}
		} else
			first = 0;
		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
			for (i = 0; i < nsegs; i++) {
				sgl->sg32[j].addr = segs[i].ds_addr + first;
				sgl->sg32[j++].len = segs[i].ds_len - first;
				first = 0;
			}
		} else {
			for (i = 0; i < nsegs; i++) {
				sgl->sg64[j].addr = segs[i].ds_addr + first;
				sgl->sg64[j++].len = segs[i].ds_len - first;
				first = 0;
			}
			hdr->flags |= MFI_FRAME_SGL64;
		}
		hdr->sg_count = j;
		sge_size = sc->mfi_sge_size;
	}

	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	if ((error = mfi_send_frame(sc, cm)) != 0) {
		printf("error %d in callback from mfi_send_frame\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		goto out;
	}

out:
	/* leave the lock in the state we found it */
	if (locked == 0)
		mtx_unlock(&sc->mfi_io_lock);
}
static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->MFA_enabled)
		error = mfi_tbolt_send_frame(sc, cm);
	else
		error = mfi_std_send_frame(sc, cm);

	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);

	return (error);
}

static int
mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = mfi_polled_cmd_timeout * 1000;

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;
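	/*
	 * Worked example (illustrative) of the encoding described above:
	 * a 64 byte aligned frame at bus address 0x1000040 becomes
	 * 0x1000040 >> 3 == 0x200008, whose low three bits are clear;
	 * or-ing in cm_extra_frames (at most 7, per the clamp above)
	 * supplies the prefetch hint.
	 */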
	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}
void
mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
{
	int dir;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
		dir = 0;
		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
			dir |= BUS_DMASYNC_POSTREAD;
		if (cm->cm_flags & MFI_CMD_DATAOUT)
			dir |= BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
		cm->cm_flags &= ~MFI_CMD_MAPPED;
	}

	cm->cm_flags |= MFI_CMD_COMPLETED;

	if (cm->cm_complete != NULL)
		cm->cm_complete(cm);
	else
		wakeup(cm);
}
int
mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0, error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->header.scsi_status = 0;
	abort->abort_context = (*cm_abort)->cm_frame->header.context;
	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
	abort->abort_mfi_addr_hi =
	    (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to abort command\n");
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	while (i < 5 && *cm_abort != NULL) {
		tsleep(cm_abort, 0, "mfiabort",
		    5 * hz);
		i++;
	}
	if (*cm_abort != NULL) {
		/* Force a complete if command didn't abort */
		mtx_lock(&sc->mfi_io_lock);
		(*cm_abort)->cm_complete(*cm_abort);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (error);
}
int
mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_io_frame *io;
	int error;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	io = &cm->cm_frame->io;
	io->header.cmd = MFI_CMD_LD_WRITE;
	io->header.target_id = id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
	io->lba_lo = lba & 0xffffffff;
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed dump blocks\n");
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
int
mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	int error, readop, cdb_len;
	uint32_t blkcount;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;

	readop = 0;
	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
	pass->header.target_id = id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = len;
	pass->header.cdb_len = cdb_len;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed dump blocks\n");
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
static int
mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct mfi_softc *sc;
	int error;

	sc = dev->si_drv1;

	mtx_lock(&sc->mfi_io_lock);
	if (sc->mfi_detaching)
		error = ENXIO;
	else {
		sc->mfi_flags |= MFI_FLAGS_OPEN;
		error = 0;
	}
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct mfi_softc *sc;
	struct mfi_aen *mfi_aen_entry, *tmp;

	sc = dev->si_drv1;

	mtx_lock(&sc->mfi_io_lock);
	sc->mfi_flags &= ~MFI_FLAGS_OPEN;

	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
		if (mfi_aen_entry->p == curproc) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}
	mtx_unlock(&sc->mfi_io_lock);
	return (0);
}
static int
mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
{
	int locked = 0;

	switch (opcode) {
	case MFI_DCMD_LD_DELETE:
	case MFI_DCMD_CFG_ADD:
	case MFI_DCMD_CFG_CLEAR:
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		sx_xlock(&sc->mfi_config_lock);
		locked = 1;
		break;
	}
	return (locked);
}

static void
mfi_config_unlock(struct mfi_softc *sc, int locked)
{

	if (locked)
		sx_xunlock(&sc->mfi_config_lock);
}
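/*
 * Typical pairing (a sketch, not from the original source): the token
 * returned by mfi_config_lock() is handed back to mfi_config_unlock(),
 * so opcodes outside the configuration set never touch the sx lock.
 */
#if 0
	locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
	/* ... issue the DCMD and wait for it ... */
	mfi_config_unlock(sc, locked);
#endif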
/*
 * Perform pre-issue checks on commands from userland and possibly veto
 * them.
 */
static int
mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ld2;
	int error;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = 0;
	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		if (ld == NULL)
			error = ENOENT;
		else
			error = mfi_disk_disable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			error = mfi_disk_disable(ld);
			if (error)
				break;
		}
		if (error) {
			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
				if (ld2 == ld)
					break;
				mfi_disk_enable(ld2);
			}
		}
		break;
	case MFI_DCMD_PD_STATE_SET:
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else
			break;
		if (syspd)
			error = mfi_syspd_disable(syspd);
		break;
	default:
		break;
	}
	return (error);
}
/* Perform post-issue checks on commands from userland. */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ldn;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		KASSERT(ld != NULL, ("volume disappeared"));
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, ld->ld_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		} else
			mfi_disk_enable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
				device_delete_child(sc->mfi_dev, ld->ld_dev);
			}
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		} else {
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
				mfi_disk_enable(ld);
		}
		break;
	case MFI_DCMD_CFG_ADD:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_PD_STATE_SET:
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else
			break;
		/* If the transition fails then enable the syspd again */
		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
			mfi_syspd_enable(syspd);
		break;
	}
}
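/*
 * Together the _pre/_post hooks implement a small veto protocol for
 * destructive userland DCMDs: _pre disables the affected disk(s) before
 * the command is issued, and _post either detaches the children once
 * the firmware reports success or re-enables them on failure.
 */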
static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_config_data *conf_data;
	struct mfi_command *ld_cm = NULL;
	struct mfi_ld_info *ld_info = NULL;
	struct mfi_ld_config *ld;
	char *p;
	int error = 0;

	conf_data = (struct mfi_config_data *)cm->cm_data;

	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
		p = (char *)conf_data->array;
		p += conf_data->array_size * conf_data->array_count;
		ld = (struct mfi_ld_config *)p;
		if (ld->params.isSSCD == 1)
			error = 1;
	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
		    (void **)&ld_info, sizeof(*ld_info));
		if (error) {
			device_printf(sc->mfi_dev,
			    "Failed to allocate MFI_DCMD_LD_GET_INFO %d\n",
			    error);
			if (ld_info)
				free(ld_info, M_MFIBUF);
			return 0;
		}
		ld_cm->cm_flags = MFI_CMD_DATAIN;
		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
		if (mfi_wait_command(sc, ld_cm) != 0) {
			device_printf(sc->mfi_dev, "failed to get log drv\n");
			mfi_release_command(ld_cm);
			free(ld_info, M_MFIBUF);
			return 0;
		}

		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
			free(ld_info, M_MFIBUF);
			mfi_release_command(ld_cm);
			return 0;
		}
		ld_info = (struct mfi_ld_info *)ld_cm->cm_private;

		if (ld_info->ld_config.params.isSSCD == 1)
			error = 1;

		mfi_release_command(ld_cm);
		free(ld_info, M_MFIBUF);
	}
	return error;
}
static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
{
	struct mfi_ioc_packet *ioc = (struct mfi_ioc_packet *)arg;
	struct megasas_sge *kern_sge;
	int sge_size, error;
	uint8_t i;

	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;

	if (sizeof(bus_addr_t) == 8) {
		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
		cm->cm_extra_frames = 2;
		sge_size = sizeof(struct mfi_sg64);
	} else {
		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
		sge_size = sizeof(struct mfi_sg32);
	}

	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
	for (i = 0; i < ioc->mfi_sge_count; i++) {
		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    ioc->mfi_sgl[i].iov_len,	/* maxsize */
		    2,				/* nsegments */
		    ioc->mfi_sgl[i].iov_len,	/* maxsegsize */
		    BUS_DMA_ALLOCNOW,		/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->mfi_kbuff_arr_dmat[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
		    &sc->mfi_kbuff_arr_dmamap[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
			return (ENOMEM);
		}

		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
		    &sc->mfi_kbuff_arr_busaddr[i], 0);

		if (!sc->kbuff_arr[i]) {
			device_printf(sc->mfi_dev,
			    "Could not allocate memory for kbuff_arr info\n");
			return (-1);
		}
		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;

		if (sizeof(bus_addr_t) == 8) {
			cm->cm_frame->stp.sgl.sg64[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg64[i].len =
			    ioc->mfi_sgl[i].iov_len;
		} else {
			cm->cm_frame->stp.sgl.sg32[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg32[i].len =
			    ioc->mfi_sgl[i].iov_len;
		}

		error = copyin(ioc->mfi_sgl[i].iov_base,
		    sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len);
		if (error != 0) {
			device_printf(sc->mfi_dev, "Copy in failed\n");
			return error;
		}
	}

	cm->cm_flags |= MFI_CMD_MAPPED;
	return 0;
}
static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *ioc_buf = NULL;
	uint32_t context;
	int error = 0, locked;

	if (ioc->buf_size > 0) {
		if (ioc->buf_size > 1024 * 1024)
			return (ENOMEM);
		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
		if (error) {
			device_printf(sc->mfi_dev, "failed to copyin\n");
			free(ioc_buf, M_MFIBUF);
			return (error);
		}
	}

	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);

	mtx_lock(&sc->mfi_io_lock);
	while ((cm = mfi_dequeue_free(sc)) == NULL)
		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);

	/* Save context for later */
	context = cm->cm_frame->header.context;

	dcmd = &cm->cm_frame->dcmd;
	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));

	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_data = ioc_buf;
	cm->cm_len = ioc->buf_size;

	/* restore context */
	cm->cm_frame->header.context = context;

	/* Cheat since we don't know if we're writing or reading */
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;

	error = mfi_check_command_pre(sc, cm);
	if (error)
		goto out;

	error = mfi_wait_command(sc, cm);
	if (error) {
		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
		goto out;
	}
	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
	mfi_check_command_post(sc, cm);
out:
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	mfi_config_unlock(sc, locked);
	if (ioc->buf_size > 0)
		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
	if (ioc_buf)
		free(ioc_buf, M_MFIBUF);
	return (error);
}
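/*
 * Illustrative userland use of the passthru path (a sketch under
 * assumptions, kept compiled out; the DCMD opcode and buffer layout are
 * the caller's choice): fill a struct mfi_ioc_passthru with a DCMD
 * frame and a result buffer, then issue MFIIO_PASSTHRU on the control
 * device.
 */
#if 0
	struct mfi_ioc_passthru iop;

	bzero(&iop, sizeof(iop));
	iop.ioc_frame.opcode = MFI_DCMD_CTRL_GETINFO;
	iop.ioc_frame.header.data_len = sizeof(struct mfi_ctrl_info);
	iop.buf = buf;		/* caller-supplied buffer */
	iop.buf_size = sizeof(struct mfi_ctrl_info);
	error = ioctl(fd, MFIIO_PASSTHRU, &iop);
#endif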
#define	PTRIN(p)	((void *)(uintptr_t)(p))

static int
mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	struct mfi_ioc_packet *ioc;
#ifdef COMPAT_FREEBSD32
	struct mfi_ioc_packet32 *ioc32;
#endif
	struct mfi_ioc_aen *aen;
	struct mfi_command *cm = NULL;
	uint32_t context = 0;
	union mfi_sense_ptr sense_ptr;
	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
	size_t len;
	int i, res;
	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
#ifdef COMPAT_FREEBSD32
	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
	struct mfi_ioc_passthru iop_swab;
#endif
	int error, locked;

	sc = dev->si_drv1;
	error = 0;

	if (sc->adpreset)
		return (EBUSY);

	if (sc->hw_crit_error)
		return (EBUSY);

	if (sc->issuepend_done == 0)
		return (EBUSY);

	switch (cmd) {
	case MFIIO_STATS:
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;
	case MFIIO_QUERY_DISK:
	{
		struct mfi_query_disk *qd;
		struct mfi_disk *ld;

		qd = (struct mfi_query_disk *)arg;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == qd->array_id)
				break;
		}
		if (ld == NULL) {
			qd->present = 0;
			mtx_unlock(&sc->mfi_io_lock);
			return (0);
		}
		qd->present = 1;
		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
			qd->open = 1;
		bzero(qd->devname, SPECNAMELEN + 1);
		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
		mtx_unlock(&sc->mfi_io_lock);
		break;
	}
	case MFI_CMD:
#ifdef COMPAT_FREEBSD32
	case MFI_CMD32:
#endif
		{
		devclass_t devclass;
		int adapter;

		ioc = (struct mfi_ioc_packet *)arg;
		adapter = ioc->mfi_adapter_no;
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
		}
		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;
		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;
		cm->cm_frame->header.context = cm->cm_index;

		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		    2 * MEGAMFI_FRAME_SIZE);
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (ioc->mfi_sge_count) {
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		}
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		/* Legacy app shim */
		if (cm->cm_flags == 0)
			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
#ifdef COMPAT_FREEBSD32
			if (cmd == MFI_CMD) {
#endif
				/* Native */
				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
#ifdef COMPAT_FREEBSD32
			} else {
				/* 32bit on 64bit */
				ioc32 = (struct mfi_ioc_packet32 *)ioc;
				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
			}
#endif
			cm->cm_len += cm->cm_stp_len;
		}
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			res = mfi_stp_cmd(sc, cm, arg);
			if (res != 0)
				goto out;
		} else {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyin(addr, temp, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy in failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc,
			    cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}
		mtx_lock(&sc->mfi_io_lock);
		skip_pre_post = mfi_check_for_sscd(sc, cm);
		if (!skip_pre_post) {
			error = mfi_check_command_pre(sc, cm);
			if (error) {
				mtx_unlock(&sc->mfi_io_lock);
				goto out;
			}
		}
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller poll failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}
		if (!skip_pre_post)
			mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);
		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyout(temp, addr, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy out failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (ioc->mfi_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef COMPAT_FREEBSD32
			if (cmd != MFI_CMD) {
				/*
				 * not 64bit native so zero out any address
				 * over 32bit
				 */
				sense_ptr.addr.high = 0;
			}
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			for (i = 0; i < 2; i++) {
				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
						bus_dmamap_unload(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->kbuff_arr[i] != NULL)
						bus_dmamem_free(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->kbuff_arr[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
						bus_dma_tag_destroy(
						    sc->mfi_kbuff_arr_dmat[i]);
				}
			}
		}
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		break;
		}
	case MFI_SET_AEN:
		aen = (struct mfi_ioc_aen *)arg;
		mtx_lock(&sc->mfi_io_lock);
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);
		mtx_unlock(&sc->mfi_io_lock);
		break;
	case MFI_LINUX_CMD_2:		/* Firmware Linux ioctl shim */
		{
		devclass_t devclass;
		struct mfi_linux_ioc_packet l_ioc;
		int adapter;

		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error)
			return (error);
		adapter = l_ioc.lioc_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		}
	case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
		{
		devclass_t devclass;
		struct mfi_linux_ioc_aen l_aen;
		int adapter;

		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error)
			return (error);
		adapter = l_aen.laen_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		}
#ifdef COMPAT_FREEBSD32
	case MFIIO_PASSTHRU32:
		if (!SV_CURPROC_FLAG(SV_ILP32)) {
			error = ENOTTY;
			break;
		}
		iop_swab.ioc_frame = iop32->ioc_frame;
		iop_swab.buf_size = iop32->buf_size;
		iop_swab.buf = PTRIN(iop32->buf);
		iop = &iop_swab;
		/* FALLTHROUGH */
#endif
	case MFIIO_PASSTHRU:
		error = mfi_user_command(sc, iop);
#ifdef COMPAT_FREEBSD32
		if (cmd == MFIIO_PASSTHRU32)
			iop32->ioc_frame = iop_swab.ioc_frame;
#endif
		break;
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	union mfi_sense_ptr sense_ptr;
	uint32_t context = 0;
	uint8_t *data = NULL, *temp;
	int i;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case MFI_LINUX_CMD_2:		/* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		    2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    temp,
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller poll failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
				    PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}
		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet *)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		error = copyout(&cm->cm_frame->header.cmd_status,
		    &((struct mfi_linux_ioc_packet *)arg)
		    ->lioc_frame.hdr.cmd_status,
		    1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
			    "Copy out failed\n");
			goto out;
		}

out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
			revents |= POLLERR;
		}
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return revents;
}
static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		timedout = 0;
		deadline = time_uptime - mfi_cmd_timeout;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp <= deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				timedout++;
			}
		}
		mtx_unlock(&sc->mfi_io_lock);
	}
}
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;
	int timedout = 0;

	deadline = time_uptime - mfi_cmd_timeout;
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    mfi_cmd_timeout * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp)
				    );
				MFI_PRINT_CMD(cm);
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever we do
				 * not fail them as there is no way to tell if
				 * the controller has actually processed them
				 * or not.
				 *
				 * In addition it's very likely that force
				 * failing a command here would cause a panic,
				 * e.g. in UFS.
				 */
				timedout++;
			}
		}
	}
	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);
}
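/*
 * Note: the watchdog re-arms itself above, so commands that remain
 * stuck are re-reported every mfi_cmd_timeout seconds until they
 * complete or the adapter is reset.
 */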