2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
4 * Copyright (c) 2006 IronPort Systems
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2007 LSI Corp.
30 * Copyright (c) 2007 Rajesh Prabhakaran.
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sysctl.h>
63 #include <sys/malloc.h>
64 #include <sys/kernel.h>
66 #include <sys/selinfo.h>
69 #include <sys/eventhandler.h>
72 #include <sys/ioccom.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
79 #include <machine/bus.h>
80 #include <machine/resource.h>
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
/*
 * Forward declarations for this file's static helpers, loader tunables /
 * sysctls under hw.mfi.*, and the management character-device plumbing.
 *
 * NOTE(review): the numeric prefix on each line below (and throughout this
 * file) looks like leftover line numbering from an extraction, and gaps in
 * that numbering indicate lines are missing from this view -- compare
 * against the pristine sys/dev/mfi/mfi.c before building.
 */
88 static int mfi_alloc_commands(struct mfi_softc *);
89 static int mfi_comms_init(struct mfi_softc *);
90 static int mfi_get_controller_info(struct mfi_softc *);
91 static int mfi_get_log_state(struct mfi_softc *,
92 struct mfi_evt_log_state **);
93 static int mfi_parse_entries(struct mfi_softc *, int, int);
94 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void mfi_startup(void *arg);
96 static void mfi_intr(void *arg);
97 static void mfi_ldprobe(struct mfi_softc *sc);
98 static void mfi_syspdprobe(struct mfi_softc *sc);
99 static void mfi_handle_evt(void *context, int pending);
100 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void mfi_aen_complete(struct mfi_command *);
102 static int mfi_add_ld(struct mfi_softc *sc, int);
103 static void mfi_add_ld_complete(struct mfi_command *);
104 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107 static void mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void mfi_timeout(void *);
115 static int mfi_user_command(struct mfi_softc *,
116 struct mfi_ioc_passthru *);
/* Per-controller-family register accessors, selected in mfi_attach(). */
117 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
125 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
/* Loader tunables / runtime knobs, exported under hw.mfi.*. */
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
134 "MFI driver parameters");
135 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
137 0, "event message locale");
139 static int mfi_event_class = MFI_EVT_CLASS_INFO;
140 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
141 0, "event message class");
143 static int mfi_max_cmds = 128;
144 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
145 0, "Max commands limit (-1 = controller limit)");
147 static int mfi_detect_jbod_change = 1;
148 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
149 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
/* Non-static: mfi_polled_cmd_timeout is referenced from other mfi files. */
151 int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
152 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
153 &mfi_polled_cmd_timeout, 0,
154 "Polled command timeout - used for firmware flash etc (in seconds)");
156 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
157 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
158 0, "Command timeout (in seconds)");
160 /* Management interface */
161 static d_open_t mfi_open;
162 static d_close_t mfi_close;
163 static d_ioctl_t mfi_ioctl;
164 static d_poll_t mfi_poll;
/* cdevsw for /dev/mfi%d; some members (.d_open, .d_poll, .d_name) are
 * not visible here -- presumably elided by the extraction. */
166 static struct cdevsw mfi_cdevsw = {
167 .d_version = D_VERSION,
170 .d_close = mfi_close,
171 .d_ioctl = mfi_ioctl,
176 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
178 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
179 struct mfi_skinny_dma_info mfi_skinny;
/* Enable (unmask) the outbound interrupt on xscale (1064R) controllers. */
182 mfi_enable_intr_xscale(struct mfi_softc *sc)
184 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on PPC-style controllers.  For 1078 and GEN2 the
 * outbound doorbell is cleared first, then the family-specific interrupt
 * bit is unmasked; SKINNY only needs the unmask write.
 */
188 mfi_enable_intr_ppc(struct mfi_softc *sc)
190 if (sc->mfi_flags & MFI_FLAGS_1078) {
191 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
192 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
194 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
195 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
196 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
/* Read the firmware status word from the xscale outbound message register. */
204 mfi_read_fw_status_xscale(struct mfi_softc *sc)
206 return MFI_READ4(sc, MFI_OMSG0);
/* Read the firmware status word from the PPC outbound scratchpad register. */
210 mfi_read_fw_status_ppc(struct mfi_softc *sc)
212 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether the xscale controller raised a valid interrupt; if so,
 * acknowledge it by writing the status back.  The return statements are
 * not visible in this view (elided).
 */
216 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
220 status = MFI_READ4(sc, MFI_OSTS);
221 if ((status & MFI_OSTS_INTR_VALID) == 0)
224 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check for and acknowledge an interrupt on PPC-style controllers.  Each
 * family has its own "reply message" bit; if it is not set the interrupt
 * is not ours.  SKINNY acks via MFI_OSTS, the others via the doorbell
 * clear register MFI_ODCR0.
 */
229 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
233 status = MFI_READ4(sc, MFI_OSTS);
234 if (sc->mfi_flags & MFI_FLAGS_1078) {
235 if (!(status & MFI_1078_RM)) {
239 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
240 if (!(status & MFI_GEN2_RM)) {
244 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
245 if (!(status & MFI_SKINNY_RM)) {
249 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
250 MFI_WRITE4(sc, MFI_OSTS, status);
252 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to the xscale inbound queue port: the frame bus address
 * (shifted, since the low bits are used for the frame count) OR'd with
 * the number of frames.
 */
257 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
259 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
/*
 * Post a command to the PPC-style inbound queue port.  SKINNY uses a
 * 64-bit port (low then high dword); the frame count occupies bits 1..n
 * and bit 0 is always set.
 */
263 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
265 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
266 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
267 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
269 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
/*
 * Drive the controller firmware to the READY state, issuing whatever
 * handshake/ack writes each intermediate state requires and then polling
 * the status register until the state changes or a timeout expires.
 * NOTE(review): the switch statement header, break statements and several
 * closing braces are not visible here (elided by the extraction); the
 * case labels below belong to a switch on fw_state.
 */
274 mfi_transition_firmware(struct mfi_softc *sc)
276 uint32_t fw_state, cur_state;
278 uint32_t cur_abs_reg_val = 0;
279 uint32_t prev_abs_reg_val = 0;
281 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
282 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
283 while (fw_state != MFI_FWSTATE_READY) {
285 device_printf(sc->mfi_dev, "Waiting for firmware to "
287 cur_state = fw_state;
289 case MFI_FWSTATE_FAULT:
290 device_printf(sc->mfi_dev, "Firmware fault\n");
292 case MFI_FWSTATE_WAIT_HANDSHAKE:
/* SKINNY/ThunderBolt use the alternate doorbell register. */
293 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
294 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
296 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
297 max_wait = MFI_RESET_WAIT_TIME;
299 case MFI_FWSTATE_OPERATIONAL:
300 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
301 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
303 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
304 max_wait = MFI_RESET_WAIT_TIME;
306 case MFI_FWSTATE_UNDEFINED:
307 case MFI_FWSTATE_BB_INIT:
308 max_wait = MFI_RESET_WAIT_TIME;
310 case MFI_FWSTATE_FW_INIT_2:
311 max_wait = MFI_RESET_WAIT_TIME;
313 case MFI_FWSTATE_FW_INIT:
314 case MFI_FWSTATE_FLUSH_CACHE:
315 max_wait = MFI_RESET_WAIT_TIME;
317 case MFI_FWSTATE_DEVICE_SCAN:
318 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
319 prev_abs_reg_val = cur_abs_reg_val;
321 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
322 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
323 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
325 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
326 max_wait = MFI_RESET_WAIT_TIME;
329 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
/* Poll ~10x per second until the state leaves cur_state or we time out. */
333 for (i = 0; i < (max_wait * 10); i++) {
334 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
335 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
336 if (fw_state == cur_state)
341 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
342 /* Check the device scanning progress */
343 if (prev_abs_reg_val != cur_abs_reg_val) {
347 if (fw_state == cur_state) {
348 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * busdma load callback: stash the single segment's bus address into the
 * caller-provided bus_addr_t (passed via arg; that assignment line is
 * not visible here).
 */
357 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
362 *addr = segs[0].ds_addr;
/*
 * mfi_attach: bring a probed controller fully online.
 *
 * Sequence (as visible here): init locks/queues/tasks, select the
 * per-family register accessors, wait for firmware READY, create the
 * busdma tags and allocate DMA memory (version buffer, ThunderBolt pools,
 * data-buffer tag, comms queues, command frames, sense buffers), allocate
 * the command array, perform the INIT handshake, hook the interrupt,
 * query controller info, register the config hook / shutdown handler /
 * control device / sysctls, attach children and start the watchdog.
 *
 * NOTE(review): many lines of this function (error gotos, closing braces,
 * several declarations such as 'status' and 'dev_t') are missing from
 * this view -- elided by the extraction.
 */
366 mfi_attach(struct mfi_softc *sc)
369 int error, commsz, framessz, sensesz;
370 int frames, unit, max_fw_sge, max_fw_cmds;
371 uint32_t tb_mem_size = 0;
377 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
380 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
381 sx_init(&sc->mfi_config_lock, "MFI config");
382 TAILQ_INIT(&sc->mfi_ld_tqh);
383 TAILQ_INIT(&sc->mfi_syspd_tqh);
384 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
385 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
386 TAILQ_INIT(&sc->mfi_evt_queue);
387 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
388 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
389 TAILQ_INIT(&sc->mfi_aen_pids);
390 TAILQ_INIT(&sc->mfi_cam_ccbq);
398 sc->last_seq_num = 0;
/* OCR is disabled until controller info confirms support (cleared below). */
399 sc->disableOnlineCtrlReset = 1;
400 sc->issuepend_done = 1;
401 sc->hw_crit_error = 0;
/* Select register accessors for the controller family. */
403 if (sc->mfi_flags & MFI_FLAGS_1064R) {
404 sc->mfi_enable_intr = mfi_enable_intr_xscale;
405 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
406 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
407 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
408 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
409 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
410 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
411 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
412 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
413 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
414 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
416 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
418 sc->mfi_enable_intr = mfi_enable_intr_ppc;
419 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
420 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
421 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
424 /* Before we get too far, see if the firmware is working */
425 if ((error = mfi_transition_firmware(sc)) != 0) {
426 device_printf(sc->mfi_dev, "Firmware not in READY state, "
427 "error %d\n", error);
431 /* Start: LSIP200113393 */
432 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
433 1, 0, /* algnmnt, boundary */
434 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
435 BUS_SPACE_MAXADDR, /* highaddr */
436 NULL, NULL, /* filter, filterarg */
437 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
439 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
441 NULL, NULL, /* lockfunc, lockarg */
442 &sc->verbuf_h_dmat)) {
443 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
446 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
447 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
448 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
451 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
452 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
453 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
454 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
455 /* End: LSIP200113393 */
458 * Get information needed for sizing the contiguous memory for the
459 * frame pool. Size down the sgl parameter since we know that
460 * we will never need more than what's required for MFI_MAXPHYS.
461 * It would be nice if these constants were available at runtime
462 * instead of compile time.
464 status = sc->mfi_read_fw_status(sc);
465 max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
/* Honor the hw.mfi.max_cmds tunable, clamped to the firmware's limit. */
466 if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
467 device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
468 max_fw_cmds, mfi_max_cmds);
469 sc->mfi_max_fw_cmds = mfi_max_cmds;
471 sc->mfi_max_fw_cmds = max_fw_cmds;
473 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
474 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
476 /* ThunderBolt Support get the contiguous memory */
478 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
479 mfi_tbolt_init_globals(sc);
480 device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
481 "MaxSgl = %d, state = %#x\n", max_fw_cmds,
482 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
483 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
485 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
486 1, 0, /* algnmnt, boundary */
487 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
488 BUS_SPACE_MAXADDR, /* highaddr */
489 NULL, NULL, /* filter, filterarg */
490 tb_mem_size, /* maxsize */
492 tb_mem_size, /* maxsegsize */
494 NULL, NULL, /* lockfunc, lockarg */
496 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
499 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
500 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
501 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
504 bzero(sc->request_message_pool, tb_mem_size);
505 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
506 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
508 /* For ThunderBolt memory init */
509 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
510 0x100, 0, /* alignmnt, boundary */
511 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
512 BUS_SPACE_MAXADDR, /* highaddr */
513 NULL, NULL, /* filter, filterarg */
514 MFI_FRAME_SIZE, /* maxsize */
516 MFI_FRAME_SIZE, /* maxsegsize */
518 NULL, NULL, /* lockfunc, lockarg */
519 &sc->mfi_tb_init_dmat)) {
520 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
523 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
524 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
525 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
528 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
529 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
530 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
531 &sc->mfi_tb_init_busaddr, 0);
532 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
534 device_printf(sc->mfi_dev,
535 "Thunderbolt pool preparation error\n");
540 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
541 we are taking it different from what we have allocated for Request
542 and reply descriptors to avoid confusion later
544 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
545 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
546 1, 0, /* algnmnt, boundary */
547 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
548 BUS_SPACE_MAXADDR, /* highaddr */
549 NULL, NULL, /* filter, filterarg */
550 tb_mem_size, /* maxsize */
552 tb_mem_size, /* maxsegsize */
554 NULL, NULL, /* lockfunc, lockarg */
555 &sc->mfi_tb_ioc_init_dmat)) {
556 device_printf(sc->mfi_dev,
557 "Cannot allocate comms DMA tag\n");
560 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
561 (void **)&sc->mfi_tb_ioc_init_desc,
562 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
563 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
566 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
567 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
568 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
569 &sc->mfi_tb_ioc_init_busaddr, 0);
572 * Create the dma tag for data buffers. Used both for block I/O
573 * and for various internal data queries.
575 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
576 1, 0, /* algnmnt, boundary */
577 BUS_SPACE_MAXADDR, /* lowaddr */
578 BUS_SPACE_MAXADDR, /* highaddr */
579 NULL, NULL, /* filter, filterarg */
580 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
581 sc->mfi_max_sge, /* nsegments */
582 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
583 BUS_DMA_ALLOCNOW, /* flags */
584 busdma_lock_mutex, /* lockfunc */
585 &sc->mfi_io_lock, /* lockfuncarg */
586 &sc->mfi_buffer_dmat)) {
587 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
592 * Allocate DMA memory for the comms queues. Keep it under 4GB for
593 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
594 * entry, so the calculated size here will be will be 1 more than
595 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
597 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
598 sizeof(struct mfi_hwcomms);
599 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
600 1, 0, /* algnmnt, boundary */
601 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
602 BUS_SPACE_MAXADDR, /* highaddr */
603 NULL, NULL, /* filter, filterarg */
604 commsz, /* maxsize */
606 commsz, /* maxsegsize */
608 NULL, NULL, /* lockfunc, lockarg */
609 &sc->mfi_comms_dmat)) {
610 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
613 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
614 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
615 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
618 bzero(sc->mfi_comms, commsz);
619 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
620 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
622 * Allocate DMA memory for the command frames. Keep them in the
623 * lower 4GB for efficiency. Calculate the size of the commands at
624 * the same time; each command is one 64 byte frame plus a set of
625 * additional frames for holding sg lists or other data.
626 * The assumption here is that the SG list will start at the second
627 * frame and not use the unused bytes in the first frame. While this
628 * isn't technically correct, it simplifies the calculation and allows
629 * for command frames that might be larger than an mfi_io_frame.
631 if (sizeof(bus_addr_t) == 8) {
632 sc->mfi_sge_size = sizeof(struct mfi_sg64);
633 sc->mfi_flags |= MFI_FLAGS_SG64;
635 sc->mfi_sge_size = sizeof(struct mfi_sg32);
637 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
638 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
639 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
640 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
641 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
642 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
643 64, 0, /* algnmnt, boundary */
644 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
645 BUS_SPACE_MAXADDR, /* highaddr */
646 NULL, NULL, /* filter, filterarg */
647 framessz, /* maxsize */
649 framessz, /* maxsegsize */
651 NULL, NULL, /* lockfunc, lockarg */
652 &sc->mfi_frames_dmat)) {
653 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
656 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
657 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
658 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
661 bzero(sc->mfi_frames, framessz);
662 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
663 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
665 * Allocate DMA memory for the frame sense data. Keep them in the
666 * lower 4GB for efficiency
668 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
669 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
670 4, 0, /* algnmnt, boundary */
671 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
672 BUS_SPACE_MAXADDR, /* highaddr */
673 NULL, NULL, /* filter, filterarg */
674 sensesz, /* maxsize */
676 sensesz, /* maxsegsize */
678 NULL, NULL, /* lockfunc, lockarg */
679 &sc->mfi_sense_dmat)) {
680 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
683 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
684 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
685 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
688 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
689 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
690 if ((error = mfi_alloc_commands(sc)) != 0)
693 /* Before moving the FW to operational state, check whether
694 * hostmemory is required by the FW or not
697 /* ThunderBolt MFI_IOC2 INIT */
698 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
699 sc->mfi_disable_intr(sc);
700 mtx_lock(&sc->mfi_io_lock);
701 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
702 device_printf(sc->mfi_dev,
703 "TB Init has failed with error %d\n",error);
704 mtx_unlock(&sc->mfi_io_lock);
707 mtx_unlock(&sc->mfi_io_lock);
709 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
711 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
712 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
714 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
717 sc->mfi_intr_ptr = mfi_intr_tbolt;
718 sc->mfi_enable_intr(sc);
720 if ((error = mfi_comms_init(sc)) != 0)
723 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
724 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
725 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
728 sc->mfi_intr_ptr = mfi_intr;
729 sc->mfi_enable_intr(sc);
731 if ((error = mfi_get_controller_info(sc)) != 0)
733 sc->disableOnlineCtrlReset = 0;
735 /* Register a config hook to probe the bus for arrays */
736 sc->mfi_ich.ich_func = mfi_startup;
737 sc->mfi_ich.ich_arg = sc;
738 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
739 device_printf(sc->mfi_dev, "Cannot establish configuration "
743 mtx_lock(&sc->mfi_io_lock);
/*
 * NOTE(review): the comma operator makes this condition "0 != 0", i.e.
 * always false, so mfi_aen_setup() failures never take the error path
 * (error is still assigned).  This may be a deliberate "don't fail
 * attach on AEN setup error" idiom, but confirm against upstream.
 */
744 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
745 mtx_unlock(&sc->mfi_io_lock);
748 mtx_unlock(&sc->mfi_io_lock);
751 * Register a shutdown handler.
753 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
754 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
755 device_printf(sc->mfi_dev, "Warning: shutdown event "
756 "registration failed\n");
760 * Create the control device for doing management
762 unit = device_get_unit(sc->mfi_dev);
763 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
764 0640, "mfi%d", unit);
766 make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
767 sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
768 if (sc->mfi_cdev != NULL)
769 sc->mfi_cdev->si_drv1 = sc;
770 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
771 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
772 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
773 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
774 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
775 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
776 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
777 &sc->mfi_keep_deleted_volumes, 0,
778 "Don't detach the mfid device for a busy volume that is deleted");
780 device_add_child(sc->mfi_dev, "mfip", -1);
781 bus_generic_attach(sc->mfi_dev);
783 /* Start the timeout watchdog */
784 callout_init(&sc->mfi_watchdog_callout, 1);
785 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
/* ThunderBolt: prime the LD map sync machinery once attach is complete. */
788 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
789 mtx_lock(&sc->mfi_io_lock);
790 mfi_tbolt_sync_map_info(sc);
791 mtx_unlock(&sc->mfi_io_lock);
/*
 * Allocate the driver's command array and wire each mfi_command to its
 * slice of the preallocated frame and sense DMA areas, creating a busdma
 * map per command for data transfers.  On a map-creation failure, tear
 * down the maps created so far and free the array.
 *
 * BUG FIX: the cleanup loop below iterated j = 0..i-1 but indexed the
 * command array with 'i', repeatedly destroying the dmamap of the one
 * command that had just failed/been released and leaking the maps of
 * commands 0..i-1.  Index with 'j' so each previously created map is
 * destroyed exactly once.
 *
 * NOTE(review): several lines of this function (the 'else' keyword,
 * closing braces, return statements) are elided in this view.
 */
798 mfi_alloc_commands(struct mfi_softc *sc)
800 struct mfi_command *cm;
804 * XXX Should we allocate all the commands up front, or allocate on
805 * demand later like 'aac' does?
807 sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
808 sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
810 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
811 cm = &sc->mfi_commands[i];
812 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
813 sc->mfi_cmd_size * i);
814 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
815 sc->mfi_cmd_size * i;
/* The context field round-trips through the firmware to identify cm. */
816 cm->cm_frame->header.context = i;
817 cm->cm_sense = &sc->mfi_sense[i];
818 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
821 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
822 &cm->cm_dmamap) == 0) {
823 mtx_lock(&sc->mfi_io_lock);
824 mfi_release_command(cm);
825 mtx_unlock(&sc->mfi_io_lock);
827 device_printf(sc->mfi_dev, "Failed to allocate %d "
828 "command blocks, only allocated %d\n",
829 sc->mfi_max_fw_cmds, i - 1);
830 for (j = 0; j < i; j++) {
831 cm = &sc->mfi_commands[j];
832 bus_dmamap_destroy(sc->mfi_buffer_dmat,
835 free(sc->mfi_commands, M_MFIBUF);
836 sc->mfi_commands = NULL;
/*
 * Return a command to the free pool: scrub the frame header (preserving
 * the context word at hdr_data[2..3], which identifies the command to
 * the firmware), pull the command off any queue it is still on, hand
 * ThunderBolt sub-commands back to their pool, reset the per-command
 * bookkeeping and enqueue it on the free list.  Caller must hold
 * mfi_io_lock.
 */
846 mfi_release_command(struct mfi_command *cm)
848 struct mfi_frame_header *hdr;
851 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
854 * Zero out the important fields of the frame, but make sure the
855 * context field is preserved. For efficiency, handle the fields
856 * as 32 bit words. Clear out the first S/G entry too for safety.
858 hdr = &cm->cm_frame->header;
859 if (cm->cm_data != NULL && hdr->sg_count) {
860 cm->cm_sg->sg32[0].len = 0;
861 cm->cm_sg->sg32[0].addr = 0;
865 * Command may be on other queues e.g. busy queue depending on the
866 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
869 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
871 if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
872 mfi_remove_ready(cm);
874 /* We're not expecting it to be on any other queue but check */
875 if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
876 panic("Command %p is still on another queue, flags = %#x",
/* ThunderBolt commands carry a borrowed tbolt sub-command; return it. */
881 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
882 mfi_tbolt_return_cmd(cm->cm_sc,
883 cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
887 hdr_data = (uint32_t *)cm->cm_frame;
888 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
889 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
890 hdr_data[4] = 0; /* flags, timeout */
891 hdr_data[5] = 0; /* data_len */
893 cm->cm_extra_frames = 0;
895 cm->cm_complete = NULL;
896 cm->cm_private = NULL;
899 cm->cm_total_frame_size = 0;
900 cm->retry_for_fw_reset = 0;
902 mfi_enqueue_free(cm);
/*
 * Build a DCMD (direct command) for 'opcode', optionally allocating a
 * data buffer of 'bufsize' bytes.  On success *cmp receives the command
 * and, if the caller passed an empty *bufp, *bufp receives the allocated
 * buffer (owned by the command as cm_private).  Caller must hold
 * mfi_io_lock.  Error returns (command pool empty, malloc failure) are
 * present in the original but elided from this view.
 */
906 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
907 uint32_t opcode, void **bufp, size_t bufsize)
909 struct mfi_command *cm;
910 struct mfi_dcmd_frame *dcmd;
912 uint32_t context = 0;
914 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
916 cm = mfi_dequeue_free(sc);
920 /* Zero out the MFI frame */
921 context = cm->cm_frame->header.context;
922 bzero(cm->cm_frame, sizeof(union mfi_frame));
923 cm->cm_frame->header.context = context;
/* Allocate a data buffer only when the caller wants one. */
925 if ((bufsize > 0) && (bufp != NULL)) {
927 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
929 mfi_release_command(cm);
938 dcmd = &cm->cm_frame->dcmd;
939 bzero(dcmd->mbox, MFI_MBOX_SIZE);
940 dcmd->header.cmd = MFI_CMD_DCMD;
941 dcmd->header.timeout = 0;
942 dcmd->header.flags = 0;
943 dcmd->header.data_len = bufsize;
944 dcmd->header.scsi_status = 0;
945 dcmd->opcode = opcode;
946 cm->cm_sg = &dcmd->sgl;
947 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
950 cm->cm_private = buf;
951 cm->cm_len = bufsize;
/* Hand the freshly allocated buffer back to the caller. */
954 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI INIT command that tells the firmware where the reply
 * queue, producer index and consumer index live in host memory.  The
 * init_qinfo structure is staged in the scratch space directly after the
 * command's first frame (the "abused" SG-list area).  The command is
 * issued polled, before interrupts are usable.
 */
960 mfi_comms_init(struct mfi_softc *sc)
962 struct mfi_command *cm;
963 struct mfi_init_frame *init;
964 struct mfi_init_qinfo *qinfo;
966 uint32_t context = 0;
968 mtx_lock(&sc->mfi_io_lock);
969 if ((cm = mfi_dequeue_free(sc)) == NULL) {
970 mtx_unlock(&sc->mfi_io_lock);
974 /* Zero out the MFI frame */
975 context = cm->cm_frame->header.context;
976 bzero(cm->cm_frame, sizeof(union mfi_frame));
977 cm->cm_frame->header.context = context;
980 * Abuse the SG list area of the frame to hold the init_qinfo
983 init = &cm->cm_frame->init;
984 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
986 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One extra reply-queue entry, per the hardware's requirement. */
987 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
988 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
989 offsetof(struct mfi_hwcomms, hw_reply_q);
990 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
991 offsetof(struct mfi_hwcomms, hw_pi);
992 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
993 offsetof(struct mfi_hwcomms, hw_ci);
995 init->header.cmd = MFI_CMD_INIT;
996 init->header.data_len = sizeof(struct mfi_init_qinfo);
997 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
999 cm->cm_flags = MFI_CMD_POLLED;
1001 if ((error = mfi_mapcmd(sc, cm)) != 0)
1002 device_printf(sc->mfi_dev, "failed to send init command\n");
1003 mfi_release_command(cm);
1004 mtx_unlock(&sc->mfi_io_lock);
/*
 * Query the controller via MFI_DCMD_CTRL_GETINFO (polled) and derive the
 * maximum I/O size and the OCR capability flag.  If the query fails, a
 * conservative mfi_max_io is computed from the SG limit instead (the
 * divisor line is elided here).  The mfi_ctrl_info buffer is freed before
 * return (free elided from this view).
 */
1010 mfi_get_controller_info(struct mfi_softc *sc)
1012 struct mfi_command *cm = NULL;
1013 struct mfi_ctrl_info *ci = NULL;
1014 uint32_t max_sectors_1, max_sectors_2;
1017 mtx_lock(&sc->mfi_io_lock);
1018 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1019 (void **)&ci, sizeof(*ci));
1022 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1024 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1025 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: bound max I/O by what the SG list can describe. */
1026 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1032 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1033 BUS_DMASYNC_POSTREAD);
1034 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Max I/O is the smaller of the stripe-derived and absolute FW limits. */
1036 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1037 max_sectors_2 = ci->max_request_size;
1038 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1039 sc->disableOnlineCtrlReset =
1040 ci->properties.OnOffProperties.disableOnlineCtrlReset;
1046 mfi_release_command(cm);
1047 mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the controller's event-log state (boot/shutdown/newest sequence
 * numbers) via MFI_DCMD_CTRL_EVENT_GETINFO, polled.  On success
 * *log_state points at a caller-owned M_MFIBUF allocation.  Caller must
 * hold mfi_io_lock.
 */
1052 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1054 struct mfi_command *cm = NULL;
1057 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1058 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1059 (void **)log_state, sizeof(**log_state));
1062 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1064 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1065 device_printf(sc->mfi_dev, "Failed to get log state\n");
1069 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1070 BUS_DMASYNC_POSTREAD);
1071 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1075 mfi_release_command(cm);
/*
 * Arm asynchronous event notification (AEN).  When seq_start is 0 the
 * event log state is queried, any events recorded between the last
 * shutdown and the newest entry are replayed via mfi_parse_entries(),
 * and registration continues from the newest sequence number.
 * Caller must hold mfi_io_lock (asserted).
 */
1081 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1083 struct mfi_evt_log_state *log_state = NULL;
1084 union mfi_evt class_locale;
1088 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Build the class/locale filter from the driver-wide tunables. */
1090 class_locale.members.reserved = 0;
1091 class_locale.members.locale = mfi_event_locale;
1092 class_locale.members.evt_class = mfi_event_class;
1094 if (seq_start == 0) {
1095 if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1097 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1100 * Walk through any events that fired since the last
1103 if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1104 log_state->newest_seq_num)) != 0)
1106 seq = log_state->newest_seq_num;
1109 error = mfi_aen_register(sc, seq, class_locale.word);
/* free(NULL) is a no-op, so this is safe on the seq_start != 0 path. */
1111 free(log_state, M_MFIBUF);
/*
 * Queue a command and sleep until its completion handler marks it
 * MFI_CMD_COMPLETED.  Returns cm->cm_error.  Caller must hold
 * mfi_io_lock; msleep() drops and re-acquires it while waiting.
 */
1117 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1120 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1121 cm->cm_complete = NULL;
1124 * MegaCli can issue a DCMD of 0. In this case do nothing
1125 * and return 0 to it as status
1127 if (cm->cm_frame->dcmd.opcode == 0) {
1128 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1130 return (cm->cm_error);
1132 mfi_enqueue_ready(cm);
/* NOTE(review): the surrounding loop construct is elided in this extract. */
1134 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1135 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1136 return (cm->cm_error);
/*
 * Tear down all softc resources on detach or failed attach: watchdog
 * callout, /dev node, command array and per-command DMA maps, IRQ,
 * the sense/frame/comms DMA areas, ThunderBolt-specific pools, and
 * finally the parent DMA tag and locks.  Each release is guarded so
 * the routine is safe to call with a partially initialized softc.
 */
1140 mfi_free(struct mfi_softc *sc)
1142 struct mfi_command *cm;
1145 callout_drain(&sc->mfi_watchdog_callout);
1147 if (sc->mfi_cdev != NULL)
1148 destroy_dev(sc->mfi_cdev);
/* Per-command DMA maps, then the command array itself. */
1150 if (sc->mfi_commands != NULL) {
1151 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1152 cm = &sc->mfi_commands[i];
1153 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1155 free(sc->mfi_commands, M_MFIBUF);
1156 sc->mfi_commands = NULL;
1160 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1161 if (sc->mfi_irq != NULL)
1162 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer DMA area: unload map, free memory, destroy tag. */
1165 if (sc->mfi_sense_busaddr != 0)
1166 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1167 if (sc->mfi_sense != NULL)
1168 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1169 sc->mfi_sense_dmamap);
1170 if (sc->mfi_sense_dmat != NULL)
1171 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Frame pool DMA area, same unload/free/destroy order. */
1173 if (sc->mfi_frames_busaddr != 0)
1174 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1175 if (sc->mfi_frames != NULL)
1176 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1177 sc->mfi_frames_dmamap);
1178 if (sc->mfi_frames_dmat != NULL)
1179 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Reply/producer/consumer communications area. */
1181 if (sc->mfi_comms_busaddr != 0)
1182 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1183 if (sc->mfi_comms != NULL)
1184 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1185 sc->mfi_comms_dmamap);
1186 if (sc->mfi_comms_dmat != NULL)
1187 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1189 /* ThunderBolt contiguous memory free here */
1190 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1191 if (sc->mfi_tb_busaddr != 0)
1192 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1193 if (sc->request_message_pool != NULL)
1194 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1196 if (sc->mfi_tb_dmat != NULL)
1197 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1199 /* Version buffer memory free */
1200 /* Start LSIP200113393 */
1201 if (sc->verbuf_h_busaddr != 0)
1202 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1203 if (sc->verbuf != NULL)
1204 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1205 sc->verbuf_h_dmamap);
1206 if (sc->verbuf_h_dmat != NULL)
1207 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1209 /* End LSIP200113393 */
1210 /* ThunderBolt INIT packet memory Free */
1211 if (sc->mfi_tb_init_busaddr != 0)
1212 bus_dmamap_unload(sc->mfi_tb_init_dmat,
1213 sc->mfi_tb_init_dmamap);
1214 if (sc->mfi_tb_init != NULL)
1215 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1216 sc->mfi_tb_init_dmamap);
1217 if (sc->mfi_tb_init_dmat != NULL)
1218 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1220 /* ThunderBolt IOC Init Desc memory free here */
1221 if (sc->mfi_tb_ioc_init_busaddr != 0)
1222 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1223 sc->mfi_tb_ioc_init_dmamap);
1224 if (sc->mfi_tb_ioc_init_desc != NULL)
1225 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1226 sc->mfi_tb_ioc_init_desc,
1227 sc->mfi_tb_ioc_init_dmamap);
1228 if (sc->mfi_tb_ioc_init_dmat != NULL)
1229 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* ThunderBolt per-command pool: free each element, then the array. */
1230 if (sc->mfi_cmd_pool_tbolt != NULL) {
1231 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1232 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1233 free(sc->mfi_cmd_pool_tbolt[i],
1235 sc->mfi_cmd_pool_tbolt[i] = NULL;
1238 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1239 sc->mfi_cmd_pool_tbolt = NULL;
1241 if (sc->request_desc_pool != NULL) {
1242 free(sc->request_desc_pool, M_MFIBUF);
1243 sc->request_desc_pool = NULL;
1246 if (sc->mfi_buffer_dmat != NULL)
1247 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1248 if (sc->mfi_parent_dmat != NULL)
1249 bus_dma_tag_destroy(sc->mfi_parent_dmat);
/* Locks go last, after everything that might have used them. */
1251 if (mtx_initialized(&sc->mfi_io_lock)) {
1252 mtx_destroy(&sc->mfi_io_lock);
1253 sx_destroy(&sc->mfi_config_lock);
/*
 * Deferred startup run from the config intrhook: enable controller
 * interrupts, probe logical drives (and, per the SKINNY check, system
 * PDs) under the config and io locks, then disestablish the hook.
 * NOTE(review): the probe calls themselves are elided in this extract.
 */
1260 mfi_startup(void *arg)
1262 struct mfi_softc *sc;
1264 sc = (struct mfi_softc *)arg;
1266 sc->mfi_enable_intr(sc);
1267 sx_xlock(&sc->mfi_config_lock);
1268 mtx_lock(&sc->mfi_io_lock);
1270 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1272 mtx_unlock(&sc->mfi_io_lock);
1273 sx_xunlock(&sc->mfi_config_lock);
1275 config_intrhook_disestablish(&sc->mfi_ich);
/*
 * Interrupt handler body (function header elided in this extract):
 * drain the hardware reply queue between the producer (hw_pi) and
 * consumer (hw_ci) indexes, completing each referenced command, then
 * publish the new consumer index and unfreeze deferred I/O.  The reply
 * ring has mfi_max_fw_cmds + 1 slots, hence the wrap test below.
 */
1281 struct mfi_softc *sc;
1282 struct mfi_command *cm;
1283 uint32_t pi, ci, context;
1285 sc = (struct mfi_softc *)arg;
/* Shared-interrupt filter: bail if this device did not interrupt. */
1287 if (sc->mfi_check_clear_intr(sc))
1291 pi = sc->mfi_comms->hw_pi;
1292 ci = sc->mfi_comms->hw_ci;
1293 mtx_lock(&sc->mfi_io_lock);
1295 context = sc->mfi_comms->hw_reply_q[ci];
1296 if (context < sc->mfi_max_fw_cmds) {
1297 cm = &sc->mfi_commands[context];
1298 mfi_remove_busy(cm);
1300 mfi_complete(sc, cm);
1302 if (++ci == (sc->mfi_max_fw_cmds + 1))
1306 sc->mfi_comms->hw_ci = ci;
1308 /* Give defered I/O a chance to run */
1309 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1311 mtx_unlock(&sc->mfi_io_lock);
1314 * Dummy read to flush the bus; this ensures that the indexes are up
1315 * to date. Restart processing if more commands have come it.
1317 (void)sc->mfi_read_fw_status(sc);
1318 if (pi != sc->mfi_comms->hw_pi)
/*
 * Controller shutdown: abort the outstanding AEN and map-sync commands
 * if present, then issue a polled MFI_DCMD_CTRL_SHUTDOWN with no data
 * phase (MFI_FRAME_DIR_NONE).
 */
1325 mfi_shutdown(struct mfi_softc *sc)
1327 struct mfi_dcmd_frame *dcmd;
1328 struct mfi_command *cm;
1331 if (sc->mfi_aen_cm != NULL) {
/* Flag the abort first so the completion handler treats it as such. */
1332 sc->cm_aen_abort = 1;
1333 mfi_abort(sc, &sc->mfi_aen_cm);
1336 if (sc->mfi_map_sync_cm != NULL) {
1337 sc->cm_map_abort = 1;
1338 mfi_abort(sc, &sc->mfi_map_sync_cm);
1341 mtx_lock(&sc->mfi_io_lock);
1342 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1344 mtx_unlock(&sc->mfi_io_lock);
1348 dcmd = &cm->cm_frame->dcmd;
1349 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1350 cm->cm_flags = MFI_CMD_POLLED;
1353 if ((error = mfi_mapcmd(sc, cm)) != 0)
1354 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1356 mfi_release_command(cm);
1357 mtx_unlock(&sc->mfi_io_lock);
/*
 * Re-scan system (JBOD) physical drives: fetch the exposed-PD list
 * with a polled MFI_DCMD_PD_LIST_QUERY, attach any PD not already
 * present or pending, and detach PDs that no longer appear in the
 * list.  Requires both the config sx lock and mfi_io_lock (asserted);
 * the io lock is dropped around device_delete_child().
 */
1362 mfi_syspdprobe(struct mfi_softc *sc)
1364 struct mfi_frame_header *hdr;
1365 struct mfi_command *cm = NULL;
1366 struct mfi_pd_list *pdlist = NULL;
1367 struct mfi_system_pd *syspd, *tmp;
1368 struct mfi_system_pending *syspd_pend;
1369 int error, i, found;
1371 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1372 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1373 /* Add SYSTEM PD's */
1374 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1375 (void **)&pdlist, sizeof(*pdlist));
1377 device_printf(sc->mfi_dev,
1378 "Error while forming SYSTEM PD list\n");
1382 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
/* mbox[0]: only PDs exposed to the host; mbox[1] unused. */
1383 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1384 cm->cm_frame->dcmd.mbox[1] = 0;
1385 if (mfi_mapcmd(sc, cm) != 0) {
1386 device_printf(sc->mfi_dev,
1387 "Failed to get syspd device listing\n");
1390 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1391 BUS_DMASYNC_POSTREAD);
1392 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1393 hdr = &cm->cm_frame->header;
1394 if (hdr->cmd_status != MFI_STAT_OK) {
1395 device_printf(sc->mfi_dev,
1396 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1399 /* Get each PD and add it to the system */
1400 for (i = 0; i < pdlist->count; i++) {
/* Skip enclosure entries (device id equals enclosure device id). */
1401 if (pdlist->addr[i].device_id ==
1402 pdlist->addr[i].encl_device_id)
1405 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1406 if (syspd->pd_id == pdlist->addr[i].device_id)
1409 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1410 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1414 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1416 /* Delete SYSPD's whose state has been changed */
1417 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1419 for (i = 0; i < pdlist->count; i++) {
1420 if (syspd->pd_id == pdlist->addr[i].device_id) {
/* Drop the io lock across newbus teardown, then retake it. */
1427 mtx_unlock(&sc->mfi_io_lock);
1429 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1431 mtx_lock(&sc->mfi_io_lock);
1436 free(pdlist, M_MFIBUF);
1438 mfi_release_command(cm);
/*
 * Probe logical drives: fetch the LD list with a sleeping
 * MFI_DCMD_LD_GET_LIST and attach any target not already attached or
 * pending.  Requires the config sx lock and mfi_io_lock (asserted).
 */
1444 mfi_ldprobe(struct mfi_softc *sc)
1446 struct mfi_frame_header *hdr;
1447 struct mfi_command *cm = NULL;
1448 struct mfi_ld_list *list = NULL;
1449 struct mfi_disk *ld;
1450 struct mfi_disk_pending *ld_pend;
1453 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1454 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1456 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1457 (void **)&list, sizeof(*list));
/* Not polled: mfi_wait_command sleeps for completion below. */
1461 cm->cm_flags = MFI_CMD_DATAIN;
1462 if (mfi_wait_command(sc, cm) != 0) {
1463 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1467 hdr = &cm->cm_frame->header;
1468 if (hdr->cmd_status != MFI_STAT_OK) {
1469 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1474 for (i = 0; i < list->ld_count; i++) {
/* Already attached or attach already pending?  Then skip. */
1475 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1476 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1479 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1480 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1483 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1488 free(list, M_MFIBUF);
1490 mfi_release_command(cm);
1496 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1497 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp for logging.  Returns a pointer to a
 * static buffer, so the result is only valid until the next call and
 * the function is not re-entrant (acceptable for its logging use).
 */
1501 format_timestamp(uint32_t timestamp)
1503 static char buffer[32];
1505 if ((timestamp & 0xff000000) == 0xff000000)
1506 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1509 snprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a short human-readable string; an
 * unknown code is formatted as its decimal value in a static buffer
 * (shared, non-re-entrant — fine for the single logging call site).
 */
1514 format_class(int8_t class)
1516 static char buffer[6];
1519 case MFI_EVT_CLASS_DEBUG:
1521 case MFI_EVT_CLASS_PROGRESS:
1522 return ("progress");
1523 case MFI_EVT_CLASS_INFO:
1525 case MFI_EVT_CLASS_WARNING:
1527 case MFI_EVT_CLASS_CRITICAL:
1529 case MFI_EVT_CLASS_FATAL:
1531 case MFI_EVT_CLASS_DEAD:
1534 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log an AEN event and react to it: host-bus-scan requests trigger a
 * SYSPD re-probe, an LD going offline detaches the corresponding mfid
 * child, and PD insert/remove events re-probe SYSPDs and notify the
 * CAM rescan callback.  Events older than the boot sequence number,
 * or arriving during detach, are ignored.
 */
1540 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1542 struct mfi_system_pd *syspd = NULL;
1544 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1545 format_timestamp(detail->time), detail->evt_class.members.locale,
1546 format_class(detail->evt_class.members.evt_class),
1547 detail->description);
1549 /* Don't act on old AEN's or while shutting down */
1550 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1553 switch (detail->arg_type) {
1554 case MR_EVT_ARGS_NONE:
1555 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1556 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1557 if (mfi_detect_jbod_change) {
1559 * Probe for new SYSPD's and Delete
1562 sx_xlock(&sc->mfi_config_lock);
1563 mtx_lock(&sc->mfi_io_lock);
1565 mtx_unlock(&sc->mfi_io_lock);
1566 sx_xunlock(&sc->mfi_config_lock);
1570 case MR_EVT_ARGS_LD_STATE:
1571 /* During load time driver reads all the events starting
1572 * from the one that has been logged after shutdown. Avoid
1575 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1577 struct mfi_disk *ld;
/* Find the mfid child matching the offlined target id. */
1578 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1580 detail->args.ld_state.ld.target_id)
1584 Fix: for kernel panics when SSCD is removed
1585 KASSERT(ld != NULL, ("volume dissappeared"));
1589 device_delete_child(sc->mfi_dev, ld->ld_dev);
1594 case MR_EVT_ARGS_PD:
1595 if (detail->code == MR_EVT_PD_REMOVED) {
1596 if (mfi_detect_jbod_change) {
1598 * If the removed device is a SYSPD then
1601 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1604 detail->args.pd.device_id) {
1606 device_delete_child(
1615 if (detail->code == MR_EVT_PD_INSERTED) {
1616 if (mfi_detect_jbod_change) {
1617 /* Probe for new SYSPD's */
1618 sx_xlock(&sc->mfi_config_lock);
1619 mtx_lock(&sc->mfi_io_lock);
1621 mtx_unlock(&sc->mfi_io_lock);
1622 sx_xunlock(&sc->mfi_config_lock);
/* Let the CAM layer rescan its bus on any PD hotplug event. */
1625 if (sc->mfi_cam_rescan_cb != NULL &&
1626 (detail->code == MR_EVT_PD_INSERTED ||
1627 detail->code == MR_EVT_PD_REMOVED)) {
1628 sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
/*
 * Copy an event detail into a heap element, append it to the softc
 * event queue, and kick the swi taskqueue to process it outside the
 * io lock.  Allocation is M_NOWAIT since we hold mfi_io_lock
 * (asserted); the elided branch presumably handles malloc failure.
 */
1635 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1637 struct mfi_evt_queue_elm *elm;
1639 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1640 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1643 memcpy(&elm->detail, detail, sizeof(*detail));
1644 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1645 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: splice the pending event queue onto a local list
 * under mfi_io_lock, then decode and free each element with the lock
 * dropped, so mfi_decode_evt may sleep/take other locks safely.
 */
1649 mfi_handle_evt(void *context, int pending)
1651 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1652 struct mfi_softc *sc;
1653 struct mfi_evt_queue_elm *elm;
1657 mtx_lock(&sc->mfi_io_lock);
1658 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1659 mtx_unlock(&sc->mfi_io_lock);
1660 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1661 TAILQ_REMOVE(&queue, elm, link);
1662 mfi_decode_evt(sc, &elm->detail);
1663 free(elm, M_MFIBUF);
/*
 * Register an MFI_DCMD_CTRL_EVENT_WAIT command so firmware notifies us
 * of events at/after 'seq' matching 'locale'.  If an AEN is already
 * outstanding with a superset filter, do nothing; otherwise merge the
 * filters and abort the old command before re-registering.  The
 * command completes asynchronously via mfi_aen_complete.
 * Caller must hold mfi_io_lock (asserted).
 */
1668 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1670 struct mfi_command *cm;
1671 struct mfi_dcmd_frame *dcmd;
1672 union mfi_evt current_aen, prior_aen;
1673 struct mfi_evt_detail *ed = NULL;
1676 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1678 current_aen.word = locale;
1679 if (sc->mfi_aen_cm != NULL) {
/* Recover the filter from the outstanding command's mailbox. */
1681 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1682 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1683 !((prior_aen.members.locale & current_aen.members.locale)
1684 ^current_aen.members.locale)) {
1687 prior_aen.members.locale |= current_aen.members.locale;
1688 if (prior_aen.members.evt_class
1689 < current_aen.members.evt_class)
1690 current_aen.members.evt_class =
1691 prior_aen.members.evt_class;
1692 mfi_abort(sc, &sc->mfi_aen_cm);
1696 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1697 (void **)&ed, sizeof(*ed));
1701 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale word. */
1702 ((uint32_t *)&dcmd->mbox)[0] = seq;
1703 ((uint32_t *)&dcmd->mbox)[1] = locale;
1704 cm->cm_flags = MFI_CMD_DATAIN;
1705 cm->cm_complete = mfi_aen_complete;
1707 sc->last_seq_num = seq;
1708 sc->mfi_aen_cm = cm;
1710 mfi_enqueue_ready(cm);
/*
 * Completion handler for the outstanding AEN command: unless aborted,
 * queue the received event, wake select() waiters, and SIGIO every
 * registered listener pid.  Afterwards the command is released and,
 * when not aborted, mfi_aen_setup() re-arms notification at the next
 * sequence number.  Runs with mfi_io_lock held (asserted).
 */
1718 mfi_aen_complete(struct mfi_command *cm)
1720 struct mfi_frame_header *hdr;
1721 struct mfi_softc *sc;
1722 struct mfi_evt_detail *detail;
1723 struct mfi_aen *mfi_aen_entry, *tmp;
1724 int seq = 0, aborted = 0;
1727 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1729 if (sc->mfi_aen_cm == NULL)
1732 hdr = &cm->cm_frame->header;
1734 if (sc->cm_aen_abort ||
1735 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1736 sc->cm_aen_abort = 0;
1739 sc->mfi_aen_triggered = 1;
/* Wake anyone blocked in select()/poll() on the control device. */
1740 if (sc->mfi_poll_waiting) {
1741 sc->mfi_poll_waiting = 0;
1742 selwakeup(&sc->mfi_select);
1744 detail = cm->cm_data;
1745 mfi_queue_evt(sc, detail);
1746 seq = detail->seq + 1;
/* Signal and drop every process registered for AEN delivery. */
1747 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1749 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1751 PROC_LOCK(mfi_aen_entry->p);
1752 kern_psignal(mfi_aen_entry->p, SIGIO);
1753 PROC_UNLOCK(mfi_aen_entry->p);
1754 free(mfi_aen_entry, M_MFIBUF);
1758 free(cm->cm_data, M_MFIBUF);
1759 wakeup(&sc->mfi_aen_cm);
1760 sc->mfi_aen_cm = NULL;
1761 mfi_release_command(cm);
1763 /* set it up again so the driver can catch more events */
1765 mfi_aen_setup(sc, seq);
/* Batch size for MFI_DCMD_CTRL_EVENT_GET fetches below. */
1768 #define MAX_EVENTS 15
/*
 * Replay logged events from start_seq up to (but not including)
 * stop_seq, fetching up to MAX_EVENTS entries per polled
 * MFI_DCMD_CTRL_EVENT_GET and queueing each via mfi_queue_evt().
 * Handles the circular event log: if the stop point precedes the
 * start point numerically, entries are accepted until wrap-around.
 * Caller must hold mfi_io_lock (asserted).
 */
1771 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1773 struct mfi_command *cm;
1774 struct mfi_dcmd_frame *dcmd;
1775 struct mfi_evt_list *el;
1776 union mfi_evt class_locale;
1777 int error, i, seq, size;
1779 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1781 class_locale.members.reserved = 0;
1782 class_locale.members.locale = mfi_event_locale;
1783 class_locale.members.evt_class = mfi_event_class;
/* One header plus MAX_EVENTS details (elided multiplier presumed). */
1785 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1787 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1791 for (seq = start_seq;;) {
1792 if ((cm = mfi_dequeue_free(sc)) == NULL) {
/* Build the DCMD frame by hand — no data buffer helper here. */
1797 dcmd = &cm->cm_frame->dcmd;
1798 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1799 dcmd->header.cmd = MFI_CMD_DCMD;
1800 dcmd->header.timeout = 0;
1801 dcmd->header.data_len = size;
1802 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1803 ((uint32_t *)&dcmd->mbox)[0] = seq;
1804 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1805 cm->cm_sg = &dcmd->sgl;
1806 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1807 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1811 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1812 device_printf(sc->mfi_dev,
1813 "Failed to get controller entries\n");
1814 mfi_release_command(cm);
1818 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1819 BUS_DMASYNC_POSTREAD);
1820 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we walked off the end of the log: done. */
1822 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1823 mfi_release_command(cm);
1826 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1827 device_printf(sc->mfi_dev,
1828 "Error %d fetching controller entries\n",
1829 dcmd->header.cmd_status);
1830 mfi_release_command(cm);
1834 mfi_release_command(cm);
1836 for (i = 0; i < el->count; i++) {
1838 * If this event is newer than 'stop_seq' then
1839 * break out of the loop. Note that the log
1840 * is a circular buffer so we have to handle
1841 * the case that our stop point is earlier in
1842 * the buffer than our start point.
1844 if (el->event[i].seq >= stop_seq) {
1845 if (start_seq <= stop_seq)
1847 else if (el->event[i].seq < start_seq)
1850 mfi_queue_evt(sc, &el->event[i]);
/* Continue the next fetch after the last entry we received. */
1852 seq = el->event[el->count - 1].seq + 1;
/*
 * Attach logical drive 'id': record it on the pending list, fetch its
 * info with a sleeping MFI_DCMD_LD_GET_INFO, and hand non-SSCD drives
 * to mfi_add_ld_complete() which creates the mfid child.  For SSCD
 * drives (cache devices) the info buffer is freed here instead.
 * Caller must hold mfi_io_lock (asserted).
 */
1860 mfi_add_ld(struct mfi_softc *sc, int id)
1862 struct mfi_command *cm;
1863 struct mfi_dcmd_frame *dcmd = NULL;
1864 struct mfi_ld_info *ld_info = NULL;
1865 struct mfi_disk_pending *ld_pend;
1868 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Track the attach-in-progress so ldprobe won't re-add this id. */
1870 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1871 if (ld_pend != NULL) {
1872 ld_pend->ld_id = id;
1873 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1876 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1877 (void **)&ld_info, sizeof(*ld_info));
1879 device_printf(sc->mfi_dev,
1880 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1882 free(ld_info, M_MFIBUF);
1885 cm->cm_flags = MFI_CMD_DATAIN;
1886 dcmd = &cm->cm_frame->dcmd;
1888 if (mfi_wait_command(sc, cm) != 0) {
1889 device_printf(sc->mfi_dev,
1890 "Failed to get logical drive: %d\n", id);
1891 free(ld_info, M_MFIBUF);
1894 if (ld_info->ld_config.params.isSSCD != 1)
1895 mfi_add_ld_complete(cm);
1897 mfi_release_command(cm);
1898 if (ld_info) /* SSCD drives ld_info free here */
1899 free(ld_info, M_MFIBUF);
/*
 * Second half of LD attach: on success, drop mfi_io_lock, create an
 * "mfid" child device carrying ld_info as its ivars, run the newbus
 * attach, and retake the lock.  On abort or firmware error the info
 * buffer is freed and map-sync waiters are woken.
 */
1905 mfi_add_ld_complete(struct mfi_command *cm)
1907 struct mfi_frame_header *hdr;
1908 struct mfi_ld_info *ld_info;
1909 struct mfi_softc *sc;
1913 hdr = &cm->cm_frame->header;
1914 ld_info = cm->cm_private;
1916 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1917 free(ld_info, M_MFIBUF);
1918 wakeup(&sc->mfi_map_sync_cm);
1919 mfi_release_command(cm);
1922 wakeup(&sc->mfi_map_sync_cm);
1923 mfi_release_command(cm);
/* newbus calls may sleep; the io mutex cannot be held across them. */
1925 mtx_unlock(&sc->mfi_io_lock);
1927 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1928 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1929 free(ld_info, M_MFIBUF);
1931 mtx_lock(&sc->mfi_io_lock);
/* ld_info ownership passes to the child via ivars; mfid frees it. */
1935 device_set_ivars(child, ld_info);
1936 device_set_desc(child, "MFI Logical Disk");
1937 bus_generic_attach(sc->mfi_dev);
1939 mtx_lock(&sc->mfi_io_lock);
/*
 * Attach system PD 'id': record a pending entry, fetch the PD info
 * with a polled MFI_DCMD_PD_GET_INFO, and pass the result to
 * mfi_add_sys_pd_complete() which creates the mfisyspd child.
 * Caller must hold mfi_io_lock (asserted).
 */
1942 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1944 struct mfi_command *cm;
1945 struct mfi_dcmd_frame *dcmd = NULL;
1946 struct mfi_pd_info *pd_info = NULL;
1947 struct mfi_system_pending *syspd_pend;
1950 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Track attach-in-progress so syspdprobe won't re-add this id. */
1952 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1953 if (syspd_pend != NULL) {
1954 syspd_pend->pd_id = id;
1955 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1958 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1959 (void **)&pd_info, sizeof(*pd_info));
1961 device_printf(sc->mfi_dev,
1962 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1965 free(pd_info, M_MFIBUF);
1968 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1969 dcmd = &cm->cm_frame->dcmd;
1971 dcmd->header.scsi_status = 0;
1972 dcmd->header.pad0 = 0;
1973 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1974 device_printf(sc->mfi_dev,
1975 "Failed to get physical drive info %d\n", id);
1976 free(pd_info, M_MFIBUF);
1977 mfi_release_command(cm);
1980 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1981 BUS_DMASYNC_POSTREAD);
1982 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1983 mfi_add_sys_pd_complete(cm);
/*
 * Second half of SYSPD attach: verify the firmware reported the drive
 * in SYSTEM state, then (with mfi_io_lock dropped) create an
 * "mfisyspd" child carrying pd_info as ivars and run the newbus
 * attach.  Error paths free pd_info and release the command.
 */
1988 mfi_add_sys_pd_complete(struct mfi_command *cm)
1990 struct mfi_frame_header *hdr;
1991 struct mfi_pd_info *pd_info;
1992 struct mfi_softc *sc;
1996 hdr = &cm->cm_frame->header;
1997 pd_info = cm->cm_private;
1999 if (hdr->cmd_status != MFI_STAT_OK) {
2000 free(pd_info, M_MFIBUF);
2001 mfi_release_command(cm);
/* Only drives in SYSTEM (JBOD) state get a child device. */
2004 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2005 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2006 pd_info->ref.v.device_id);
2007 free(pd_info, M_MFIBUF);
2008 mfi_release_command(cm);
2011 mfi_release_command(cm);
/* newbus calls may sleep; drop the io mutex around them. */
2013 mtx_unlock(&sc->mfi_io_lock);
2015 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2016 device_printf(sc->mfi_dev, "Failed to add system pd\n");
2017 free(pd_info, M_MFIBUF);
2019 mtx_lock(&sc->mfi_io_lock);
/* pd_info ownership passes to the child via ivars. */
2023 device_set_ivars(child, pd_info);
2024 device_set_desc(child, "MFI System PD");
2025 bus_generic_attach(sc->mfi_dev);
2027 mtx_lock(&sc->mfi_io_lock);
2030 static struct mfi_command *
/*
 * Pull the next bio off the driver's bio queue and build an MFI
 * command for it — an LD I/O frame or a SYSPD pass-through frame
 * depending on the tag stashed in bio_driver2.  Returns NULL (and
 * re-queues the bio on build failure) when nothing can be issued;
 * fewer than two free commands are left reserved for ioctls.
 */
2031 mfi_bio_command(struct mfi_softc *sc)
2034 struct mfi_command *cm = NULL;
2036 /*reserving two commands to avoid starvation for IOCTL*/
2037 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2040 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2043 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2044 cm = mfi_build_ldio(sc, bio);
2045 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2046 cm = mfi_build_syspdio(sc, bio);
/* Build failed: put the bio back so it is retried later. */
2049 mfi_enqueue_bio(sc, bio);
2054 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
/*
 * Encode a READ/WRITE CDB for the given LBA and block count, choosing
 * the smallest CDB (6/10/12/16 bytes) that can represent both values.
 * Returns the CDB length written into 'cdb' (caller provides >= 16
 * bytes).  'readop' selects READ_* vs WRITE_* opcodes; 'byte2' is
 * placed verbatim in the CDBs that carry it.
 */
2058 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
/* 6-byte CDB: 21-bit LBA, 8-bit count (0 means 256 — excluded here). */
2062 if (((lba & 0x1fffff) == lba)
2063 && ((block_count & 0xff) == block_count)
2065 /* We can fit in a 6 byte cdb */
2066 struct scsi_rw_6 *scsi_cmd;
2068 scsi_cmd = (struct scsi_rw_6 *)cdb;
2069 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2070 scsi_ulto3b(lba, scsi_cmd->addr);
2071 scsi_cmd->length = block_count & 0xff;
2072 scsi_cmd->control = 0;
2073 cdb_len = sizeof(*scsi_cmd);
2074 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2075 /* Need a 10 byte CDB */
2076 struct scsi_rw_10 *scsi_cmd;
2078 scsi_cmd = (struct scsi_rw_10 *)cdb;
2079 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2080 scsi_cmd->byte2 = byte2;
2081 scsi_ulto4b(lba, scsi_cmd->addr);
2082 scsi_cmd->reserved = 0;
2083 scsi_ulto2b(block_count, scsi_cmd->length);
2084 scsi_cmd->control = 0;
2085 cdb_len = sizeof(*scsi_cmd);
2086 } else if (((block_count & 0xffffffff) == block_count) &&
2087 ((lba & 0xffffffff) == lba)) {
2088 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2089 struct scsi_rw_12 *scsi_cmd;
2091 scsi_cmd = (struct scsi_rw_12 *)cdb;
2092 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2093 scsi_cmd->byte2 = byte2;
2094 scsi_ulto4b(lba, scsi_cmd->addr);
2095 scsi_cmd->reserved = 0;
2096 scsi_ulto4b(block_count, scsi_cmd->length);
2097 scsi_cmd->control = 0;
2098 cdb_len = sizeof(*scsi_cmd);
2101 * 16 byte CDB. We'll only get here if the LBA is larger
2104 struct scsi_rw_16 *scsi_cmd;
2106 scsi_cmd = (struct scsi_rw_16 *)cdb;
2107 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2108 scsi_cmd->byte2 = byte2;
2109 scsi_u64to8b(lba, scsi_cmd->addr);
2110 scsi_cmd->reserved = 0;
2111 scsi_ulto4b(block_count, scsi_cmd->length);
2112 scsi_cmd->control = 0;
2113 cdb_len = sizeof(*scsi_cmd);
2119 extern char *unmapped_buf;
2121 static struct mfi_command *
/*
 * Build a SCSI pass-through command (MFI_CMD_PD_SCSI_IO) for a bio
 * aimed at a system PD: encode the CDB from the bio's block range,
 * point the frame at the per-command sense buffer, and mark the
 * command for unmapped-bio DMA (MFI_CMD_BIO + unmapped_buf).
 * Returns NULL if no free command is available; unsupported bio
 * commands are completed with EOPNOTSUPP.
 */
2122 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2124 struct mfi_command *cm;
2125 struct mfi_pass_frame *pass;
2126 uint32_t context = 0;
2127 int flags = 0, blkcount = 0, readop;
2130 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2132 if ((cm = mfi_dequeue_free(sc)) == NULL)
2135 /* Zero out the MFI frame */
/* Preserve the frame's context tag across the bzero. */
2136 context = cm->cm_frame->header.context;
2137 bzero(cm->cm_frame, sizeof(union mfi_frame));
2138 cm->cm_frame->header.context = context;
2139 pass = &cm->cm_frame->pass;
2140 bzero(pass->cdb, 16);
2141 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2142 switch (bio->bio_cmd) {
2144 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2148 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2152 /* TODO: what about BIO_DELETE??? */
2153 biofinish(bio, NULL, EOPNOTSUPP);
2154 mfi_enqueue_free(cm);
2158 /* Cheat with the sector length to avoid a non-constant division */
2159 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2160 /* Fill the LBA and Transfer length in CDB */
2161 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2163 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2164 pass->header.lun_id = 0;
2165 pass->header.timeout = 0;
2166 pass->header.flags = 0;
2167 pass->header.scsi_status = 0;
2168 pass->header.sense_len = MFI_SENSE_LEN;
2169 pass->header.data_len = bio->bio_bcount;
2170 pass->header.cdb_len = cdb_len;
/* 64-bit sense buffer address split into lo/hi words. */
2171 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2172 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2173 cm->cm_complete = mfi_bio_complete;
2174 cm->cm_private = bio;
2175 cm->cm_data = unmapped_buf;
2176 cm->cm_len = bio->bio_bcount;
2177 cm->cm_sg = &pass->sgl;
2178 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2179 cm->cm_flags = flags;
2184 static struct mfi_command *
/*
 * Build a logical-drive I/O frame (MFI_CMD_LD_READ/WRITE) for a bio:
 * block count and 64-bit LBA go directly in the io frame (no CDB),
 * sense buffer and unmapped-bio DMA are set up as in the syspd path.
 * Returns NULL if no free command is available; unsupported bio
 * commands are completed with EOPNOTSUPP.
 */
2185 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2187 struct mfi_io_frame *io;
2188 struct mfi_command *cm;
2191 uint32_t context = 0;
2193 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2195 if ((cm = mfi_dequeue_free(sc)) == NULL)
2198 /* Zero out the MFI frame */
/* Preserve the frame's context tag across the bzero. */
2199 context = cm->cm_frame->header.context;
2200 bzero(cm->cm_frame, sizeof(union mfi_frame));
2201 cm->cm_frame->header.context = context;
2202 io = &cm->cm_frame->io;
2203 switch (bio->bio_cmd) {
2205 io->header.cmd = MFI_CMD_LD_READ;
2206 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2209 io->header.cmd = MFI_CMD_LD_WRITE;
2210 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2213 /* TODO: what about BIO_DELETE??? */
2214 biofinish(bio, NULL, EOPNOTSUPP);
2215 mfi_enqueue_free(cm);
2219 /* Cheat with the sector length to avoid a non-constant division */
2220 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2221 io->header.target_id = (uintptr_t)bio->bio_driver1;
2222 io->header.timeout = 0;
2223 io->header.flags = 0;
2224 io->header.scsi_status = 0;
2225 io->header.sense_len = MFI_SENSE_LEN;
/* LD I/O counts in blocks, unlike the pass-through's byte count. */
2226 io->header.data_len = blkcount;
2227 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2228 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2229 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2230 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2231 cm->cm_complete = mfi_bio_complete;
2232 cm->cm_private = bio;
2233 cm->cm_data = unmapped_buf;
2234 cm->cm_len = bio->bio_bcount;
2235 cm->cm_sg = &io->sgl;
2236 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2237 cm->cm_flags = flags;
/*
 * Completion handler for bio-backed commands: translate a firmware or
 * SCSI error (or a driver-level cm_error) into BIO_ERROR/EIO on the
 * bio, log it with sense data, release the command, and finish the
 * bio via mfi_disk_complete().
 */
2243 mfi_bio_complete(struct mfi_command *cm)
2246 struct mfi_frame_header *hdr;
2247 struct mfi_softc *sc;
2249 bio = cm->cm_private;
2250 hdr = &cm->cm_frame->header;
2253 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2254 bio->bio_flags |= BIO_ERROR;
2255 bio->bio_error = EIO;
2256 device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2257 "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2258 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2259 } else if (cm->cm_error != 0) {
2260 bio->bio_flags |= BIO_ERROR;
2261 bio->bio_error = cm->cm_error;
2262 device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
/* Release before biodone so the command slot frees up promptly. */
2266 mfi_release_command(cm);
2267 mfi_disk_complete(bio);
/*
 * Command issue loop: while resources allow, take work in priority
 * order — already-prepared ready commands, then queued CAM CCBs, then
 * bios — and hand each to mfi_mapcmd().  Stops when the queue is
 * frozen (MFI_FLAGS_QFRZN) or no work remains; a mapcmd failure
 * requeues the command on the ready queue.
 */
2271 mfi_startio(struct mfi_softc *sc)
2273 struct mfi_command *cm;
2274 struct ccb_hdr *ccbh;
2277 /* Don't bother if we're short on resources */
2278 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2281 /* Try a command that has already been prepared */
2282 cm = mfi_dequeue_ready(sc);
2285 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2286 cm = sc->mfi_cam_start(ccbh);
2289 /* Nope, so look for work on the bioq */
2291 cm = mfi_bio_command(sc);
2293 /* No work available, so exit */
2297 /* Send the command to the controller */
2298 if (mfi_mapcmd(sc, cm) != 0) {
2299 device_printf(sc->mfi_dev, "Failed to startio\n");
2300 mfi_requeue_ready(cm);
/*
 * Map a command's data buffer for DMA and send it.  The load routine
 * is chosen by flag: CCB, bio (unmapped), or plain kernel buffer; the
 * frame is then sent from mfi_data_cb.  EINPROGRESS (deferred load)
 * freezes the queue until resources free up.  Commands without data
 * (or STP frames, handled elsewhere) go straight to mfi_send_frame().
 * Caller must hold mfi_io_lock (asserted).
 */
2307 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2311 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2313 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
/* Polled commands must not defer the DMA load. */
2314 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2315 if (cm->cm_flags & MFI_CMD_CCB)
2316 error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2317 cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2319 else if (cm->cm_flags & MFI_CMD_BIO)
2320 error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2321 cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2324 error = bus_dmamap_load(sc->mfi_buffer_dmat,
2325 cm->cm_dmamap, cm->cm_data, cm->cm_len,
2326 mfi_data_cb, cm, polled);
2327 if (error == EINPROGRESS) {
/* Defer further issue until the load callback runs. */
2328 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2332 error = mfi_send_frame(sc, cm);
/*
 * busdma load callback: fill the command's scatter/gather list from
 * the segment array, pick the SG format (IEEE-skinny for SKINNY-IO,
 * otherwise 32- or 64-bit per MFI_FLAGS_SG64, with special first-
 * segment handling for STP frames), sync the buffer for the transfer
 * direction, account extra frames, and send the frame.  May run
 * without mfi_io_lock held (async load), so it re-takes it as needed.
 */
2339 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2341 struct mfi_frame_header *hdr;
2342 struct mfi_command *cm;
2344 struct mfi_softc *sc;
2345 int i, j, first, dir;
2346 int sge_size, locked;
2348 cm = (struct mfi_command *)arg;
2350 hdr = &cm->cm_frame->header;
2354 * We need to check if we have the lock as this is async
2355 * callback so even though our caller mfi_mapcmd asserts
2356 * it has the lock, there is no guarantee that hasn't been
2357 * dropped if bus_dmamap_load returned prior to our
2360 if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2361 mtx_lock(&sc->mfi_io_lock);
2364 printf("error %d in callback\n", error);
2365 cm->cm_error = error;
2366 mfi_complete(sc, cm);
2369 /* Use IEEE sgl only for IO's on a SKINNY controller
2370 * For other commands on a SKINNY controller use either
2371 * sg32 or sg64 based on the sizeof(bus_addr_t).
2372 * Also calculate the total frame size based on the type
2375 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2376 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2377 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2378 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2379 for (i = 0; i < nsegs; i++) {
2380 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2381 sgl->sg_skinny[i].len = segs[i].ds_len;
2382 sgl->sg_skinny[i].flag = 0;
2384 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2385 sge_size = sizeof(struct mfi_sg_skinny);
2386 hdr->sg_count = nsegs;
2389 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
/* STP: first cm_stp_len bytes form their own leading SG entry. */
2390 first = cm->cm_stp_len;
2391 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2392 sgl->sg32[j].addr = segs[0].ds_addr;
2393 sgl->sg32[j++].len = first;
2395 sgl->sg64[j].addr = segs[0].ds_addr;
2396 sgl->sg64[j++].len = first;
2400 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2401 for (i = 0; i < nsegs; i++) {
2402 sgl->sg32[j].addr = segs[i].ds_addr + first;
2403 sgl->sg32[j++].len = segs[i].ds_len - first;
2407 for (i = 0; i < nsegs; i++) {
2408 sgl->sg64[j].addr = segs[i].ds_addr + first;
2409 sgl->sg64[j++].len = segs[i].ds_len - first;
2412 hdr->flags |= MFI_FRAME_SGL64;
2415 sge_size = sc->mfi_sge_size;
2419 if (cm->cm_flags & MFI_CMD_DATAIN) {
2420 dir |= BUS_DMASYNC_PREREAD;
2421 hdr->flags |= MFI_FRAME_DIR_READ;
2423 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2424 dir |= BUS_DMASYNC_PREWRITE;
2425 hdr->flags |= MFI_FRAME_DIR_WRITE;
2427 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2428 cm->cm_flags |= MFI_CMD_MAPPED;
2431 * Instead of calculating the total number of frames in the
2432 * compound frame, it's already assumed that there will be at
2433 * least 1 frame, so don't compensate for the modulo of the
2434 * following division.
2436 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2437 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2439 if ((error = mfi_send_frame(sc, cm)) != 0) {
2440 printf("error %d in callback from mfi_send_frame\n", error);
2441 cm->cm_error = error;
2442 mfi_complete(sc, cm);
2447 /* leave the lock in the state we found it */
2449 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_send_frame -- dispatch a frame to the adapter, via the Thunderbolt
 * path when MFA_enabled is set, otherwise the standard path.  On failure,
 * pull the command back off the busy queue if it had been placed there.
 * Caller must hold mfi_io_lock.
 */
2455 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2459 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2461 if (sc->MFA_enabled)
2462 error = mfi_tbolt_send_frame(sc, cm);
2464 error = mfi_std_send_frame(sc, cm);
/* Undo the busy-queue insertion done by the send path on failure. */
2466 if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2467 mfi_remove_busy(cm);
/*
 * mfi_std_send_frame -- issue a frame through the legacy (non-Thunderbolt)
 * doorbell.  Non-polled commands are timestamped and queued busy; polled
 * commands busy-wait for completion up to mfi_polled_cmd_timeout seconds.
 */
2473 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2475 struct mfi_frame_header *hdr;
/* Polled-command time budget, converted to milliseconds. */
2476 int tm = mfi_polled_cmd_timeout * 1000;
2478 hdr = &cm->cm_frame->header;
2480 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2481 cm->cm_timestamp = time_uptime;
2482 mfi_enqueue_busy(cm);
/*
 * Polled path: seed cmd_status with a sentinel so completion can be
 * detected by watching it change, and keep the reply off the queue.
 */
2484 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2485 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2489 * The bus address of the command is aligned on a 64 byte boundary,
2490 * leaving the least 6 bits as zero. For whatever reason, the
2491 * hardware wants the address shifted right by three, leaving just
2492 * 3 zero bits. These three bits are then used as a prefetching
2493 * hint for the hardware to predict how many frames need to be
2494 * fetched across the bus. If a command has more than 8 frames
2495 * then the 3 bits are set to 0x7 and the firmware uses other
2496 * information in the command to determine the total amount to fetch.
2497 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2498 * is enough for both 32bit and 64bit systems.
2500 if (cm->cm_extra_frames > 7)
2501 cm->cm_extra_frames = 7;
/* Ring the doorbell: frame bus address plus the frame-count hint. */
2503 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2505 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2508 /* This is a polled command, so busy-wait for it to complete. */
2509 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
/* Sentinel still present after the wait loop => the command timed out. */
2516 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2517 device_printf(sc->mfi_dev, "Frame %p timed out "
2518 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * mfi_complete -- finish a command: post-sync and unload its DMA map if it
 * was mapped, mark it completed, and call its completion handler if set.
 * Caller must hold mfi_io_lock.
 */
2526 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2529 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2531 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP frames are post-read synced in addition to any DATAIN command. */
2533 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2534 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2535 dir |= BUS_DMASYNC_POSTREAD;
2536 if (cm->cm_flags & MFI_CMD_DATAOUT)
2537 dir |= BUS_DMASYNC_POSTWRITE;
2539 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2540 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2541 cm->cm_flags &= ~MFI_CMD_MAPPED;
2544 cm->cm_flags |= MFI_CMD_COMPLETED;
/* Optional per-command completion hook. */
2546 if (cm->cm_complete != NULL)
2547 cm->cm_complete(cm);
/*
 * mfi_abort -- build and issue a polled MFI_CMD_ABORT frame targeting
 * *cm_abort, then wait (a bounded number of sleeps) for the aborted
 * command to clear.  If it never clears, force its completion handler.
 */
2553 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2555 struct mfi_command *cm;
2556 struct mfi_abort_frame *abort;
2558 uint32_t context = 0;
2560 mtx_lock(&sc->mfi_io_lock);
2561 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2562 mtx_unlock(&sc->mfi_io_lock);
2566 /* Zero out the MFI frame */
/* Preserve the command's context value across the bzero. */
2567 context = cm->cm_frame->header.context;
2568 bzero(cm->cm_frame, sizeof(union mfi_frame));
2569 cm->cm_frame->header.context = context;
2571 abort = &cm->cm_frame->abort;
2572 abort->header.cmd = MFI_CMD_ABORT;
2573 abort->header.flags = 0;
2574 abort->header.scsi_status = 0;
/* Identify the victim by its context and split 64-bit frame address. */
2575 abort->abort_context = (*cm_abort)->cm_frame->header.context;
2576 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2577 abort->abort_mfi_addr_hi =
2578 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2580 cm->cm_flags = MFI_CMD_POLLED;
2582 if ((error = mfi_mapcmd(sc, cm)) != 0)
2583 device_printf(sc->mfi_dev, "failed to abort command\n");
2584 mfi_release_command(cm);
2586 mtx_unlock(&sc->mfi_io_lock);
/* Sleep-wait for the aborted command to be retired; at most 5 rounds. */
2587 while (i < 5 && *cm_abort != NULL) {
2588 tsleep(cm_abort, 0, "mfiabort",
2592 if (*cm_abort != NULL) {
2593 /* Force a complete if command didn't abort */
2594 mtx_lock(&sc->mfi_io_lock);
2595 (*cm_abort)->cm_complete(*cm_abort);
2596 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_dump_blocks -- polled LD write of 'len' bytes at LBA 'lba' on
 * logical volume 'id' (used by the kernel crash-dump path, so it cannot
 * sleep on interrupts).
 */
2603 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2606 struct mfi_command *cm;
2607 struct mfi_io_frame *io;
2609 uint32_t context = 0;
2611 if ((cm = mfi_dequeue_free(sc)) == NULL)
2614 /* Zero out the MFI frame */
2615 context = cm->cm_frame->header.context;
2616 bzero(cm->cm_frame, sizeof(union mfi_frame));
2617 cm->cm_frame->header.context = context;
2619 io = &cm->cm_frame->io;
2620 io->header.cmd = MFI_CMD_LD_WRITE;
2621 io->header.target_id = id;
2622 io->header.timeout = 0;
2623 io->header.flags = 0;
2624 io->header.scsi_status = 0;
2625 io->header.sense_len = MFI_SENSE_LEN;
/* data_len for I/O frames is expressed in sectors, not bytes. */
2626 io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2627 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2628 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
/* Split the 64-bit LBA into the frame's hi/lo halves. */
2629 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2630 io->lba_lo = lba & 0xffffffff;
2633 cm->cm_sg = &io->sgl;
2634 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2635 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2637 if ((error = mfi_mapcmd(sc, cm)) != 0)
2638 device_printf(sc->mfi_dev, "failed dump blocks\n");
/* Polled path: sync and unload the data map ourselves before release. */
2639 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2640 BUS_DMASYNC_POSTWRITE);
2641 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2642 mfi_release_command(cm);
/*
 * mfi_dump_syspd_blocks -- polled pass-through SCSI write of 'len' bytes
 * at LBA 'lba' to system physical disk 'id'; the crash-dump counterpart
 * of mfi_dump_blocks() for JBOD/system PDs.
 */
2648 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2651 struct mfi_command *cm;
2652 struct mfi_pass_frame *pass;
2653 int error, readop, cdb_len;
2656 if ((cm = mfi_dequeue_free(sc)) == NULL)
2659 pass = &cm->cm_frame->pass;
2660 bzero(pass->cdb, 16);
2661 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Build the SCSI CDB for this LBA/block-count; returns the CDB length. */
2664 blkcount = howmany(len, MFI_SECTOR_LEN);
2665 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2666 pass->header.target_id = id;
2667 pass->header.timeout = 0;
2668 pass->header.flags = 0;
2669 pass->header.scsi_status = 0;
2670 pass->header.sense_len = MFI_SENSE_LEN;
/* Pass-through frames carry the byte count directly. */
2671 pass->header.data_len = len;
2672 pass->header.cdb_len = cdb_len;
2673 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2674 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2677 cm->cm_sg = &pass->sgl;
2678 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2679 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2681 if ((error = mfi_mapcmd(sc, cm)) != 0)
2682 device_printf(sc->mfi_dev, "failed dump blocks\n");
/* Polled path: sync and unload the data map ourselves before release. */
2683 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2684 BUS_DMASYNC_POSTWRITE);
2685 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2686 mfi_release_command(cm);
/*
 * mfi_open -- character-device open handler; refuses the open while the
 * driver is detaching, otherwise marks the device open.
 */
2692 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2694 struct mfi_softc *sc;
2699 mtx_lock(&sc->mfi_io_lock);
2700 if (sc->mfi_detaching)
2703 sc->mfi_flags |= MFI_FLAGS_OPEN;
2706 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_close -- character-device close handler; clears the open flag and
 * frees any AEN registrations belonging to the closing process.
 */
2712 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2714 struct mfi_softc *sc;
2715 struct mfi_aen *mfi_aen_entry, *tmp;
2719 mtx_lock(&sc->mfi_io_lock);
2720 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* Safe iteration: entries matching curproc are removed and freed. */
2722 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2723 if (mfi_aen_entry->p == curproc) {
2724 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2726 free(mfi_aen_entry, M_MFIBUF);
2729 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_config_lock -- serialize configuration-changing DCMD opcodes by
 * taking the exclusive config sx lock for the listed opcodes.  The return
 * value is consumed by mfi_config_unlock(); the return statements are
 * elided in this excerpt.
 */
2734 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2738 case MFI_DCMD_LD_DELETE:
2739 case MFI_DCMD_CFG_ADD:
2740 case MFI_DCMD_CFG_CLEAR:
2741 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2742 sx_xlock(&sc->mfi_config_lock);
/*
 * mfi_config_unlock -- release the config sx lock when 'locked' says
 * mfi_config_lock() acquired it.
 */
2750 mfi_config_unlock(struct mfi_softc *sc, int locked)
2754 sx_xunlock(&sc->mfi_config_lock);
/*
 * mfi_check_command_pre -- veto or prepare for configuration-changing
 * userland DCMDs before they are issued: disables the affected disk
 * devices so GEOM consumers are quiesced first.
 */
2758 * Perform pre-issue checks on commands from userland and possibly veto
2762 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2764 struct mfi_disk *ld, *ld2;
2766 struct mfi_system_pd *syspd = NULL;
2770 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2772 switch (cm->cm_frame->dcmd.opcode) {
/* LD delete: find the target volume by id and disable it. */
2773 case MFI_DCMD_LD_DELETE:
2774 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2775 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2781 error = mfi_disk_disable(ld);
/* Config clear: disable every volume; re-enable on partial failure. */
2783 case MFI_DCMD_CFG_CLEAR:
2784 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2785 error = mfi_disk_disable(ld);
2790 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2793 mfi_disk_enable(ld2);
/*
 * PD state set: the target PD id and new state arrive in the mbox;
 * moving a PD to UNCONFIGURED_GOOD first disables its syspd device.
 */
2797 case MFI_DCMD_PD_STATE_SET:
2798 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2800 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2801 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2802 if (syspd->pd_id == syspd_id)
2809 error = mfi_syspd_disable(syspd);
/*
 * mfi_check_command_post -- after a userland DCMD completes, tear down or
 * restore the disk devices touched by mfi_check_command_pre() depending
 * on whether the firmware reported success.
 */
2817 /* Perform post-issue checks on commands from userland. */
2819 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2821 struct mfi_disk *ld, *ldn;
2822 struct mfi_system_pd *syspd = NULL;
2826 switch (cm->cm_frame->dcmd.opcode) {
/* LD delete: on success destroy the child device, else re-enable it. */
2827 case MFI_DCMD_LD_DELETE:
2828 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2829 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2832 KASSERT(ld != NULL, ("volume dissappeared"));
2833 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* device_delete_child() may sleep; drop the io lock around it. */
2834 mtx_unlock(&sc->mfi_io_lock);
2836 device_delete_child(sc->mfi_dev, ld->ld_dev);
2838 mtx_lock(&sc->mfi_io_lock);
2840 mfi_disk_enable(ld);
/* Config clear: on success delete all volumes, else re-enable them. */
2842 case MFI_DCMD_CFG_CLEAR:
2843 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2844 mtx_unlock(&sc->mfi_io_lock);
2846 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2847 device_delete_child(sc->mfi_dev, ld->ld_dev);
2850 mtx_lock(&sc->mfi_io_lock);
2852 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2853 mfi_disk_enable(ld);
2856 case MFI_DCMD_CFG_ADD:
2859 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2862 case MFI_DCMD_PD_STATE_SET:
2863 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2865 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2866 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2867 if (syspd->pd_id == syspd_id)
2873 /* If the transition fails then enable the syspd again */
2874 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2875 mfi_syspd_enable(syspd);
/*
 * mfi_check_for_sscd -- detect whether a CFG_ADD or LD_DELETE command
 * operates on an SSCD (CacheCade) volume; for LD_DELETE this requires a
 * synchronous LD_GET_INFO query to the firmware.  The return value (used
 * by mfi_ioctl to skip the pre/post checks) is elided in this excerpt.
 */
2881 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2883 struct mfi_config_data *conf_data;
2884 struct mfi_command *ld_cm = NULL;
2885 struct mfi_ld_info *ld_info = NULL;
2886 struct mfi_ld_config *ld;
2890 conf_data = (struct mfi_config_data *)cm->cm_data;
/* CFG_ADD: the LD config record follows the array records in the data. */
2892 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2893 p = (char *)conf_data->array;
2894 p += conf_data->array_size * conf_data->array_count;
2895 ld = (struct mfi_ld_config *)p;
2896 if (ld->params.isSSCD == 1)
/* LD_DELETE: query the firmware for the target volume's properties. */
2898 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2899 error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2900 (void **)&ld_info, sizeof(*ld_info));
2902 device_printf(sc->mfi_dev, "Failed to allocate"
2903 "MFI_DCMD_LD_GET_INFO %d", error);
2905 free(ld_info, M_MFIBUF);
2908 ld_cm->cm_flags = MFI_CMD_DATAIN;
/* Target the same LD id the delete command names in its mbox. */
2909 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2910 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2911 if (mfi_wait_command(sc, ld_cm) != 0) {
2912 device_printf(sc->mfi_dev, "failed to get log drv\n");
2913 mfi_release_command(ld_cm);
2914 free(ld_info, M_MFIBUF);
2918 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2919 free(ld_info, M_MFIBUF);
2920 mfi_release_command(ld_cm);
2924 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2926 if (ld_info->ld_config.params.isSSCD == 1)
2929 mfi_release_command(ld_cm);
2930 free(ld_info, M_MFIBUF);
/*
 * mfi_stp_cmd -- set up an STP (SATA pass-through) command from a userland
 * mfi_ioc_packet: allocates a per-SGE kernel bounce buffer via busdma,
 * copies in the user iovecs, and fills both the megasas-style SGE array
 * at mfi_sgl_off and the frame's sg32/sg64 list.  The bounce buffers are
 * torn down later in mfi_ioctl's STP cleanup path.
 */
2936 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2939 struct mfi_ioc_packet *ioc;
2940 ioc = (struct mfi_ioc_packet *)arg;
2941 int sge_size, error;
2942 struct megasas_sge *kern_sge;
2944 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* The SGE array lives inside the frame at the user-supplied offset. */
2945 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2946 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
/* Choose 64- vs 32-bit SGE format from the kernel's bus_addr_t width. */
2948 if (sizeof(bus_addr_t) == 8) {
2949 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2950 cm->cm_extra_frames = 2;
2951 sge_size = sizeof(struct mfi_sg64);
2953 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2954 sge_size = sizeof(struct mfi_sg32);
2957 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
/* One DMA tag + buffer per user iovec entry. */
2958 for (i = 0; i < ioc->mfi_sge_count; i++) {
2959 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2960 1, 0, /* algnmnt, boundary */
2961 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2962 BUS_SPACE_MAXADDR, /* highaddr */
2963 NULL, NULL, /* filter, filterarg */
2964 ioc->mfi_sgl[i].iov_len,/* maxsize */
2966 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2967 BUS_DMA_ALLOCNOW, /* flags */
2968 NULL, NULL, /* lockfunc, lockarg */
2969 &sc->mfi_kbuff_arr_dmat[i])) {
2970 device_printf(sc->mfi_dev,
2971 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2975 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2976 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2977 &sc->mfi_kbuff_arr_dmamap[i])) {
2978 device_printf(sc->mfi_dev,
2979 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
/* mfi_addr_cb stores the loaded bus address into the busaddr slot. */
2983 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2984 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2985 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2986 &sc->mfi_kbuff_arr_busaddr[i], 0);
2988 if (!sc->kbuff_arr[i]) {
2989 device_printf(sc->mfi_dev,
2990 "Could not allocate memory for kbuff_arr info\n");
/* Publish the bounce buffer in the megasas-style SGE array... */
2993 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2994 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
/* ...and mirror it into the frame's native sg64/sg32 list. */
2996 if (sizeof(bus_addr_t) == 8) {
2997 cm->cm_frame->stp.sgl.sg64[i].addr =
2998 kern_sge[i].phys_addr;
2999 cm->cm_frame->stp.sgl.sg64[i].len =
3000 ioc->mfi_sgl[i].iov_len;
3002 cm->cm_frame->stp.sgl.sg32[i].addr =
3003 kern_sge[i].phys_addr;
3004 cm->cm_frame->stp.sgl.sg32[i].len =
3005 ioc->mfi_sgl[i].iov_len;
/* Copy the user data for this iovec into its bounce buffer. */
3008 error = copyin(ioc->mfi_sgl[i].iov_base,
3010 ioc->mfi_sgl[i].iov_len);
3012 device_printf(sc->mfi_dev, "Copy in failed\n");
3017 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * mfi_user_command -- execute a userland MFIIO_PASSTHRU DCMD: copy in the
 * user buffer (capped at 1MB), run the pre/post configuration checks,
 * wait for completion, and copy the frame and data back out.
 */
3022 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3024 struct mfi_command *cm;
3025 struct mfi_dcmd_frame *dcmd;
3026 void *ioc_buf = NULL;
3028 int error = 0, locked;
3030 if (ioc->buf_size > 0) {
/* Reject oversized user buffers (limit: 1MB). */
3031 if (ioc->buf_size > 1024 * 1024)
3033 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3034 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3036 device_printf(sc->mfi_dev, "failed to copyin\n");
3037 free(ioc_buf, M_MFIBUF);
/* Take the config lock for config-mutating opcodes (see mfi_config_lock). */
3042 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3044 mtx_lock(&sc->mfi_io_lock);
/* Block until a free command is available. */
3045 while ((cm = mfi_dequeue_free(sc)) == NULL)
3046 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3048 /* Save context for later */
3049 context = cm->cm_frame->header.context;
3051 dcmd = &cm->cm_frame->dcmd;
/* The user-supplied frame overwrites ours, including the context... */
3052 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3054 cm->cm_sg = &dcmd->sgl;
3055 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3056 cm->cm_data = ioc_buf;
3057 cm->cm_len = ioc->buf_size;
3059 /* restore context */
3060 cm->cm_frame->header.context = context;
3062 /* Cheat since we don't know if we're writing or reading */
3063 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3065 error = mfi_check_command_pre(sc, cm);
3069 error = mfi_wait_command(sc, cm);
3071 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the completed frame (status etc.) to the caller's struct. */
3074 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3075 mfi_check_command_post(sc, cm);
3077 mfi_release_command(cm);
3078 mtx_unlock(&sc->mfi_io_lock);
3079 mfi_config_unlock(sc, locked);
/* Copy result data back out after dropping the locks. */
3080 if (ioc->buf_size > 0)
3081 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3083 free(ioc_buf, M_MFIBUF);
/* Convert a user-supplied 32/64-bit integer value back into a pointer. */
3087 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * mfi_ioctl -- main character-device ioctl dispatcher.  Handles driver
 * statistics, disk queries, raw MFI frame submission (MFI_CMD and its
 * 32-bit compat variant), AEN registration, the Linux megaraid shims,
 * and the PASSTHRU interfaces.
 *
 * NOTE(review): many original source lines are elided from this excerpt;
 * comments below annotate only the code that is visible.
 */
3090 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3092 struct mfi_softc *sc;
3093 union mfi_statrequest *ms;
3094 struct mfi_ioc_packet *ioc;
3095 #ifdef COMPAT_FREEBSD32
3096 struct mfi_ioc_packet32 *ioc32;
3098 struct mfi_ioc_aen *aen;
3099 struct mfi_command *cm = NULL;
3100 uint32_t context = 0;
3101 union mfi_sense_ptr sense_ptr;
3102 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3105 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3106 #ifdef COMPAT_FREEBSD32
3107 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3108 struct mfi_ioc_passthru iop_swab;
/* Refuse ioctls when the adapter is in a critical/pending-reset state. */
3118 if (sc->hw_crit_error)
3121 if (sc->issuepend_done == 0)
/* MFIIO_STATS: copy out the requested queue statistics entry. */
3126 ms = (union mfi_statrequest *)arg;
3127 switch (ms->ms_item) {
3132 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3133 sizeof(struct mfi_qstat));
3140 case MFIIO_QUERY_DISK:
3142 struct mfi_query_disk *qd;
3143 struct mfi_disk *ld;
3145 qd = (struct mfi_query_disk *)arg;
3146 mtx_lock(&sc->mfi_io_lock);
3147 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3148 if (ld->ld_id == qd->array_id)
3153 mtx_unlock(&sc->mfi_io_lock);
3157 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
/* Report the volume's device node name, e.g. "mfid0". */
3159 bzero(qd->devname, SPECNAMELEN + 1);
3160 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3161 mtx_unlock(&sc->mfi_io_lock);
3165 #ifdef COMPAT_FREEBSD32
/* Raw MFI frame submission: possibly redirect to another adapter. */
3169 devclass_t devclass;
3170 ioc = (struct mfi_ioc_packet *)arg;
3173 adapter = ioc->mfi_adapter_no;
3174 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3175 devclass = devclass_find("mfi");
3176 sc = devclass_get_softc(devclass, adapter);
3178 mtx_lock(&sc->mfi_io_lock);
3179 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3180 mtx_unlock(&sc->mfi_io_lock);
3183 mtx_unlock(&sc->mfi_io_lock);
3187 * save off original context since copying from user
3188 * will clobber some data
3190 context = cm->cm_frame->header.context;
3191 cm->cm_frame->header.context = cm->cm_index;
/* Import the user frame (two frames' worth of raw bytes). */
3193 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3194 2 * MEGAMFI_FRAME_SIZE);
3195 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3196 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3197 cm->cm_frame->header.scsi_status = 0;
3198 cm->cm_frame->header.pad0 = 0;
3199 if (ioc->mfi_sge_count) {
3201 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Derive the transfer direction from the user-supplied frame flags. */
3205 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3206 cm->cm_flags |= MFI_CMD_DATAIN;
3207 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3208 cm->cm_flags |= MFI_CMD_DATAOUT;
3209 /* Legacy app shim */
3210 if (cm->cm_flags == 0)
3211 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3212 cm->cm_len = cm->cm_frame->header.data_len;
/* STP commands carry an extra leading chunk sized by the first iovec. */
3213 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3214 #ifdef COMPAT_FREEBSD32
3215 if (cmd == MFI_CMD) {
3218 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3219 #ifdef COMPAT_FREEBSD32
3221 /* 32bit on 64bit */
3222 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3223 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3226 cm->cm_len += cm->cm_stp_len;
/* Allocate a contiguous kernel staging buffer for the data phase. */
3229 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3230 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3236 /* restore header context */
3237 cm->cm_frame->header.context = context;
/* STP takes a dedicated setup path with per-SGE bounce buffers. */
3239 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3240 res = mfi_stp_cmd(sc, cm, arg);
/* Copy user iovecs into the staging buffer for outbound data. */
3245 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3246 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3247 for (i = 0; i < ioc->mfi_sge_count; i++) {
3248 #ifdef COMPAT_FREEBSD32
3249 if (cmd == MFI_CMD) {
3252 addr = ioc->mfi_sgl[i].iov_base;
3253 len = ioc->mfi_sgl[i].iov_len;
3254 #ifdef COMPAT_FREEBSD32
3256 /* 32bit on 64bit */
3257 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3258 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3259 len = ioc32->mfi_sgl[i].iov_len;
3262 error = copyin(addr, temp, len);
3264 device_printf(sc->mfi_dev,
3265 "Copy in failed\n");
/* DCMDs may need the exclusive configuration lock. */
3273 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3274 locked = mfi_config_lock(sc,
3275 cm->cm_frame->dcmd.opcode);
/* Point pass-through frames at this command's sense buffer. */
3277 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3278 cm->cm_frame->pass.sense_addr_lo =
3279 (uint32_t)cm->cm_sense_busaddr;
3280 cm->cm_frame->pass.sense_addr_hi =
3281 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3283 mtx_lock(&sc->mfi_io_lock);
/* SSCD (CacheCade) targets bypass the normal pre/post checks. */
3284 skip_pre_post = mfi_check_for_sscd (sc, cm);
3285 if (!skip_pre_post) {
3286 error = mfi_check_command_pre(sc, cm);
3288 mtx_unlock(&sc->mfi_io_lock);
3292 if ((error = mfi_wait_command(sc, cm)) != 0) {
3293 device_printf(sc->mfi_dev,
3294 "Controller polled failed\n");
3295 mtx_unlock(&sc->mfi_io_lock);
3298 if (!skip_pre_post) {
3299 mfi_check_command_post(sc, cm);
3301 mtx_unlock(&sc->mfi_io_lock);
/* Copy inbound data back out to the user iovecs. */
3303 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3305 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3306 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3307 for (i = 0; i < ioc->mfi_sge_count; i++) {
3308 #ifdef COMPAT_FREEBSD32
3309 if (cmd == MFI_CMD) {
3312 addr = ioc->mfi_sgl[i].iov_base;
3313 len = ioc->mfi_sgl[i].iov_len;
3314 #ifdef COMPAT_FREEBSD32
3316 /* 32bit on 64bit */
3317 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3318 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3319 len = ioc32->mfi_sgl[i].iov_len;
3322 error = copyout(temp, addr, len);
3324 device_printf(sc->mfi_dev,
3325 "Copy out failed\n");
3333 if (ioc->mfi_sense_len) {
3334 /* get user-space sense ptr then copy out sense */
3335 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3336 &sense_ptr.sense_ptr_data[0],
3337 sizeof(sense_ptr.sense_ptr_data));
3338 #ifdef COMPAT_FREEBSD32
3339 if (cmd != MFI_CMD) {
3341 * not 64bit native so zero out any address
3343 sense_ptr.addr.high = 0;
3346 error = copyout(cm->cm_sense, sense_ptr.user_space,
3347 ioc->mfi_sense_len);
3349 device_printf(sc->mfi_dev,
3350 "Copy out failed\n");
/* Reflect the firmware's completion status back to userland. */
3355 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3357 mfi_config_unlock(sc, locked);
3359 free(data, M_MFIBUF);
/* STP cleanup: unload/free/destroy the per-SGE bounce-buffer DMA state. */
3360 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3361 for (i = 0; i < 2; i++) {
3362 if (sc->kbuff_arr[i]) {
3363 if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3365 sc->mfi_kbuff_arr_dmat[i],
3366 sc->mfi_kbuff_arr_dmamap[i]
3368 if (sc->kbuff_arr[i] != NULL)
3370 sc->mfi_kbuff_arr_dmat[i],
3372 sc->mfi_kbuff_arr_dmamap[i]
3374 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3375 bus_dma_tag_destroy(
3376 sc->mfi_kbuff_arr_dmat[i]);
3381 mtx_lock(&sc->mfi_io_lock);
3382 mfi_release_command(cm);
3383 mtx_unlock(&sc->mfi_io_lock);
/* MFI_SET_AEN: register the caller for asynchronous event notification. */
3389 aen = (struct mfi_ioc_aen *)arg;
3390 mtx_lock(&sc->mfi_io_lock);
3391 error = mfi_aen_register(sc, aen->aen_seq_num,
3392 aen->aen_class_locale);
3393 mtx_unlock(&sc->mfi_io_lock);
3396 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3398 devclass_t devclass;
3399 struct mfi_linux_ioc_packet l_ioc;
3402 devclass = devclass_find("mfi");
3403 if (devclass == NULL)
3406 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Route the Linux-format request to the addressed adapter instance. */
3409 adapter = l_ioc.lioc_adapter_no;
3410 sc = devclass_get_softc(devclass, adapter);
3413 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3414 cmd, arg, flag, td));
3417 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3419 devclass_t devclass;
3420 struct mfi_linux_ioc_aen l_aen;
3423 devclass = devclass_find("mfi");
3424 if (devclass == NULL)
3427 error = copyin(arg, &l_aen, sizeof(l_aen));
3430 adapter = l_aen.laen_adapter_no;
3431 sc = devclass_get_softc(devclass, adapter);
3434 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3435 cmd, arg, flag, td));
3438 #ifdef COMPAT_FREEBSD32
/* 32-bit passthru: swab the struct, then fall through to PASSTHRU. */
3439 case MFIIO_PASSTHRU32:
3440 if (!SV_CURPROC_FLAG(SV_ILP32)) {
3444 iop_swab.ioc_frame = iop32->ioc_frame;
3445 iop_swab.buf_size = iop32->buf_size;
3446 iop_swab.buf = PTRIN(iop32->buf);
3450 case MFIIO_PASSTHRU:
3451 error = mfi_user_command(sc, iop);
3452 #ifdef COMPAT_FREEBSD32
/* Copy the (possibly updated) frame back into the 32-bit struct. */
3453 if (cmd == MFIIO_PASSTHRU32)
3454 iop32->ioc_frame = iop_swab.ioc_frame;
3458 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_linux_ioctl_int -- back end for the Linux megaraid_sas ioctl shim:
 * imports a Linux-format ioc packet, runs the frame exactly like the
 * native MFI_CMD path (staging buffer, pre/post checks, sense copy-out),
 * and writes results back in the Linux layout.  All user pointers here
 * are 32-bit and converted with PTRIN().
 */
3467 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3469 struct mfi_softc *sc;
3470 struct mfi_linux_ioc_packet l_ioc;
3471 struct mfi_linux_ioc_aen l_aen;
3472 struct mfi_command *cm = NULL;
3473 struct mfi_aen *mfi_aen_entry;
3474 union mfi_sense_ptr sense_ptr;
3475 uint32_t context = 0;
3476 uint8_t *data = NULL, *temp;
3483 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3484 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the SGE count before trusting anything else in the packet. */
3488 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3492 mtx_lock(&sc->mfi_io_lock);
3493 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3494 mtx_unlock(&sc->mfi_io_lock);
3497 mtx_unlock(&sc->mfi_io_lock);
3501 * save off original context since copying from user
3502 * will clobber some data
3504 context = cm->cm_frame->header.context;
3506 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3507 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3508 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3509 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3510 cm->cm_frame->header.scsi_status = 0;
3511 cm->cm_frame->header.pad0 = 0;
3512 if (l_ioc.lioc_sge_count)
3514 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
/* Derive the transfer direction from the user-supplied frame flags. */
3516 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3517 cm->cm_flags |= MFI_CMD_DATAIN;
3518 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3519 cm->cm_flags |= MFI_CMD_DATAOUT;
3520 cm->cm_len = cm->cm_frame->header.data_len;
/* Stage the data phase in a contiguous kernel buffer. */
3522 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3523 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3529 /* restore header context */
3530 cm->cm_frame->header.context = context;
/* Copy outbound user iovecs into the staging buffer. */
3533 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3534 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3535 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3537 l_ioc.lioc_sgl[i].iov_len);
3539 device_printf(sc->mfi_dev,
3540 "Copy in failed\n");
3543 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
/* DCMDs may need the exclusive configuration lock. */
3547 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3548 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Point pass-through frames at this command's sense buffer. */
3550 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3551 cm->cm_frame->pass.sense_addr_lo =
3552 (uint32_t)cm->cm_sense_busaddr;
3553 cm->cm_frame->pass.sense_addr_hi =
3554 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3557 mtx_lock(&sc->mfi_io_lock);
3558 error = mfi_check_command_pre(sc, cm);
3560 mtx_unlock(&sc->mfi_io_lock);
3564 if ((error = mfi_wait_command(sc, cm)) != 0) {
3565 device_printf(sc->mfi_dev,
3566 "Controller polled failed\n");
3567 mtx_unlock(&sc->mfi_io_lock);
3571 mfi_check_command_post(sc, cm);
3572 mtx_unlock(&sc->mfi_io_lock);
/* Copy inbound data back out to the user iovecs. */
3575 if (cm->cm_flags & MFI_CMD_DATAIN) {
3576 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3577 error = copyout(temp,
3578 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3579 l_ioc.lioc_sgl[i].iov_len);
3581 device_printf(sc->mfi_dev,
3582 "Copy out failed\n");
3585 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3589 if (l_ioc.lioc_sense_len) {
3590 /* get user-space sense ptr then copy out sense */
3591 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3592 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3593 &sense_ptr.sense_ptr_data[0],
3594 sizeof(sense_ptr.sense_ptr_data));
3597 * only 32bit Linux support so zero out any
3598 * address over 32bit
3600 sense_ptr.addr.high = 0;
3602 error = copyout(cm->cm_sense, sense_ptr.user_space,
3603 l_ioc.lioc_sense_len);
3605 device_printf(sc->mfi_dev,
3606 "Copy out failed\n");
/* Write the completion status into the user's packet header. */
3611 error = copyout(&cm->cm_frame->header.cmd_status,
3612 &((struct mfi_linux_ioc_packet*)arg)
3613 ->lioc_frame.hdr.cmd_status,
3616 device_printf(sc->mfi_dev,
3617 "Copy out failed\n");
3622 mfi_config_unlock(sc, locked);
3624 free(data, M_MFIBUF);
3626 mtx_lock(&sc->mfi_io_lock);
3627 mfi_release_command(cm);
3628 mtx_unlock(&sc->mfi_io_lock);
3632 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3633 error = copyin(arg, &l_aen, sizeof(l_aen));
3636 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
/* Track the registering process so its AENs can be cleaned up on close. */
3637 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3639 mtx_lock(&sc->mfi_io_lock);
3640 if (mfi_aen_entry != NULL) {
3641 mfi_aen_entry->p = curproc;
3642 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3645 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3646 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
3649 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3651 free(mfi_aen_entry, M_MFIBUF);
3653 mtx_unlock(&sc->mfi_io_lock);
3657 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_poll -- cdev poll handler: reports readable when an AEN has fired
 * (consuming the trigger flag), otherwise records the waiter via
 * selrecord() for later wakeup.
 */
3666 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3668 struct mfi_softc *sc;
3673 if (poll_events & (POLLIN | POLLRDNORM)) {
3674 if (sc->mfi_aen_triggered != 0) {
3675 revents |= poll_events & (POLLIN | POLLRDNORM);
/* Consume the AEN trigger so the next poll blocks again. */
3676 sc->mfi_aen_triggered = 0;
3678 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3684 if (poll_events & (POLLIN | POLLRDNORM)) {
3685 sc->mfi_poll_waiting = 1;
3686 selrecord(td, &sc->mfi_select);
/*
 * Debug dump routine: walks every mfi softc in the devclass and reports
 * busy commands older than mfi_cmd_timeout seconds.
 * NOTE(review): the function's signature line is elided in this excerpt —
 * presumably this is mfi_dump_all(); confirm against the full source.
 */
3696 struct mfi_softc *sc;
3697 struct mfi_command *cm;
3703 dc = devclass_find("mfi");
3705 printf("No mfi dev class\n");
/* Iterate all adapter instances registered under the "mfi" devclass. */
3709 for (i = 0; ; i++) {
3710 sc = devclass_get_softc(dc, i);
3713 device_printf(sc->mfi_dev, "Dumping\n\n");
/* Any command timestamped at or before this is considered stuck. */
3715 deadline = time_uptime - mfi_cmd_timeout;
3716 mtx_lock(&sc->mfi_io_lock);
3717 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3718 if (cm->cm_timestamp <= deadline) {
3719 device_printf(sc->mfi_dev,
3720 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3721 cm, (int)(time_uptime - cm->cm_timestamp));
3732 mtx_unlock(&sc->mfi_io_lock);
3739 mfi_timeout(void *data)
3741 struct mfi_softc *sc = (struct mfi_softc *)data;
3742 struct mfi_command *cm, *tmp;
3746 deadline = time_uptime - mfi_cmd_timeout;
3747 if (sc->adpreset == 0) {
3748 if (!mfi_tbolt_reset(sc)) {
3749 callout_reset(&sc->mfi_watchdog_callout,
3750 mfi_cmd_timeout * hz, mfi_timeout, sc);
3754 mtx_lock(&sc->mfi_io_lock);
3755 TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3756 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3758 if (cm->cm_timestamp <= deadline) {
3759 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3760 cm->cm_timestamp = time_uptime;
3762 device_printf(sc->mfi_dev,
3763 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3764 cm, (int)(time_uptime - cm->cm_timestamp)
3767 MFI_VALIDATE_CMD(sc, cm);
3769 * While commands can get stuck forever we do
3770 * not fail them as there is no way to tell if
3771 * the controller has actually processed them
3774 * In addition its very likely that force
3775 * failing a command here would cause a panic
3788 mtx_unlock(&sc->mfi_io_lock);
3790 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,