2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
4 * Copyright (c) 2006 IronPort Systems
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2007 LSI Corp.
30 * Copyright (c) 2007 Rajesh Prabhakaran.
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sysctl.h>
63 #include <sys/malloc.h>
64 #include <sys/kernel.h>
66 #include <sys/selinfo.h>
69 #include <sys/eventhandler.h>
72 #include <sys/ioccom.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
79 #include <machine/bus.h>
80 #include <machine/resource.h>
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
/*
 * Forward declarations for the driver's file-local helpers.
 * NOTE(review): this listing has dropped lines — e.g. the second
 * parameter line of both mfi_issue_cmd_* prototypes (original lines
 * 124/126) is missing — verify against upstream sys/dev/mfi/mfi.c.
 */
88 static int mfi_alloc_commands(struct mfi_softc *);
89 static int mfi_comms_init(struct mfi_softc *);
90 static int mfi_get_controller_info(struct mfi_softc *);
91 static int mfi_get_log_state(struct mfi_softc *,
92 struct mfi_evt_log_state **);
93 static int mfi_parse_entries(struct mfi_softc *, int, int);
94 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void mfi_startup(void *arg);
96 static void mfi_intr(void *arg);
97 static void mfi_ldprobe(struct mfi_softc *sc);
98 static void mfi_syspdprobe(struct mfi_softc *sc);
99 static void mfi_handle_evt(void *context, int pending);
100 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void mfi_aen_complete(struct mfi_command *);
102 static int mfi_add_ld(struct mfi_softc *sc, int);
103 static void mfi_add_ld_complete(struct mfi_command *);
104 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107 static void mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void mfi_timeout(void *);
115 static int mfi_user_command(struct mfi_softc *,
116 struct mfi_ioc_passthru *);
117 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
125 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
/*
 * Loader tunables / sysctl knobs exposed under hw.mfi.  RWTUN entries
 * may be set both at boot (loader.conf) and at runtime; RDTUN entries
 * (max_cmds) are boot-time only.
 */
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
/* Which event locales the driver registers for in the AEN mechanism. */
134 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
136 0, "event message locale");
/* Minimum event severity class the driver subscribes to. */
138 static int mfi_event_class = MFI_EVT_CLASS_INFO;
139 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
140 0, "event message class");
/* Driver-side cap on firmware command slots; see use in mfi_attach(). */
142 static int mfi_max_cmds = 128;
143 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
144 0, "Max commands limit (-1 = controller limit)");
146 static int mfi_detect_jbod_change = 1;
147 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
148 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
/* Non-static: also referenced from other mfi source files. */
150 int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
151 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
152 &mfi_polled_cmd_timeout, 0,
153 "Polled command timeout - used for firmware flash etc (in seconds)");
/* Watchdog interval used by the callout started in mfi_attach(). */
155 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
157 0, "Command timeout (in seconds)");
159 /* Management interface */
160 static d_open_t mfi_open;
161 static d_close_t mfi_close;
162 static d_ioctl_t mfi_ioctl;
163 static d_poll_t mfi_poll;
/*
 * Character-device switch for the /dev/mfi%d management node created
 * in mfi_attach().
 * NOTE(review): this listing is missing the .d_open, .d_poll and
 * .d_name initializers and the closing "};" (original lines 167-173)
 * — restore from upstream before building.
 */
165 static struct cdevsw mfi_cdevsw = {
166 .d_version = D_VERSION,
169 .d_close = mfi_close,
170 .d_ioctl = mfi_ioctl,
/* Malloc type used for all of this driver's kernel heap allocations. */
175 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
177 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/* Global DMA bookkeeping shared with the Skinny controller support code. */
178 struct mfi_skinny_dma_info mfi_skinny;
/*
 * Enable firmware->host interrupts on xscale (1064R) controllers by
 * writing the outbound interrupt mask register.
 * NOTE(review): the "static void" storage line and braces were dropped
 * by this listing.
 */
181 mfi_enable_intr_xscale(struct mfi_softc *sc)
183 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on PPC-class controllers.  Each hardware family
 * (1078 / GEN2 / Skinny) uses a different doorbell-clear + mask-write
 * sequence; the family is selected by sc->mfi_flags.
 * NOTE(review): closing braces between the if/else-if arms are missing
 * from this listing.
 */
187 mfi_enable_intr_ppc(struct mfi_softc *sc)
189 if (sc->mfi_flags & MFI_FLAGS_1078) {
/* Ack any pending doorbell bits, then unmask everything but EIM. */
190 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
191 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
193 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
194 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
195 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
197 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
/* Skinny: only bit 0 of the mask register is meaningful. */
198 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
/* Read the raw firmware status word from the xscale outbound message
 * register (decoded by callers with MFI_FWSTATE_* masks). */
203 mfi_read_fw_status_xscale(struct mfi_softc *sc)
205 return MFI_READ4(sc, MFI_OMSG0);
/* PPC-family equivalent of mfi_read_fw_status_xscale(); the status
 * lives in the outbound scratchpad register instead. */
209 mfi_read_fw_status_ppc(struct mfi_softc *sc)
211 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether the xscale controller has a valid pending interrupt;
 * if so, acknowledge it by writing the status back.
 * NOTE(review): the "return 1;" (no interrupt) and trailing "return 0;"
 * lines were dropped by this listing.
 */
215 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
219 status = MFI_READ4(sc, MFI_OSTS);
220 if ((status & MFI_OSTS_INTR_VALID) == 0)
/* Write-1-to-clear acknowledgement of the pending bits. */
223 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * PPC-family interrupt check/ack.  Each controller family tests its own
 * "reply message" bit; if it is not set there is nothing for us.  The
 * ack register also differs: Skinny acks via OSTS, the others via ODCR0.
 * NOTE(review): the "return 1;" bodies of the inner ifs, several closing
 * braces, the "else" before line 251 and the final "return 0;" are
 * missing from this listing.
 */
228 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
232 status = MFI_READ4(sc, MFI_OSTS);
233 if (sc->mfi_flags & MFI_FLAGS_1078) {
234 if (!(status & MFI_1078_RM)) {
238 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
239 if (!(status & MFI_GEN2_RM)) {
243 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
244 if (!(status & MFI_SKINNY_RM)) {
248 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
249 MFI_WRITE4(sc, MFI_OSTS, status);
251 MFI_WRITE4(sc, MFI_ODCR0, status);
/* Post a command to the xscale inbound queue port: the frame bus
 * address (shifted per hardware format) OR'd with its frame count. */
256 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
258 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
/*
 * Post a command on PPC-family controllers.  Skinny parts use a 64-bit
 * queue port (low word then an explicit zero high word); other families
 * use the single 32-bit port.  In both cases the low bits carry the
 * frame count and a "valid" flag.
 * NOTE(review): the "} else {" between the two arms and closing braces
 * are missing from this listing.
 */
262 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
264 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
265 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
266 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
268 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
/*
 * Drive the firmware's boot state machine until it reports READY.
 * For each non-READY state we may kick the firmware (clear handshake,
 * request ready transition, ack hotplug) and then poll the status
 * register for up to max_wait seconds.  DEVICE_SCAN progress is
 * tracked via the full status word so a scanning firmware is not
 * declared stuck while it is still making progress.
 * Returns 0 on READY, non-zero on fault/timeout (error returns are in
 * lines this listing dropped).
 * NOTE(review): many lines are missing here — the "int i;"/"max_wait"
 * declarations, the switch() line, every "break;"/"return" in the
 * case arms, and the DELAY/poll tail of the wait loop.  Verify against
 * upstream before trusting control flow.
 */
273 mfi_transition_firmware(struct mfi_softc *sc)
275 uint32_t fw_state, cur_state;
277 uint32_t cur_abs_reg_val = 0;
278 uint32_t prev_abs_reg_val = 0;
280 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
281 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
282 while (fw_state != MFI_FWSTATE_READY) {
284 device_printf(sc->mfi_dev, "Waiting for firmware to "
286 cur_state = fw_state;
288 case MFI_FWSTATE_FAULT:
289 device_printf(sc->mfi_dev, "Firmware fault\n");
291 case MFI_FWSTATE_WAIT_HANDSHAKE:
/* Skinny/ThunderBolt parts use a different doorbell register. */
292 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
293 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
295 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
296 max_wait = MFI_RESET_WAIT_TIME;
298 case MFI_FWSTATE_OPERATIONAL:
299 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
/* NOTE(review): magic 7 — presumably MFI_FWINIT_READY|MCA|ABORT; confirm. */
300 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
302 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
303 max_wait = MFI_RESET_WAIT_TIME;
305 case MFI_FWSTATE_UNDEFINED:
306 case MFI_FWSTATE_BB_INIT:
307 max_wait = MFI_RESET_WAIT_TIME;
309 case MFI_FWSTATE_FW_INIT_2:
310 max_wait = MFI_RESET_WAIT_TIME;
312 case MFI_FWSTATE_FW_INIT:
313 case MFI_FWSTATE_FLUSH_CACHE:
314 max_wait = MFI_RESET_WAIT_TIME;
316 case MFI_FWSTATE_DEVICE_SCAN:
317 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
/* Remember the full word so scan progress can be detected below. */
318 prev_abs_reg_val = cur_abs_reg_val;
320 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
321 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
322 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
324 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
325 max_wait = MFI_RESET_WAIT_TIME;
328 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
/* Poll ~10x per second until the state changes or max_wait expires. */
332 for (i = 0; i < (max_wait * 10); i++) {
333 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
334 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
335 if (fw_state == cur_state)
340 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
341 /* Check the device scanning progress */
342 if (prev_abs_reg_val != cur_abs_reg_val) {
346 if (fw_state == cur_state) {
347 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load() callback that records the first segment's bus
 * address into the bus_addr_t pointed to by arg.  Used for all the
 * single-segment DMA allocations made in mfi_attach().
 * NOTE(review): the "bus_addr_t *addr;" declaration and the
 * "addr = arg;" line were dropped by this listing.
 */
356 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
361 *addr = segs[0].ds_addr;
/*
 * Main controller attach: initialize locks/queues, select the
 * per-family register access methods, bring the firmware to READY,
 * allocate all DMA resources (version buffer, ThunderBolt pools,
 * comms queues, command frames, sense buffers), initialize the
 * firmware queues, hook the interrupt, register the AEN machinery,
 * shutdown handler, management cdev and per-device sysctls, and start
 * the command watchdog.
 * Returns 0 on success, errno on failure.
 * NOTE(review): this listing has dropped a large number of lines
 * (gotos to the error-unwind path, closing braces, the "out:" label,
 * several declarations such as "uint32_t status;" and the cdev alias
 * pointer) — reconstruct from upstream sys/dev/mfi/mfi.c before use.
 */
366 mfi_attach(struct mfi_softc *sc)
369 int error, commsz, framessz, sensesz;
370 int frames, unit, max_fw_sge, max_fw_cmds;
371 uint32_t tb_mem_size = 0;
377 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
/* Basic softc infrastructure: locks, queues, deferred tasks. */
380 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
381 sx_init(&sc->mfi_config_lock, "MFI config");
382 TAILQ_INIT(&sc->mfi_ld_tqh);
383 TAILQ_INIT(&sc->mfi_syspd_tqh);
384 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
385 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
386 TAILQ_INIT(&sc->mfi_evt_queue);
387 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
388 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
389 TAILQ_INIT(&sc->mfi_aen_pids);
390 TAILQ_INIT(&sc->mfi_cam_ccbq);
398 sc->last_seq_num = 0;
/* Assume OCR is unsupported until the controller says otherwise. */
399 sc->disableOnlineCtrlReset = 1;
400 sc->issuepend_done = 1;
401 sc->hw_crit_error = 0;
/* Bind the register-access method pointers to the hardware family. */
403 if (sc->mfi_flags & MFI_FLAGS_1064R) {
404 sc->mfi_enable_intr = mfi_enable_intr_xscale;
405 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
406 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
407 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
408 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
409 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
410 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
411 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
412 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
413 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
414 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
416 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
418 sc->mfi_enable_intr = mfi_enable_intr_ppc;
419 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
420 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
421 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
425 /* Before we get too far, see if the firmware is working */
426 if ((error = mfi_transition_firmware(sc)) != 0) {
427 device_printf(sc->mfi_dev, "Firmware not in READY state, "
428 "error %d\n", error);
432 /* Start: LSIP200113393 */
/* DMA-able scratch buffer used for the firmware version query. */
433 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
434 1, 0, /* algnmnt, boundary */
435 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
436 BUS_SPACE_MAXADDR, /* highaddr */
437 NULL, NULL, /* filter, filterarg */
438 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
440 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
442 NULL, NULL, /* lockfunc, lockarg */
443 &sc->verbuf_h_dmat)) {
444 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
447 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
448 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
449 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
452 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
453 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
454 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
455 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
456 /* End: LSIP200113393 */
459 * Get information needed for sizing the contiguous memory for the
460 * frame pool. Size down the sgl parameter since we know that
461 * we will never need more than what's required for MAXPHYS.
462 * It would be nice if these constants were available at runtime
463 * instead of compile time.
465 status = sc->mfi_read_fw_status(sc);
466 max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
/* Respect the hw.mfi.max_cmds tunable if it is a tighter limit. */
467 if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
468 device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
469 max_fw_cmds, mfi_max_cmds);
470 sc->mfi_max_fw_cmds = mfi_max_cmds;
472 sc->mfi_max_fw_cmds = max_fw_cmds;
474 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
475 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
477 /* ThunderBolt Support get the contiguous memory */
479 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
480 mfi_tbolt_init_globals(sc);
481 device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
482 "MaxSgl = %d, state = %#x\n", max_fw_cmds,
483 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
484 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
/* Request/reply descriptor pool for ThunderBolt controllers. */
486 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
487 1, 0, /* algnmnt, boundary */
488 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
489 BUS_SPACE_MAXADDR, /* highaddr */
490 NULL, NULL, /* filter, filterarg */
491 tb_mem_size, /* maxsize */
493 tb_mem_size, /* maxsegsize */
495 NULL, NULL, /* lockfunc, lockarg */
497 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
500 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
501 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
502 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
505 bzero(sc->request_message_pool, tb_mem_size);
506 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
507 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
509 /* For ThunderBolt memory init */
/* 0x100-aligned single frame used for the TB INIT handshake. */
510 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
511 0x100, 0, /* alignmnt, boundary */
512 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
513 BUS_SPACE_MAXADDR, /* highaddr */
514 NULL, NULL, /* filter, filterarg */
515 MFI_FRAME_SIZE, /* maxsize */
517 MFI_FRAME_SIZE, /* maxsegsize */
519 NULL, NULL, /* lockfunc, lockarg */
520 &sc->mfi_tb_init_dmat)) {
521 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
524 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
525 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
526 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
529 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
530 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
531 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
532 &sc->mfi_tb_init_busaddr, 0);
533 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
535 device_printf(sc->mfi_dev,
536 "Thunderbolt pool preparation error\n");
541 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
542 we are taking it different from what we have allocated for Request
543 and reply descriptors to avoid confusion later
545 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
546 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
547 1, 0, /* algnmnt, boundary */
548 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
549 BUS_SPACE_MAXADDR, /* highaddr */
550 NULL, NULL, /* filter, filterarg */
551 tb_mem_size, /* maxsize */
553 tb_mem_size, /* maxsegsize */
555 NULL, NULL, /* lockfunc, lockarg */
556 &sc->mfi_tb_ioc_init_dmat)) {
557 device_printf(sc->mfi_dev,
558 "Cannot allocate comms DMA tag\n");
561 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
562 (void **)&sc->mfi_tb_ioc_init_desc,
563 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
564 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
567 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
568 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
569 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
570 &sc->mfi_tb_ioc_init_busaddr, 0);
573 * Create the dma tag for data buffers. Used both for block I/O
574 * and for various internal data queries.
576 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
577 1, 0, /* algnmnt, boundary */
578 BUS_SPACE_MAXADDR, /* lowaddr */
579 BUS_SPACE_MAXADDR, /* highaddr */
580 NULL, NULL, /* filter, filterarg */
581 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
582 sc->mfi_max_sge, /* nsegments */
583 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
584 BUS_DMA_ALLOCNOW, /* flags */
585 busdma_lock_mutex, /* lockfunc */
586 &sc->mfi_io_lock, /* lockfuncarg */
587 &sc->mfi_buffer_dmat)) {
588 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
593 * Allocate DMA memory for the comms queues. Keep it under 4GB for
594 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
595 * entry, so the calculated size here will be will be 1 more than
596 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
598 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
599 sizeof(struct mfi_hwcomms);
600 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
601 1, 0, /* algnmnt, boundary */
602 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
603 BUS_SPACE_MAXADDR, /* highaddr */
604 NULL, NULL, /* filter, filterarg */
605 commsz, /* maxsize */
607 commsz, /* maxsegsize */
609 NULL, NULL, /* lockfunc, lockarg */
610 &sc->mfi_comms_dmat)) {
611 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
614 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
615 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
616 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
619 bzero(sc->mfi_comms, commsz);
620 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
621 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
623 * Allocate DMA memory for the command frames. Keep them in the
624 * lower 4GB for efficiency. Calculate the size of the commands at
625 * the same time; each command is one 64 byte frame plus a set of
626 * additional frames for holding sg lists or other data.
627 * The assumption here is that the SG list will start at the second
628 * frame and not use the unused bytes in the first frame. While this
629 * isn't technically correct, it simplifies the calculation and allows
630 * for command frames that might be larger than an mfi_io_frame.
632 if (sizeof(bus_addr_t) == 8) {
633 sc->mfi_sge_size = sizeof(struct mfi_sg64);
634 sc->mfi_flags |= MFI_FLAGS_SG64;
636 sc->mfi_sge_size = sizeof(struct mfi_sg32);
638 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
639 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
640 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
641 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
642 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
643 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
644 64, 0, /* algnmnt, boundary */
645 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
646 BUS_SPACE_MAXADDR, /* highaddr */
647 NULL, NULL, /* filter, filterarg */
648 framessz, /* maxsize */
650 framessz, /* maxsegsize */
652 NULL, NULL, /* lockfunc, lockarg */
653 &sc->mfi_frames_dmat)) {
654 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
657 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
658 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
659 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
662 bzero(sc->mfi_frames, framessz);
663 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
664 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
666 * Allocate DMA memory for the frame sense data. Keep them in the
667 * lower 4GB for efficiency
669 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
670 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
671 4, 0, /* algnmnt, boundary */
672 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
673 BUS_SPACE_MAXADDR, /* highaddr */
674 NULL, NULL, /* filter, filterarg */
675 sensesz, /* maxsize */
677 sensesz, /* maxsegsize */
679 NULL, NULL, /* lockfunc, lockarg */
680 &sc->mfi_sense_dmat)) {
681 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
684 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
685 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
686 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
689 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
690 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
691 if ((error = mfi_alloc_commands(sc)) != 0)
694 /* Before moving the FW to operational state, check whether
695 * hostmemory is required by the FW or not
698 /* ThunderBolt MFI_IOC2 INIT */
699 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
700 sc->mfi_disable_intr(sc);
701 mtx_lock(&sc->mfi_io_lock);
702 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
703 device_printf(sc->mfi_dev,
704 "TB Init has failed with error %d\n",error);
705 mtx_unlock(&sc->mfi_io_lock);
708 mtx_unlock(&sc->mfi_io_lock);
710 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
712 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
713 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
715 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
718 sc->mfi_intr_ptr = mfi_intr_tbolt;
719 sc->mfi_enable_intr(sc);
/* Non-TB path: classic MFI queue init + legacy interrupt handler. */
721 if ((error = mfi_comms_init(sc)) != 0)
724 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
725 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
726 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
729 sc->mfi_intr_ptr = mfi_intr;
730 sc->mfi_enable_intr(sc);
732 if ((error = mfi_get_controller_info(sc)) != 0)
734 sc->disableOnlineCtrlReset = 0;
736 /* Register a config hook to probe the bus for arrays */
737 sc->mfi_ich.ich_func = mfi_startup;
738 sc->mfi_ich.ich_arg = sc;
739 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
740 device_printf(sc->mfi_dev, "Cannot establish configuration "
744 mtx_lock(&sc->mfi_io_lock);
/*
 * BUG(review): the comma operator makes this condition evaluate
 * "0 != 0", which is always false — mfi_aen_setup() failures are
 * silently ignored.  Should read:
 *     if ((error = mfi_aen_setup(sc, 0)) != 0) {
 */
745 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
746 mtx_unlock(&sc->mfi_io_lock);
749 mtx_unlock(&sc->mfi_io_lock);
752 * Register a shutdown handler.
754 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
755 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
756 device_printf(sc->mfi_dev, "Warning: shutdown event "
757 "registration failed\n");
761 * Create the control device for doing management
763 unit = device_get_unit(sc->mfi_dev);
764 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
765 0640, "mfi%d", unit);
/*
 * NOTE(review): "dev_t" here is presumably a local "struct cdev *"
 * whose declaration this listing dropped; it shadows the system
 * dev_t typedef — confirm against upstream and consider renaming.
 * Return value of make_dev_alias_p() is also unchecked.
 */
767 make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
768 sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
769 if (sc->mfi_cdev != NULL)
770 sc->mfi_cdev->si_drv1 = sc;
771 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
772 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
773 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
774 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
775 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
776 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
777 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
778 &sc->mfi_keep_deleted_volumes, 0,
779 "Don't detach the mfid device for a busy volume that is deleted");
781 device_add_child(sc->mfi_dev, "mfip", -1);
782 bus_generic_attach(sc->mfi_dev);
784 /* Start the timeout watchdog */
785 callout_init(&sc->mfi_watchdog_callout, 1);
786 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
/* Kick an initial LD map sync on ThunderBolt controllers. */
789 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
790 mtx_lock(&sc->mfi_io_lock);
791 mfi_tbolt_sync_map_info(sc);
792 mtx_unlock(&sc->mfi_io_lock);
/*
 * Allocate the array of mfi_command structures and wire each one to
 * its slice of the preallocated frame and sense DMA regions, creating
 * a per-command data dmamap.  Commands that initialize successfully
 * are placed on the free queue via mfi_release_command().
 * Returns 0 on success; the dropped error path frees everything and
 * returns ENOMEM.
 * NOTE(review): this listing is missing lines (cm->cm_sc / cm->cm_index
 * assignments, the "} else {", loop closers and both returns).
 */
799 mfi_alloc_commands(struct mfi_softc *sc)
801 struct mfi_command *cm;
805 * XXX Should we allocate all the commands up front, or allocate on
806 * demand later like 'aac' does?
808 sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
809 sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
811 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
812 cm = &sc->mfi_commands[i];
/* Each command owns one mfi_cmd_size slice of the frames region... */
813 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
814 sc->mfi_cmd_size * i);
815 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
816 sc->mfi_cmd_size * i;
/* ...tagged with its index so completions can find it again. */
817 cm->cm_frame->header.context = i;
818 cm->cm_sense = &sc->mfi_sense[i];
819 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
822 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
823 &cm->cm_dmamap) == 0) {
824 mtx_lock(&sc->mfi_io_lock);
825 mfi_release_command(cm);
826 mtx_unlock(&sc->mfi_io_lock);
828 device_printf(sc->mfi_dev, "Failed to allocate %d "
829 "command blocks, only allocated %d\n",
830 sc->mfi_max_fw_cmds, i - 1);
/*
 * BUG(review): the cleanup loop indexes with "i" (the loop bound),
 * not "j" — it destroys command i's dmamap repeatedly and leaks the
 * maps of commands 0..i-1.  The next line should read:
 *     cm = &sc->mfi_commands[j];
 * (Also: the message above reports "i - 1" where "i" maps were
 * actually created — off by one; confirm intent.)
 */
831 for (j = 0; j < i; j++) {
832 cm = &sc->mfi_commands[i];
833 bus_dmamap_destroy(sc->mfi_buffer_dmat,
836 free(sc->mfi_commands, M_MFIBUF);
837 sc->mfi_commands = NULL;
/*
 * Return a command to the free pool: scrub the frame header (keeping
 * the context field, which is the command's permanent slot index),
 * clear the first S/G entry, detach the command from any queue it may
 * still be on, hand back any ThunderBolt extra-frame resources, reset
 * the per-command bookkeeping, and enqueue it on the free list.
 * Caller must hold mfi_io_lock (asserted below).
 * NOTE(review): lines are missing here (mfi_remove_busy() under the
 * BUSY check, panic arguments, cleared cm_sg/cm_flags/cm_data fields,
 * and closing braces) — verify against upstream.
 */
847 mfi_release_command(struct mfi_command *cm)
849 struct mfi_frame_header *hdr;
852 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
855 * Zero out the important fields of the frame, but make sure the
856 * context field is preserved. For efficiency, handle the fields
857 * as 32 bit words. Clear out the first S/G entry too for safety.
859 hdr = &cm->cm_frame->header;
860 if (cm->cm_data != NULL && hdr->sg_count) {
861 cm->cm_sg->sg32[0].len = 0;
862 cm->cm_sg->sg32[0].addr = 0;
866 * Command may be on other queues e.g. busy queue depending on the
867 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
870 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
872 if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
873 mfi_remove_ready(cm);
875 /* We're not expecting it to be on any other queue but check */
876 if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
877 panic("Command %p is still on another queue, flags = %#x",
/* ThunderBolt commands borrow an extra-frame pool entry; return it. */
882 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
883 mfi_tbolt_return_cmd(cm->cm_sc,
884 cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
/* Word-wise scrub of the header; word 2/3 hold context — untouched. */
888 hdr_data = (uint32_t *)cm->cm_frame;
889 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
890 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
891 hdr_data[4] = 0; /* flags, timeout */
892 hdr_data[5] = 0; /* data_len */
894 cm->cm_extra_frames = 0;
896 cm->cm_complete = NULL;
897 cm->cm_private = NULL;
900 cm->cm_total_frame_size = 0;
901 cm->retry_for_fw_reset = 0;
903 mfi_enqueue_free(cm);
/*
 * Build (but do not issue) a DCMD command frame.  Dequeues a free
 * command, zeroes its frame (preserving the context slot index),
 * optionally allocates a data buffer of bufsize bytes (reusing *bufp
 * if the caller already supplied one), and fills in the DCMD header.
 * On success *cmp holds the command and, if requested, *bufp the data
 * buffer (ownership of the buffer stays with the command/caller pair).
 * Caller must hold mfi_io_lock.
 * NOTE(review): several lines are missing (void *buf = NULL, the
 * ENOMEM returns, cm_flags/cm_data assignments, *cmp = cm, return 0,
 * braces) — verify against upstream.
 */
907 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
908 uint32_t opcode, void **bufp, size_t bufsize)
910 struct mfi_command *cm;
911 struct mfi_dcmd_frame *dcmd;
913 uint32_t context = 0;
915 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
917 cm = mfi_dequeue_free(sc);
921 /* Zero out the MFI frame */
922 context = cm->cm_frame->header.context;
923 bzero(cm->cm_frame, sizeof(union mfi_frame));
924 cm->cm_frame->header.context = context;
/* M_NOWAIT: this path can run in contexts where sleeping is unsafe. */
926 if ((bufsize > 0) && (bufp != NULL)) {
928 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
930 mfi_release_command(cm);
939 dcmd = &cm->cm_frame->dcmd;
940 bzero(dcmd->mbox, MFI_MBOX_SIZE);
941 dcmd->header.cmd = MFI_CMD_DCMD;
942 dcmd->header.timeout = 0;
943 dcmd->header.flags = 0;
944 dcmd->header.data_len = bufsize;
945 dcmd->header.scsi_status = 0;
946 dcmd->opcode = opcode;
947 cm->cm_sg = &dcmd->sgl;
948 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
951 cm->cm_private = buf;
952 cm->cm_len = bufsize;
/* Hand a newly-allocated buffer back to the caller if they need it. */
955 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI_CMD_INIT frame that tells legacy (non-ThunderBolt)
 * firmware where the host's reply queue and producer/consumer indices
 * live in the mfi_comms DMA region.  The qinfo structure is staged in
 * the second frame of the command's slice (the SG list area), and the
 * command is issued polled.  Returns 0 on success.
 * NOTE(review): missing lines include the "int error;" declaration,
 * the ENOMEM return after a failed dequeue, cm_data = NULL and the
 * final "return (error);" — verify against upstream.
 */
961 mfi_comms_init(struct mfi_softc *sc)
963 struct mfi_command *cm;
964 struct mfi_init_frame *init;
965 struct mfi_init_qinfo *qinfo;
967 uint32_t context = 0;
969 mtx_lock(&sc->mfi_io_lock);
970 if ((cm = mfi_dequeue_free(sc)) == NULL) {
971 mtx_unlock(&sc->mfi_io_lock);
975 /* Zero out the MFI frame */
976 context = cm->cm_frame->header.context;
977 bzero(cm->cm_frame, sizeof(union mfi_frame));
978 cm->cm_frame->header.context = context;
981 * Abuse the SG list area of the frame to hold the init_qinfo
984 init = &cm->cm_frame->init;
985 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
/* One extra reply slot beyond max commands — hardware requirement. */
987 bzero(qinfo, sizeof(struct mfi_init_qinfo));
988 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
989 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
990 offsetof(struct mfi_hwcomms, hw_reply_q);
991 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
992 offsetof(struct mfi_hwcomms, hw_pi);
993 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
994 offsetof(struct mfi_hwcomms, hw_ci);
996 init->header.cmd = MFI_CMD_INIT;
997 init->header.data_len = sizeof(struct mfi_init_qinfo);
/* qinfo physically lives one frame past the command's frame. */
998 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
1000 cm->cm_flags = MFI_CMD_POLLED;
1002 if ((error = mfi_mapcmd(sc, cm)) != 0)
1003 device_printf(sc->mfi_dev, "failed to send init command\n");
1004 mfi_release_command(cm);
1005 mtx_unlock(&sc->mfi_io_lock);
/*
 * Issue a polled MFI_DCMD_CTRL_GETINFO and derive the driver's maximum
 * I/O size (min of stripe-based and firmware-reported limits) plus the
 * controller's online-controller-reset capability.  On failure, falls
 * back to an SGE-derived conservative mfi_max_io.
 * NOTE(review): missing lines include the "int error;" declaration,
 * the early-out after mfi_dcmd_command() failure, the "goto out"
 * tail, the "out:" label and free(ci) — verify against upstream.
 */
1011 mfi_get_controller_info(struct mfi_softc *sc)
1013 struct mfi_command *cm = NULL;
1014 struct mfi_ctrl_info *ci = NULL;
1015 uint32_t max_sectors_1, max_sectors_2;
1018 mtx_lock(&sc->mfi_io_lock);
1019 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1020 (void **)&ci, sizeof(*ci));
1023 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1025 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1026 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: bound max I/O by what one SG list can describe. */
1027 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1033 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1034 BUS_DMASYNC_POSTREAD);
1035 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1037 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1038 max_sectors_2 = ci->max_request_size;
1039 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1040 sc->disableOnlineCtrlReset =
1041 ci->properties.OnOffProperties.disableOnlineCtrlReset;
1047 mfi_release_command(cm);
1048 mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the firmware's event-log state (boot/shutdown/newest sequence
 * numbers) via a polled MFI_DCMD_CTRL_EVENT_GETINFO.  On success the
 * caller owns *log_state and must free it (M_MFIBUF).
 * Caller must hold mfi_io_lock.
 * NOTE(review): missing lines include the "int error;" declaration,
 * the early-out on mfi_dcmd_command() failure, the "out:" label and
 * the final return — verify against upstream.
 */
1053 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1055 struct mfi_command *cm = NULL;
1058 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1059 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1060 (void **)log_state, sizeof(**log_state));
1063 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1065 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1066 device_printf(sc->mfi_dev, "Failed to get log state\n");
1070 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1071 BUS_DMASYNC_POSTREAD);
1072 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1076 mfi_release_command(cm);
/*
 * Prepare asynchronous event notification (AEN).  On a cold start
 * (seq_start == 0): read the event-log state, replay events logged between
 * the last shutdown and the newest entry, and continue from the newest
 * sequence number.  Finally register an AEN for the configured
 * class/locale.  The log_state buffer is freed here.
 */
1082 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1084 struct mfi_evt_log_state *log_state = NULL;
1085 union mfi_evt class_locale;
1089 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Filter which events we want, from the module-level tunables. */
1091 class_locale.members.reserved = 0;
1092 class_locale.members.locale = mfi_event_locale;
1093 class_locale.members.evt_class = mfi_event_class;
1095 if (seq_start == 0) {
1096 if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1098 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1101 * Walk through any events that fired since the last
1104 if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1105 log_state->newest_seq_num)) != 0)
1107 seq = log_state->newest_seq_num;
1110 error = mfi_aen_register(sc, seq, class_locale.word);
1112 free(log_state, M_MFIBUF);
/*
 * Enqueue a command and sleep (on mfi_io_lock) until the completion path
 * flags it MFI_CMD_COMPLETED and wakes us.  Returns cm->cm_error.
 */
1118 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1121 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1122 cm->cm_complete = NULL;
1125 * MegaCli can issue a DCMD of 0. In this case do nothing
1126 * and return 0 to it as status
1128 if (cm->cm_frame->dcmd.opcode == 0) {
1129 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1131 return (cm->cm_error);
1133 mfi_enqueue_ready(cm);
1135 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1136 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1137 return (cm->cm_error);
/*
 * Release every resource the driver acquired, roughly in reverse order of
 * allocation: watchdog callout, control device, per-command DMA maps, the
 * interrupt, then the sense/frame/comms DMA areas, the ThunderBolt-specific
 * pools (MFI_FLAGS_TBOLT only), and finally the DMA tags and locks.
 * NOTE(review): blank/brace-only lines are elided in this excerpt.
 */
1141 mfi_free(struct mfi_softc *sc)
1143 struct mfi_command *cm;
1146 callout_drain(&sc->mfi_watchdog_callout);
1148 if (sc->mfi_cdev != NULL)
1149 destroy_dev(sc->mfi_cdev);
1151 if (sc->mfi_commands != NULL) {
1152 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1153 cm = &sc->mfi_commands[i];
1154 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1156 free(sc->mfi_commands, M_MFIBUF);
1157 sc->mfi_commands = NULL;
1161 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1162 if (sc->mfi_irq != NULL)
1163 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer area: unload map, free memory, destroy tag. */
1166 if (sc->mfi_sense_busaddr != 0)
1167 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1168 if (sc->mfi_sense != NULL)
1169 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1170 sc->mfi_sense_dmamap);
1171 if (sc->mfi_sense_dmat != NULL)
1172 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Command frame area. */
1174 if (sc->mfi_frames_busaddr != 0)
1175 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1176 if (sc->mfi_frames != NULL)
1177 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1178 sc->mfi_frames_dmamap);
1179 if (sc->mfi_frames_dmat != NULL)
1180 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Host/firmware communication (reply queue) area. */
1182 if (sc->mfi_comms_busaddr != 0)
1183 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1184 if (sc->mfi_comms != NULL)
1185 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1186 sc->mfi_comms_dmamap);
1187 if (sc->mfi_comms_dmat != NULL)
1188 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1190 /* ThunderBolt contiguous memory free here */
1191 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1192 if (sc->mfi_tb_busaddr != 0)
1193 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1194 if (sc->request_message_pool != NULL)
1195 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1197 if (sc->mfi_tb_dmat != NULL)
1198 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1200 /* Version buffer memory free */
1201 /* Start LSIP200113393 */
1202 if (sc->verbuf_h_busaddr != 0)
1203 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1204 if (sc->verbuf != NULL)
1205 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1206 sc->verbuf_h_dmamap);
1207 if (sc->verbuf_h_dmat != NULL)
1208 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1210 /* End LSIP200113393 */
1211 /* ThunderBolt INIT packet memory Free */
1212 if (sc->mfi_tb_init_busaddr != 0)
1213 bus_dmamap_unload(sc->mfi_tb_init_dmat,
1214 sc->mfi_tb_init_dmamap);
1215 if (sc->mfi_tb_init != NULL)
1216 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1217 sc->mfi_tb_init_dmamap);
1218 if (sc->mfi_tb_init_dmat != NULL)
1219 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1221 /* ThunderBolt IOC Init Desc memory free here */
1222 if (sc->mfi_tb_ioc_init_busaddr != 0)
1223 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1224 sc->mfi_tb_ioc_init_dmamap);
1225 if (sc->mfi_tb_ioc_init_desc != NULL)
1226 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1227 sc->mfi_tb_ioc_init_desc,
1228 sc->mfi_tb_ioc_init_dmamap);
1229 if (sc->mfi_tb_ioc_init_dmat != NULL)
1230 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Per-command ThunderBolt pool entries, then the pool array itself. */
1231 if (sc->mfi_cmd_pool_tbolt != NULL) {
1232 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1233 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1234 free(sc->mfi_cmd_pool_tbolt[i],
1236 sc->mfi_cmd_pool_tbolt[i] = NULL;
1239 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1240 sc->mfi_cmd_pool_tbolt = NULL;
1242 if (sc->request_desc_pool != NULL) {
1243 free(sc->request_desc_pool, M_MFIBUF);
1244 sc->request_desc_pool = NULL;
/* Tags last; buffer tag before its parent. */
1247 if (sc->mfi_buffer_dmat != NULL)
1248 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1249 if (sc->mfi_parent_dmat != NULL)
1250 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1252 if (mtx_initialized(&sc->mfi_io_lock)) {
1253 mtx_destroy(&sc->mfi_io_lock);
1254 sx_destroy(&sc->mfi_config_lock);
/*
 * Deferred-attach hook (runs via config_intrhook): enable controller
 * interrupts, probe logical drives under the config and io locks
 * (plus, per the elided SKINNY branch, system PDs — TODO confirm), and
 * disestablish the hook so attach can complete.
 */
1261 mfi_startup(void *arg)
1263 struct mfi_softc *sc;
1265 sc = (struct mfi_softc *)arg;
1267 sc->mfi_enable_intr(sc);
1268 sx_xlock(&sc->mfi_config_lock);
1269 mtx_lock(&sc->mfi_io_lock);
/* Body of this branch is elided in this excerpt. */
1271 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1273 mtx_unlock(&sc->mfi_io_lock);
1274 sx_xunlock(&sc->mfi_config_lock);
1276 config_intrhook_disestablish(&sc->mfi_ich);
/*
 * Interrupt handler (the function header itself is elided in this excerpt
 * — presumably mfi_intr; verify against the full file).  Drains the reply
 * queue from the consumer index (hw_ci) to the producer index (hw_pi),
 * completing each finished command, then unfreezes deferred I/O.  A dummy
 * firmware-status read flushes the bus before re-checking for new work.
 */
1282 struct mfi_softc *sc;
1283 struct mfi_command *cm;
1284 uint32_t pi, ci, context;
1286 sc = (struct mfi_softc *)arg;
/* Nothing to do if the controller didn't raise this interrupt. */
1288 if (sc->mfi_check_clear_intr(sc))
1292 pi = sc->mfi_comms->hw_pi;
1293 ci = sc->mfi_comms->hw_ci;
1294 mtx_lock(&sc->mfi_io_lock);
1296 context = sc->mfi_comms->hw_reply_q[ci];
1297 if (context < sc->mfi_max_fw_cmds) {
1298 cm = &sc->mfi_commands[context];
1299 mfi_remove_busy(cm);
1301 mfi_complete(sc, cm);
/* Reply queue has mfi_max_fw_cmds + 1 slots; wrap at the end. */
1303 if (++ci == (sc->mfi_max_fw_cmds + 1))
1307 sc->mfi_comms->hw_ci = ci;
1309 /* Give defered I/O a chance to run */
1310 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1312 mtx_unlock(&sc->mfi_io_lock);
1315 * Dummy read to flush the bus; this ensures that the indexes are up
1316 * to date. Restart processing if more commands have come it.
1318 (void)sc->mfi_read_fw_status(sc);
1319 if (pi != sc->mfi_comms->hw_pi)
/*
 * Controller shutdown: abort any outstanding AEN and map-sync commands,
 * then issue a polled MFI_DCMD_CTRL_SHUTDOWN with no data transfer.
 * NOTE(review): blank/brace-only lines are elided in this excerpt.
 */
1326 mfi_shutdown(struct mfi_softc *sc)
1328 struct mfi_dcmd_frame *dcmd;
1329 struct mfi_command *cm;
1333 if (sc->mfi_aen_cm != NULL) {
1334 sc->cm_aen_abort = 1;
1335 mfi_abort(sc, &sc->mfi_aen_cm);
1338 if (sc->mfi_map_sync_cm != NULL) {
1339 sc->cm_map_abort = 1;
1340 mfi_abort(sc, &sc->mfi_map_sync_cm);
1343 mtx_lock(&sc->mfi_io_lock);
1344 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1346 mtx_unlock(&sc->mfi_io_lock);
/* No data phase for the shutdown DCMD. */
1350 dcmd = &cm->cm_frame->dcmd;
1351 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1352 cm->cm_flags = MFI_CMD_POLLED;
1355 if ((error = mfi_mapcmd(sc, cm)) != 0)
1356 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1358 mfi_release_command(cm);
1359 mtx_unlock(&sc->mfi_io_lock);
/*
 * Re-scan system PDs (JBOD): query the firmware's exposed-PD list
 * (MFI_DCMD_PD_LIST_QUERY, polled), attach any PDs not already known or
 * pending, and detach children that no longer appear in the list.  Called
 * with both mfi_config_lock (exclusive) and mfi_io_lock held; the io lock
 * is dropped around device_delete_child().
 */
1364 mfi_syspdprobe(struct mfi_softc *sc)
1366 struct mfi_frame_header *hdr;
1367 struct mfi_command *cm = NULL;
1368 struct mfi_pd_list *pdlist = NULL;
1369 struct mfi_system_pd *syspd, *tmp;
1370 struct mfi_system_pending *syspd_pend;
1371 int error, i, found;
1373 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1374 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1375 /* Add SYSTEM PD's */
1376 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1377 (void **)&pdlist, sizeof(*pdlist));
1379 device_printf(sc->mfi_dev,
1380 "Error while forming SYSTEM PD list\n");
1384 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1385 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1386 cm->cm_frame->dcmd.mbox[1] = 0;
1387 if (mfi_mapcmd(sc, cm) != 0) {
1388 device_printf(sc->mfi_dev,
1389 "Failed to get syspd device listing\n");
1392 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1393 BUS_DMASYNC_POSTREAD);
1394 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1395 hdr = &cm->cm_frame->header;
1396 if (hdr->cmd_status != MFI_STAT_OK) {
1397 device_printf(sc->mfi_dev,
1398 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1401 /* Get each PD and add it to the system */
1402 for (i = 0; i < pdlist->count; i++) {
/* Skip enclosure entries (device id equals enclosure id). */
1403 if (pdlist->addr[i].device_id ==
1404 pdlist->addr[i].encl_device_id)
/* Skip PDs already attached or already pending attach. */
1407 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1408 if (syspd->pd_id == pdlist->addr[i].device_id)
1411 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1412 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1416 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1418 /* Delete SYSPD's whose state has been changed */
1419 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1421 for (i = 0; i < pdlist->count; i++) {
1422 if (syspd->pd_id == pdlist->addr[i].device_id) {
/* Drop the io lock while detaching the child device. */
1429 mtx_unlock(&sc->mfi_io_lock);
1431 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1433 mtx_lock(&sc->mfi_io_lock);
1438 free(pdlist, M_MFIBUF);
1440 mfi_release_command(cm);
/*
 * Probe logical drives: fetch the LD list (MFI_DCMD_LD_GET_LIST, waited
 * rather than polled) and attach any target not already present or
 * pending.  Called with mfi_config_lock (exclusive) and mfi_io_lock held.
 */
1446 mfi_ldprobe(struct mfi_softc *sc)
1448 struct mfi_frame_header *hdr;
1449 struct mfi_command *cm = NULL;
1450 struct mfi_ld_list *list = NULL;
1451 struct mfi_disk *ld;
1452 struct mfi_disk_pending *ld_pend;
1455 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1456 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1458 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1459 (void **)&list, sizeof(*list));
1463 cm->cm_flags = MFI_CMD_DATAIN;
1464 if (mfi_wait_command(sc, cm) != 0) {
1465 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1469 hdr = &cm->cm_frame->header;
1470 if (hdr->cmd_status != MFI_STAT_OK) {
1471 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Skip targets already attached or pending; otherwise add them. */
1476 for (i = 0; i < list->ld_count; i++) {
1477 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1478 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1481 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1482 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1485 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1490 free(list, M_MFIBUF);
1492 mfi_release_command(cm);
1498 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1499 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp as a short string.  Returns a pointer to a
 * static buffer — not reentrant; callers use the result immediately.
 */
1503 format_timestamp(uint32_t timestamp)
1505 static char buffer[32];
1507 if ((timestamp & 0xff000000) == 0xff000000)
1508 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1511 snprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a human-readable name; unknown codes are
 * formatted numerically.  Uses a static buffer — not reentrant.
 */
1516 format_class(int8_t class)
1518 static char buffer[6];
1521 case MFI_EVT_CLASS_DEBUG:
1523 case MFI_EVT_CLASS_PROGRESS:
1524 return ("progress");
1525 case MFI_EVT_CLASS_INFO:
1527 case MFI_EVT_CLASS_WARNING:
1529 case MFI_EVT_CLASS_CRITICAL:
1531 case MFI_EVT_CLASS_FATAL:
1533 case MFI_EVT_CLASS_DEAD:
1536 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log an AEN event and react to it: rescan on host-bus-scan requests,
 * detach an LD that went offline, and attach/detach SYSPDs on JBOD
 * insert/remove (when mfi_detect_jbod_change is enabled).  Also notifies
 * the CAM rescan callback for PD insert/remove.
 * NOTE(review): blank/brace-only lines are elided in this excerpt.
 */
1542 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1544 struct mfi_system_pd *syspd = NULL;
1546 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1547 format_timestamp(detail->time), detail->evt_class.members.locale,
1548 format_class(detail->evt_class.members.evt_class),
1549 detail->description);
1551 /* Don't act on old AEN's or while shutting down */
1552 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1555 switch (detail->arg_type) {
1556 case MR_EVT_ARGS_NONE:
1557 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1558 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1559 if (mfi_detect_jbod_change) {
1561 * Probe for new SYSPD's and Delete
1564 sx_xlock(&sc->mfi_config_lock);
1565 mtx_lock(&sc->mfi_io_lock);
1567 mtx_unlock(&sc->mfi_io_lock);
1568 sx_xunlock(&sc->mfi_config_lock);
1572 case MR_EVT_ARGS_LD_STATE:
1573 /* During load time driver reads all the events starting
1574 * from the one that has been logged after shutdown. Avoid
1577 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1579 struct mfi_disk *ld;
/* Find the mfi_disk matching the offlined target id. */
1580 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1582 detail->args.ld_state.ld.target_id)
1586 Fix: for kernel panics when SSCD is removed
1587 KASSERT(ld != NULL, ("volume dissappeared"));
1591 device_delete_child(sc->mfi_dev, ld->ld_dev);
1596 case MR_EVT_ARGS_PD:
1597 if (detail->code == MR_EVT_PD_REMOVED) {
1598 if (mfi_detect_jbod_change) {
1600 * If the removed device is a SYSPD then
1603 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1606 detail->args.pd.device_id) {
1608 device_delete_child(
1617 if (detail->code == MR_EVT_PD_INSERTED) {
1618 if (mfi_detect_jbod_change) {
1619 /* Probe for new SYSPD's */
1620 sx_xlock(&sc->mfi_config_lock);
1621 mtx_lock(&sc->mfi_io_lock);
1623 mtx_unlock(&sc->mfi_io_lock);
1624 sx_xunlock(&sc->mfi_config_lock);
/* Let CAM know a PD came or went so it can rescan. */
1627 if (sc->mfi_cam_rescan_cb != NULL &&
1628 (detail->code == MR_EVT_PD_INSERTED ||
1629 detail->code == MR_EVT_PD_REMOVED)) {
1630 sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
/*
 * Copy an event detail onto the softc's event queue and kick the SWI
 * taskqueue to process it.  Uses M_NOWAIT since we hold mfi_io_lock.
 */
1637 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1639 struct mfi_evt_queue_elm *elm;
1641 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1642 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1645 memcpy(&elm->detail, detail, sizeof(*detail));
1646 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1647 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: move the pending events onto a local list under the
 * io lock, then decode and free each one without holding the lock.
 */
1651 mfi_handle_evt(void *context, int pending)
1653 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1654 struct mfi_softc *sc;
1655 struct mfi_evt_queue_elm *elm;
1659 mtx_lock(&sc->mfi_io_lock);
1660 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1661 mtx_unlock(&sc->mfi_io_lock);
1662 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1663 TAILQ_REMOVE(&queue, elm, link);
1664 mfi_decode_evt(sc, &elm->detail);
1665 free(elm, M_MFIBUF);
/*
 * Register an asynchronous-event wait (MFI_DCMD_CTRL_EVENT_WAIT) for the
 * given sequence number and class/locale word.  If an AEN is already
 * outstanding and covers the requested class/locale, do nothing; otherwise
 * widen the filter, abort the old AEN, and issue a new one that completes
 * via mfi_aen_complete().
 */
1670 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1672 struct mfi_command *cm;
1673 struct mfi_dcmd_frame *dcmd;
1674 union mfi_evt current_aen, prior_aen;
1675 struct mfi_evt_detail *ed = NULL;
1678 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1680 current_aen.word = locale;
1681 if (sc->mfi_aen_cm != NULL) {
/* mbox word 1 of the outstanding AEN holds its class/locale. */
1683 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Outstanding AEN already covers this request: nothing to do. */
1684 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1685 !((prior_aen.members.locale & current_aen.members.locale)
1686 ^current_aen.members.locale)) {
/* Otherwise merge locales, keep the broader (lower) class, and
 * abort the old AEN so a wider one can be registered. */
1689 prior_aen.members.locale |= current_aen.members.locale;
1690 if (prior_aen.members.evt_class
1691 < current_aen.members.evt_class)
1692 current_aen.members.evt_class =
1693 prior_aen.members.evt_class;
1694 mfi_abort(sc, &sc->mfi_aen_cm);
1698 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1699 (void **)&ed, sizeof(*ed));
1703 dcmd = &cm->cm_frame->dcmd;
1704 ((uint32_t *)&dcmd->mbox)[0] = seq;
1705 ((uint32_t *)&dcmd->mbox)[1] = locale;
1706 cm->cm_flags = MFI_CMD_DATAIN;
1707 cm->cm_complete = mfi_aen_complete;
1709 sc->last_seq_num = seq;
1710 sc->mfi_aen_cm = cm;
1712 mfi_enqueue_ready(cm);
/*
 * Completion handler for the outstanding AEN command: unless aborted,
 * queue the delivered event, wake poll/select waiters, signal registered
 * processes (SIGIO), then release the command and re-arm AEN at the next
 * sequence number via mfi_aen_setup().
 */
1720 mfi_aen_complete(struct mfi_command *cm)
1722 struct mfi_frame_header *hdr;
1723 struct mfi_softc *sc;
1724 struct mfi_evt_detail *detail;
1725 struct mfi_aen *mfi_aen_entry, *tmp;
1726 int seq = 0, aborted = 0;
1729 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1731 if (sc->mfi_aen_cm == NULL)
1734 hdr = &cm->cm_frame->header;
/* An aborted or invalid-status AEN delivers no event data. */
1736 if (sc->cm_aen_abort ||
1737 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1738 sc->cm_aen_abort = 0;
1741 sc->mfi_aen_triggered = 1;
1742 if (sc->mfi_poll_waiting) {
1743 sc->mfi_poll_waiting = 0;
1744 selwakeup(&sc->mfi_select);
1746 detail = cm->cm_data;
1747 mfi_queue_evt(sc, detail);
1748 seq = detail->seq + 1;
/* Notify (and drop) every process that registered for AEN signals. */
1749 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1751 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1753 PROC_LOCK(mfi_aen_entry->p);
1754 kern_psignal(mfi_aen_entry->p, SIGIO);
1755 PROC_UNLOCK(mfi_aen_entry->p);
1756 free(mfi_aen_entry, M_MFIBUF);
1760 free(cm->cm_data, M_MFIBUF);
1761 wakeup(&sc->mfi_aen_cm);
1762 sc->mfi_aen_cm = NULL;
1763 mfi_release_command(cm);
1765 /* set it up again so the driver can catch more events */
1767 mfi_aen_setup(sc, seq);
1770 #define MAX_EVENTS 15
/*
 * Replay logged events in [start_seq, stop_seq): repeatedly issue polled
 * MFI_DCMD_CTRL_EVENT_GET requests for batches of up to MAX_EVENTS
 * entries, queueing each event, until the firmware reports NOT_FOUND or
 * stop_seq is reached.  Handles the circular event log (see loop comment).
 */
1773 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1775 struct mfi_command *cm;
1776 struct mfi_dcmd_frame *dcmd;
1777 struct mfi_evt_list *el;
1778 union mfi_evt class_locale;
1779 int error, i, seq, size;
1781 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1783 class_locale.members.reserved = 0;
1784 class_locale.members.locale = mfi_event_locale;
1785 class_locale.members.evt_class = mfi_event_class;
/* Buffer sized for a list header plus (elided count, presumably
 * MAX_EVENTS — confirm) event details. */
1787 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1789 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1793 for (seq = start_seq;;) {
1794 if ((cm = mfi_dequeue_free(sc)) == NULL) {
/* Hand-build the DCMD frame (no mfi_dcmd_command helper here,
 * since the same el buffer is reused across iterations). */
1799 dcmd = &cm->cm_frame->dcmd;
1800 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1801 dcmd->header.cmd = MFI_CMD_DCMD;
1802 dcmd->header.timeout = 0;
1803 dcmd->header.data_len = size;
1804 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1805 ((uint32_t *)&dcmd->mbox)[0] = seq;
1806 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1807 cm->cm_sg = &dcmd->sgl;
1808 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1809 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1813 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1814 device_printf(sc->mfi_dev,
1815 "Failed to get controller entries\n");
1816 mfi_release_command(cm);
1820 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1821 BUS_DMASYNC_POSTREAD);
1822 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we've drained the log: normal termination. */
1824 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1825 mfi_release_command(cm);
1828 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1829 device_printf(sc->mfi_dev,
1830 "Error %d fetching controller entries\n",
1831 dcmd->header.cmd_status);
1832 mfi_release_command(cm);
1836 mfi_release_command(cm);
1838 for (i = 0; i < el->count; i++) {
1840 * If this event is newer than 'stop_seq' then
1841 * break out of the loop. Note that the log
1842 * is a circular buffer so we have to handle
1843 * the case that our stop point is earlier in
1844 * the buffer than our start point.
1846 if (el->event[i].seq >= stop_seq) {
1847 if (start_seq <= stop_seq)
1849 else if (el->event[i].seq < start_seq)
1852 mfi_queue_evt(sc, &el->event[i]);
/* Continue the next batch after the last event returned. */
1854 seq = el->event[el->count - 1].seq + 1;
/*
 * Attach a logical drive: record it as pending, fetch its info
 * (MFI_DCMD_LD_GET_INFO, waited), and — unless it is an SSCD volume —
 * complete the attach via mfi_add_ld_complete().  ld_info ownership passes
 * to the completion path on success; otherwise it is freed here.
 */
1862 mfi_add_ld(struct mfi_softc *sc, int id)
1864 struct mfi_command *cm;
1865 struct mfi_dcmd_frame *dcmd = NULL;
1866 struct mfi_ld_info *ld_info = NULL;
1867 struct mfi_disk_pending *ld_pend;
1870 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Mark the target pending so concurrent probes don't re-add it. */
1872 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1873 if (ld_pend != NULL) {
1874 ld_pend->ld_id = id;
1875 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1878 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1879 (void **)&ld_info, sizeof(*ld_info));
1881 device_printf(sc->mfi_dev,
1882 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1884 free(ld_info, M_MFIBUF);
1887 cm->cm_flags = MFI_CMD_DATAIN;
1888 dcmd = &cm->cm_frame->dcmd;
1890 if (mfi_wait_command(sc, cm) != 0) {
1891 device_printf(sc->mfi_dev,
1892 "Failed to get logical drive: %d\n", id);
1893 free(ld_info, M_MFIBUF);
/* SSCD (CacheCade) volumes are not exposed as disks. */
1896 if (ld_info->ld_config.params.isSSCD != 1)
1897 mfi_add_ld_complete(cm);
1899 mfi_release_command(cm);
1900 if (ld_info) /* SSCD drives ld_info free here */
1901 free(ld_info, M_MFIBUF);
/*
 * Finish attaching a logical drive: on success, add an "mfid" child
 * device carrying ld_info as ivars and reattach the bus.  The io lock is
 * dropped around the newbus calls.  On failure (abort or bad status)
 * ld_info is freed and map-sync waiters are woken.
 */
1907 mfi_add_ld_complete(struct mfi_command *cm)
1909 struct mfi_frame_header *hdr;
1910 struct mfi_ld_info *ld_info;
1911 struct mfi_softc *sc;
1915 hdr = &cm->cm_frame->header;
1916 ld_info = cm->cm_private;
1918 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1919 free(ld_info, M_MFIBUF);
1920 wakeup(&sc->mfi_map_sync_cm);
1921 mfi_release_command(cm);
1924 wakeup(&sc->mfi_map_sync_cm);
1925 mfi_release_command(cm);
1927 mtx_unlock(&sc->mfi_io_lock);
1929 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1930 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1931 free(ld_info, M_MFIBUF);
1933 mtx_lock(&sc->mfi_io_lock);
/* The mfid child takes ownership of ld_info via its ivars. */
1937 device_set_ivars(child, ld_info);
1938 device_set_desc(child, "MFI Logical Disk");
1939 bus_generic_attach(sc->mfi_dev);
1941 mtx_lock(&sc->mfi_io_lock);
/*
 * Attach a system PD (JBOD): record it as pending, fetch its info via a
 * polled MFI_DCMD_PD_GET_INFO, and hand the command to
 * mfi_add_sys_pd_complete() to create the child device.
 */
1944 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1946 struct mfi_command *cm;
1947 struct mfi_dcmd_frame *dcmd = NULL;
1948 struct mfi_pd_info *pd_info = NULL;
1949 struct mfi_system_pending *syspd_pend;
1952 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Mark the PD pending so concurrent probes don't re-add it. */
1954 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1955 if (syspd_pend != NULL) {
1956 syspd_pend->pd_id = id;
1957 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1960 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1961 (void **)&pd_info, sizeof(*pd_info));
1963 device_printf(sc->mfi_dev,
1964 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1967 free(pd_info, M_MFIBUF);
1970 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1971 dcmd = &cm->cm_frame->dcmd;
1973 dcmd->header.scsi_status = 0;
1974 dcmd->header.pad0 = 0;
1975 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1976 device_printf(sc->mfi_dev,
1977 "Failed to get physical drive info %d\n", id);
1978 free(pd_info, M_MFIBUF);
1979 mfi_release_command(cm);
1982 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1983 BUS_DMASYNC_POSTREAD);
1984 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Completion path creates the mfisyspd child and frees the command. */
1985 mfi_add_sys_pd_complete(cm);
/*
 * Finish attaching a system PD: verify the firmware state is
 * MFI_PD_STATE_SYSTEM, then add an "mfisyspd" child carrying pd_info as
 * ivars and reattach the bus.  The io lock is dropped around newbus calls;
 * pd_info is freed on any failure path.
 */
1990 mfi_add_sys_pd_complete(struct mfi_command *cm)
1992 struct mfi_frame_header *hdr;
1993 struct mfi_pd_info *pd_info;
1994 struct mfi_softc *sc;
1998 hdr = &cm->cm_frame->header;
1999 pd_info = cm->cm_private;
2001 if (hdr->cmd_status != MFI_STAT_OK) {
2002 free(pd_info, M_MFIBUF);
2003 mfi_release_command(cm);
2006 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2007 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2008 pd_info->ref.v.device_id);
2009 free(pd_info, M_MFIBUF);
2010 mfi_release_command(cm);
2013 mfi_release_command(cm);
2015 mtx_unlock(&sc->mfi_io_lock);
2017 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2018 device_printf(sc->mfi_dev, "Failed to add system pd\n");
2019 free(pd_info, M_MFIBUF);
2021 mtx_lock(&sc->mfi_io_lock);
/* The mfisyspd child takes ownership of pd_info via its ivars. */
2025 device_set_ivars(child, pd_info);
2026 device_set_desc(child, "MFI System PD");
2027 bus_generic_attach(sc->mfi_dev);
2029 mtx_lock(&sc->mfi_io_lock);
/*
 * Pull the next bio off the queue and build the matching command frame —
 * LD I/O or system-PD passthrough, chosen by bio_driver2.  If the build
 * fails, the bio is requeued.  Returns NULL when no command is available.
 */
2032 static struct mfi_command *
2033 mfi_bio_command(struct mfi_softc *sc)
2036 struct mfi_command *cm = NULL;
2038 /*reserving two commands to avoid starvation for IOCTL*/
2039 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2042 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2045 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2046 cm = mfi_build_ldio(sc, bio);
2047 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2048 cm = mfi_build_syspdio(sc, bio);
/* Builder failed (no free command): put the bio back for later. */
2051 mfi_enqueue_bio(sc, bio);
2056 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
/*
 * Build a SCSI READ/WRITE CDB for the given LBA and block count, using
 * the smallest CDB (6/10/12/16 bytes) that can encode them.  Returns the
 * CDB length written into 'cdb'.
 */
2060 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
/* 6-byte CDB: LBA fits in 21 bits and count in 8 (condition partly
 * elided in this excerpt). */
2064 if (((lba & 0x1fffff) == lba)
2065 && ((block_count & 0xff) == block_count)
2067 /* We can fit in a 6 byte cdb */
2068 struct scsi_rw_6 *scsi_cmd;
2070 scsi_cmd = (struct scsi_rw_6 *)cdb;
2071 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2072 scsi_ulto3b(lba, scsi_cmd->addr);
2073 scsi_cmd->length = block_count & 0xff;
2074 scsi_cmd->control = 0;
2075 cdb_len = sizeof(*scsi_cmd);
2076 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2077 /* Need a 10 byte CDB */
2078 struct scsi_rw_10 *scsi_cmd;
2080 scsi_cmd = (struct scsi_rw_10 *)cdb;
2081 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2082 scsi_cmd->byte2 = byte2;
2083 scsi_ulto4b(lba, scsi_cmd->addr);
2084 scsi_cmd->reserved = 0;
2085 scsi_ulto2b(block_count, scsi_cmd->length);
2086 scsi_cmd->control = 0;
2087 cdb_len = sizeof(*scsi_cmd);
2088 } else if (((block_count & 0xffffffff) == block_count) &&
2089 ((lba & 0xffffffff) == lba)) {
2090 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2091 struct scsi_rw_12 *scsi_cmd;
2093 scsi_cmd = (struct scsi_rw_12 *)cdb;
2094 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2095 scsi_cmd->byte2 = byte2;
2096 scsi_ulto4b(lba, scsi_cmd->addr);
2097 scsi_cmd->reserved = 0;
2098 scsi_ulto4b(block_count, scsi_cmd->length);
2099 scsi_cmd->control = 0;
2100 cdb_len = sizeof(*scsi_cmd);
2103 * 16 byte CDB. We'll only get here if the LBA is larger
2106 struct scsi_rw_16 *scsi_cmd;
2108 scsi_cmd = (struct scsi_rw_16 *)cdb;
2109 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2110 scsi_cmd->byte2 = byte2;
2111 scsi_u64to8b(lba, scsi_cmd->addr);
2112 scsi_cmd->reserved = 0;
2113 scsi_ulto4b(block_count, scsi_cmd->length);
2114 scsi_cmd->control = 0;
2115 cdb_len = sizeof(*scsi_cmd);
2121 extern char *unmapped_buf;
/*
 * Build a PD SCSI passthrough (MFI_CMD_PD_SCSI_IO) command for a system
 * PD bio: pick direction from bio_cmd, build the CDB with mfi_build_cdb(),
 * and point cm_data at unmapped_buf (MFI_CMD_BIO: the real buffer is
 * mapped from the bio at bus_dmamap_load_bio time).  Returns NULL if no
 * free command is available.
 */
2123 static struct mfi_command *
2124 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2126 struct mfi_command *cm;
2127 struct mfi_pass_frame *pass;
2128 uint32_t context = 0;
2129 int flags = 0, blkcount = 0, readop;
2132 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2134 if ((cm = mfi_dequeue_free(sc)) == NULL)
2137 /* Zero out the MFI frame */
2138 context = cm->cm_frame->header.context;
2139 bzero(cm->cm_frame, sizeof(union mfi_frame));
2140 cm->cm_frame->header.context = context;
2141 pass = &cm->cm_frame->pass;
2142 bzero(pass->cdb, 16);
2143 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Case labels (BIO_READ/BIO_WRITE, presumably) elided in this excerpt. */
2144 switch (bio->bio_cmd) {
2146 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2150 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2154 /* TODO: what about BIO_DELETE??? */
2155 panic("Unsupported bio command %x\n", bio->bio_cmd);
2158 /* Cheat with the sector length to avoid a non-constant division */
2159 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2160 /* Fill the LBA and Transfer length in CDB */
2161 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2163 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2164 pass->header.lun_id = 0;
2165 pass->header.timeout = 0;
2166 pass->header.flags = 0;
2167 pass->header.scsi_status = 0;
2168 pass->header.sense_len = MFI_SENSE_LEN;
2169 pass->header.data_len = bio->bio_bcount;
2170 pass->header.cdb_len = cdb_len;
2171 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2172 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2173 cm->cm_complete = mfi_bio_complete;
2174 cm->cm_private = bio;
2175 cm->cm_data = unmapped_buf;
2176 cm->cm_len = bio->bio_bcount;
2177 cm->cm_sg = &pass->sgl;
2178 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2179 cm->cm_flags = flags;
/*
 * Build a logical-drive read/write (MFI_CMD_LD_READ/WRITE) command from a
 * bio.  data_len is in blocks here (unlike the passthrough path, which
 * uses bytes); the 64-bit LBA is split across lba_hi/lba_lo.  Returns
 * NULL if no free command is available.
 */
2184 static struct mfi_command *
2185 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2187 struct mfi_io_frame *io;
2188 struct mfi_command *cm;
2191 uint32_t context = 0;
2193 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2195 if ((cm = mfi_dequeue_free(sc)) == NULL)
2198 /* Zero out the MFI frame */
2199 context = cm->cm_frame->header.context;
2200 bzero(cm->cm_frame, sizeof(union mfi_frame));
2201 cm->cm_frame->header.context = context;
2202 io = &cm->cm_frame->io;
/* Case labels (BIO_READ/BIO_WRITE, presumably) elided in this excerpt. */
2203 switch (bio->bio_cmd) {
2205 io->header.cmd = MFI_CMD_LD_READ;
2206 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2209 io->header.cmd = MFI_CMD_LD_WRITE;
2210 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2213 /* TODO: what about BIO_DELETE??? */
2214 panic("Unsupported bio command %x\n", bio->bio_cmd);
2217 /* Cheat with the sector length to avoid a non-constant division */
2218 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2219 io->header.target_id = (uintptr_t)bio->bio_driver1;
2220 io->header.timeout = 0;
2221 io->header.flags = 0;
2222 io->header.scsi_status = 0;
2223 io->header.sense_len = MFI_SENSE_LEN;
2224 io->header.data_len = blkcount;
2225 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2226 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2227 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2228 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2229 cm->cm_complete = mfi_bio_complete;
2230 cm->cm_private = bio;
2231 cm->cm_data = unmapped_buf;
2232 cm->cm_len = bio->bio_bcount;
2233 cm->cm_sg = &io->sgl;
2234 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2235 cm->cm_flags = flags;
/*
 * Completion for bio-backed commands: translate firmware/SCSI or driver
 * errors into EIO/bio_error on the bio, log them (with sense data for
 * SCSI failures), then release the command and finish the bio.
 */
2241 mfi_bio_complete(struct mfi_command *cm)
2244 struct mfi_frame_header *hdr;
2245 struct mfi_softc *sc;
2247 bio = cm->cm_private;
2248 hdr = &cm->cm_frame->header;
2251 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2252 bio->bio_flags |= BIO_ERROR;
2253 bio->bio_error = EIO;
2254 device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2255 "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2256 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2257 } else if (cm->cm_error != 0) {
2258 bio->bio_flags |= BIO_ERROR;
2259 bio->bio_error = cm->cm_error;
2260 device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2264 mfi_release_command(cm);
2265 mfi_disk_complete(bio);
/*
 * I/O dispatch loop: while the queue isn't frozen (MFI_FLAGS_QFRZN),
 * take work in priority order — ready commands, then CAM CCBs, then bios —
 * and hand each to mfi_mapcmd().  On a dispatch failure the command is
 * requeued (the loop exit on that path is elided in this excerpt).
 */
2269 mfi_startio(struct mfi_softc *sc)
2271 struct mfi_command *cm;
2272 struct ccb_hdr *ccbh;
2275 /* Don't bother if we're short on resources */
2276 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2279 /* Try a command that has already been prepared */
2280 cm = mfi_dequeue_ready(sc);
2283 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2284 cm = sc->mfi_cam_start(ccbh);
2287 /* Nope, so look for work on the bioq */
2289 cm = mfi_bio_command(sc);
2291 /* No work available, so exit */
2295 /* Send the command to the controller */
2296 if (mfi_mapcmd(sc, cm) != 0) {
2297 device_printf(sc->mfi_dev, "Failed to startio\n");
2298 mfi_requeue_ready(cm);
/*
 * Map a command's data and send it.  For commands with data (other than
 * MFI_CMD_STP) the buffer is DMA-loaded — via the ccb, bio, or plain
 * variants depending on cm_flags — and mfi_data_cb() finishes the send.
 * Data-less commands go straight to mfi_send_frame().  EINPROGRESS from a
 * deferred (non-polled) load freezes the queue until callback time.
 */
2305 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2309 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2311 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
/* Polled commands may not defer the mapping. */
2312 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2313 if (cm->cm_flags & MFI_CMD_CCB)
2314 error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2315 cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2317 else if (cm->cm_flags & MFI_CMD_BIO)
2318 error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2319 cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2322 error = bus_dmamap_load(sc->mfi_buffer_dmat,
2323 cm->cm_dmamap, cm->cm_data, cm->cm_len,
2324 mfi_data_cb, cm, polled);
2325 if (error == EINPROGRESS) {
2326 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2330 error = mfi_send_frame(sc, cm);
/*
 * bus_dma load callback: fill in the command's scatter/gather list from
 * the DMA segments (IEEE "skinny" SGLs for I/O on SKINNY controllers,
 * otherwise 32- or 64-bit SGLs; STP frames treat the first cm_stp_len
 * bytes specially), sync the buffer, account extra frames, and send.
 * May run with or without mfi_io_lock held — it takes the lock if needed
 * and restores the entry state on exit.
 */
2337 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2339 struct mfi_frame_header *hdr;
2340 struct mfi_command *cm;
2342 struct mfi_softc *sc;
2343 int i, j, first, dir;
2344 int sge_size, locked;
2346 cm = (struct mfi_command *)arg;
2348 hdr = &cm->cm_frame->header;
2352 * We need to check if we have the lock as this is async
2353 * callback so even though our caller mfi_mapcmd asserts
2354 * it has the lock, there is no guarantee that hasn't been
2355 * dropped if bus_dmamap_load returned prior to our
2358 if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2359 mtx_lock(&sc->mfi_io_lock);
2362 printf("error %d in callback\n", error);
2363 cm->cm_error = error;
2364 mfi_complete(sc, cm);
2367 /* Use IEEE sgl only for IO's on a SKINNY controller
2368 * For other commands on a SKINNY controller use either
2369 * sg32 or sg64 based on the sizeof(bus_addr_t).
2370 * Also calculate the total frame size based on the type
2373 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2374 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2375 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2376 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2377 for (i = 0; i < nsegs; i++) {
2378 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2379 sgl->sg_skinny[i].len = segs[i].ds_len;
2380 sgl->sg_skinny[i].flag = 0;
2382 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2383 sge_size = sizeof(struct mfi_sg_skinny);
2384 hdr->sg_count = nsegs;
/* STP: the first cm_stp_len bytes form their own SG entry; the
 * remaining segments are offset by 'first'. */
2387 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2388 first = cm->cm_stp_len;
2389 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2390 sgl->sg32[j].addr = segs[0].ds_addr;
2391 sgl->sg32[j++].len = first;
2393 sgl->sg64[j].addr = segs[0].ds_addr;
2394 sgl->sg64[j++].len = first;
2398 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2399 for (i = 0; i < nsegs; i++) {
2400 sgl->sg32[j].addr = segs[i].ds_addr + first;
2401 sgl->sg32[j++].len = segs[i].ds_len - first;
2405 for (i = 0; i < nsegs; i++) {
2406 sgl->sg64[j].addr = segs[i].ds_addr + first;
2407 sgl->sg64[j++].len = segs[i].ds_len - first;
2410 hdr->flags |= MFI_FRAME_SGL64;
2413 sge_size = sc->mfi_sge_size;
/* Pre-sync in the direction(s) the command transfers data. */
2417 if (cm->cm_flags & MFI_CMD_DATAIN) {
2418 dir |= BUS_DMASYNC_PREREAD;
2419 hdr->flags |= MFI_FRAME_DIR_READ;
2421 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2422 dir |= BUS_DMASYNC_PREWRITE;
2423 hdr->flags |= MFI_FRAME_DIR_WRITE;
2425 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2426 cm->cm_flags |= MFI_CMD_MAPPED;
2429 * Instead of calculating the total number of frames in the
2430 * compound frame, it's already assumed that there will be at
2431 * least 1 frame, so don't compensate for the modulo of the
2432 * following division.
2434 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2435 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2437 if ((error = mfi_send_frame(sc, cm)) != 0) {
2438 printf("error %d in callback from mfi_send_frame\n", error);
2439 cm->cm_error = error;
2440 mfi_complete(sc, cm);
2445 /* leave the lock in the state we found it */
2447 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_send_frame: dispatch a frame to the controller, using the
 * Thunderbolt path when MFA is enabled, the standard path otherwise.
 * If the send fails and the command was queued busy, it is removed.
 * Caller must hold mfi_io_lock.
 */
2453 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2457 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2459 	if (sc->MFA_enabled)
2460 		error = mfi_tbolt_send_frame(sc, cm);
2462 		error = mfi_std_send_frame(sc, cm);
2464 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2465 		mfi_remove_busy(cm);
/*
 * mfi_std_send_frame: issue a frame via the standard (non-Thunderbolt)
 * doorbell.  Async commands are timestamped and enqueued busy; polled
 * commands busy-wait for cmd_status to change, up to
 * mfi_polled_cmd_timeout seconds.
 */
2471 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2473 	struct mfi_frame_header *hdr;
	/* Poll budget in milliseconds (elided loop decrements this). */
2474 	int tm = mfi_polled_cmd_timeout * 1000;
2476 	hdr = &cm->cm_frame->header;
2478 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2479 		cm->cm_timestamp = time_uptime;
2480 		mfi_enqueue_busy(cm);
	/* Polled: sentinel status, and keep it off the reply queue. */
2482 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2483 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2487 	 * The bus address of the command is aligned on a 64 byte boundary,
2488 	 * leaving the least 6 bits as zero. For whatever reason, the
2489 	 * hardware wants the address shifted right by three, leaving just
2490 	 * 3 zero bits. These three bits are then used as a prefetching
2491 	 * hint for the hardware to predict how many frames need to be
2492 	 * fetched across the bus. If a command has more than 8 frames
2493 	 * then the 3 bits are set to 0x7 and the firmware uses other
2494 	 * information in the command to determine the total amount to fetch.
2495 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2496 	 * is enough for both 32bit and 64bit systems.
2498 	if (cm->cm_extra_frames > 7)
2499 		cm->cm_extra_frames = 7;
2501 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2503 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2506 	/* This is a polled command, so busy-wait for it to complete. */
2507 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
	/* Still the sentinel after the wait loop: the command timed out. */
2514 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2515 		device_printf(sc->mfi_dev, "Frame %p timed out "
2516 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * mfi_complete: finish a command.  Unmaps its DMA buffer (syncing in
 * the appropriate direction(s) first), marks it completed, and invokes
 * its completion callback if one is set.  Caller holds mfi_io_lock.
 */
2525 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2528 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2530 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
	/* STP frames are synced for read regardless of DATAIN. */
2532 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2533 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2534 			dir |= BUS_DMASYNC_POSTREAD;
2535 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2536 			dir |= BUS_DMASYNC_POSTWRITE;
2538 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2539 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2540 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2543 	cm->cm_flags |= MFI_CMD_COMPLETED;
2545 	if (cm->cm_complete != NULL)
2546 		cm->cm_complete(cm);
/*
 * mfi_abort: send a polled MFI_CMD_ABORT frame targeting *cm_abort,
 * then wait (up to 5 sleeps on the cm_abort channel) for the aborted
 * command to disappear.  If it never does, force its completion.
 */
2552 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2554 	struct mfi_command *cm;
2555 	struct mfi_abort_frame *abort;
2557 	uint32_t context = 0;
2559 	mtx_lock(&sc->mfi_io_lock);
2560 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2561 		mtx_unlock(&sc->mfi_io_lock);
2565 	/* Zero out the MFI frame */
	/* Preserve the frame's context across the bzero. */
2566 	context = cm->cm_frame->header.context;
2567 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2568 	cm->cm_frame->header.context = context;
2570 	abort = &cm->cm_frame->abort;
2571 	abort->header.cmd = MFI_CMD_ABORT;
2572 	abort->header.flags = 0;
2573 	abort->header.scsi_status = 0;
2574 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
	/* Frame bus address of the command being aborted, split lo/hi. */
2575 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2576 	abort->abort_mfi_addr_hi =
2577 	    (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2579 	cm->cm_flags = MFI_CMD_POLLED;
2581 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2582 		device_printf(sc->mfi_dev, "failed to abort command\n");
2583 	mfi_release_command(cm);
2585 	mtx_unlock(&sc->mfi_io_lock);
	/* Wait for the aborted command's owner to clear *cm_abort. */
2586 	while (i < 5 && *cm_abort != NULL) {
2587 		tsleep(cm_abort, 0, "mfiabort",
2591 	if (*cm_abort != NULL) {
2592 		/* Force a complete if command didn't abort */
2593 		mtx_lock(&sc->mfi_io_lock);
2594 		(*cm_abort)->cm_complete(*cm_abort);
2595 		mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_dump_blocks: write `len` bytes at `virt` to logical disk `id`
 * starting at `lba`, using a polled MFI_CMD_LD_WRITE.  Used by the
 * kernel dump path, so it must not sleep waiting for completion.
 */
2602 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2605 	struct mfi_command *cm;
2606 	struct mfi_io_frame *io;
2608 	uint32_t context = 0;
2610 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2613 	/* Zero out the MFI frame */
2614 	context = cm->cm_frame->header.context;
2615 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2616 	cm->cm_frame->header.context = context;
2618 	io = &cm->cm_frame->io;
2619 	io->header.cmd = MFI_CMD_LD_WRITE;
2620 	io->header.target_id = id;
2621 	io->header.timeout = 0;
2622 	io->header.flags = 0;
2623 	io->header.scsi_status = 0;
2624 	io->header.sense_len = MFI_SENSE_LEN;
	/* data_len is in sectors for LD I/O frames. */
2625 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2626 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2627 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2628 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2629 	io->lba_lo = lba & 0xffffffff;
2632 	cm->cm_sg = &io->sgl;
2633 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2634 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2636 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2637 		device_printf(sc->mfi_dev, "failed dump blocks\n");
	/* Tear down the DMA mapping ourselves; no completion callback. */
2638 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2639 	    BUS_DMASYNC_POSTWRITE);
2640 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2641 	mfi_release_command(cm);
/*
 * mfi_dump_syspd_blocks: like mfi_dump_blocks, but targets a system
 * physical disk via a polled SCSI pass-through (MFI_CMD_PD_SCSI_IO)
 * with a CDB built by mfi_build_cdb.
 */
2647 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2650 	struct mfi_command *cm;
2651 	struct mfi_pass_frame *pass;
2652 	int error, readop, cdb_len;
2655 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2658 	pass = &cm->cm_frame->pass;
2659 	bzero(pass->cdb, 16);
2660 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2663 	blkcount = howmany(len, MFI_SECTOR_LEN);
2664 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2665 	pass->header.target_id = id;
2666 	pass->header.timeout = 0;
2667 	pass->header.flags = 0;
2668 	pass->header.scsi_status = 0;
2669 	pass->header.sense_len = MFI_SENSE_LEN;
	/* Pass-through frames carry the byte count, not sectors. */
2670 	pass->header.data_len = len;
2671 	pass->header.cdb_len = cdb_len;
2672 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2673 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2676 	cm->cm_sg = &pass->sgl;
2677 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2678 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2680 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2681 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2682 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2683 	    BUS_DMASYNC_POSTWRITE);
2684 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2685 	mfi_release_command(cm);
/*
 * mfi_open: cdev open handler.  Refuses new opens while the driver is
 * detaching; otherwise marks the softc open.
 */
2691 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2693 	struct mfi_softc *sc;
2698 	mtx_lock(&sc->mfi_io_lock);
2699 	if (sc->mfi_detaching)
2702 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2705 	mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_close: cdev close handler.  Clears the open flag and removes any
 * AEN registrations belonging to the closing process.
 */
2711 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2713 	struct mfi_softc *sc;
2714 	struct mfi_aen *mfi_aen_entry, *tmp;
2718 	mtx_lock(&sc->mfi_io_lock);
2719 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
	/* Drop this process's AEN subscriptions. */
2721 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2722 		if (mfi_aen_entry->p == curproc) {
2723 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2725 			free(mfi_aen_entry, M_MFIBUF);
2728 	mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_config_lock: take the exclusive config sx lock for DCMD opcodes
 * that alter the array configuration.  Return value (elided here) is
 * passed back to mfi_config_unlock to indicate whether we locked.
 */
2733 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2737 	case MFI_DCMD_LD_DELETE:
2738 	case MFI_DCMD_CFG_ADD:
2739 	case MFI_DCMD_CFG_CLEAR:
2740 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2741 		sx_xlock(&sc->mfi_config_lock);
/* mfi_config_unlock: release the config lock if mfi_config_lock took it. */
2749 mfi_config_unlock(struct mfi_softc *sc, int locked)
2753 		sx_xunlock(&sc->mfi_config_lock);
2757  * Perform pre-issue checks on commands from userland and possibly veto
/*
 * Disables the affected mfid/syspd device(s) before a destructive
 * userland DCMD (LD delete, config clear, PD state change) is issued,
 * so GEOM consumers cannot race the configuration change.
 */
2761 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2763 	struct mfi_disk *ld, *ld2;
2765 	struct mfi_system_pd *syspd = NULL;
2769 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2771 	switch (cm->cm_frame->dcmd.opcode) {
2772 	case MFI_DCMD_LD_DELETE:
	/* mbox[0] carries the target LD id for LD_DELETE. */
2773 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2774 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2780 		error = mfi_disk_disable(ld);
2782 	case MFI_DCMD_CFG_CLEAR:
	/* Disable every LD; on failure re-enable those already done. */
2783 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2784 			error = mfi_disk_disable(ld);
2789 				TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2792 						mfi_disk_enable(ld2);
2796 	case MFI_DCMD_PD_STATE_SET:
2797 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
	/* mbox[2] is the requested state; pd id read from mbox (elided). */
2799 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2800 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2801 				if (syspd->pd_id == syspd_id)
2808 			error = mfi_syspd_disable(syspd);
2816 /* Perform post-issue checks on commands from userland. */
/*
 * After a destructive userland DCMD completes: on success, detach the
 * corresponding child device(s); on failure, re-enable what the
 * pre-check disabled.  Drops/retakes mfi_io_lock around Newbus calls.
 */
2818 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2820 	struct mfi_disk *ld, *ldn;
2821 	struct mfi_system_pd *syspd = NULL;
2825 	switch (cm->cm_frame->dcmd.opcode) {
2826 	case MFI_DCMD_LD_DELETE:
2827 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2828 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2831 		KASSERT(ld != NULL, ("volume dissappeared"));
2832 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2833 			mtx_unlock(&sc->mfi_io_lock);
2835 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2837 			mtx_lock(&sc->mfi_io_lock);
	/* Delete failed: undo the pre-check's disable. */
2839 			mfi_disk_enable(ld);
2841 	case MFI_DCMD_CFG_CLEAR:
2842 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2843 			mtx_unlock(&sc->mfi_io_lock);
2845 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2846 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2849 			mtx_lock(&sc->mfi_io_lock);
2851 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2852 				mfi_disk_enable(ld);
2855 	case MFI_DCMD_CFG_ADD:
2858 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2861 	case MFI_DCMD_PD_STATE_SET:
2862 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2864 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2865 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2866 				if (syspd->pd_id == syspd_id)
2872 			/* If the transition fails then enable the syspd again */
2873 			if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2874 				mfi_syspd_enable(syspd);
/*
 * mfi_check_for_sscd: detect whether a CFG_ADD or LD_DELETE userland
 * command targets a CacheCade (SSCD) volume.  For LD_DELETE this
 * issues a synchronous MFI_DCMD_LD_GET_INFO to inspect the volume's
 * params.  Return value (elided) tells the ioctl path whether to skip
 * the normal pre/post checks — presumably set where elided; verify.
 */
2880 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2882 	struct mfi_config_data *conf_data;
2883 	struct mfi_command *ld_cm = NULL;
2884 	struct mfi_ld_info *ld_info = NULL;
2885 	struct mfi_ld_config *ld;
2889 	conf_data = (struct mfi_config_data *)cm->cm_data;
2891 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
	/* The LD config record follows the array records in the blob. */
2892 		p = (char *)conf_data->array;
2893 		p += conf_data->array_size * conf_data->array_count;
2894 		ld = (struct mfi_ld_config *)p;
2895 		if (ld->params.isSSCD == 1)
2897 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2898 		error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2899 		    (void **)&ld_info, sizeof(*ld_info));
2901 			device_printf(sc->mfi_dev, "Failed to allocate"
2902 			    "MFI_DCMD_LD_GET_INFO %d", error);
2904 			free(ld_info, M_MFIBUF);
2907 		ld_cm->cm_flags = MFI_CMD_DATAIN;
	/* Target the LD being deleted (id in mbox[0]). */
2908 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2909 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2910 		if (mfi_wait_command(sc, ld_cm) != 0) {
2911 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2912 			mfi_release_command(ld_cm);
2913 			free(ld_info, M_MFIBUF);
2917 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2918 			free(ld_info, M_MFIBUF);
2919 			mfi_release_command(ld_cm);
2923 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2925 			if (ld_info->ld_config.params.isSSCD == 1)
2928 			mfi_release_command(ld_cm);
2929 			free(ld_info, M_MFIBUF);
/*
 * mfi_stp_cmd: set up kernel bounce buffers for an MFI_CMD_STP ioctl.
 * For each user iovec it creates a DMA tag/map, allocates wired
 * memory, records the bus address in both the megasas SGE array and
 * the frame's sg32/sg64 list, and copies the user data in.  Buffers
 * are torn down later by the ioctl completion path.
 */
2936 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2939 	struct mfi_ioc_packet *ioc;
2940 	ioc = (struct mfi_ioc_packet *)arg;
2941 	int sge_size, error;
2942 	struct megasas_sge *kern_sge;
2944 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	/* SGE array lives inside the frame at the caller-given offset. */
2945 	kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2946 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2948 	if (sizeof(bus_addr_t) == 8) {
2949 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2950 		cm->cm_extra_frames = 2;
2951 		sge_size = sizeof(struct mfi_sg64);
2953 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2954 		sge_size = sizeof(struct mfi_sg32);
2957 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2958 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2959 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
2960 		    1, 0,			/* algnmnt, boundary */
2961 		    BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2962 		    BUS_SPACE_MAXADDR, /* highaddr */
2963 		    NULL, NULL,		/* filter, filterarg */
2964 		    ioc->mfi_sgl[i].iov_len,/* maxsize */
2966 		    ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2967 		    BUS_DMA_ALLOCNOW,	/* flags */
2968 		    NULL, NULL,		/* lockfunc, lockarg */
2969 		    &sc->mfi_kbuff_arr_dmat[i])) {
2970 			device_printf(sc->mfi_dev,
2971 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2975 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2976 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2977 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2978 			device_printf(sc->mfi_dev,
2979 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
	/* mfi_addr_cb stores the single segment's bus address. */
2983 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2984 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2985 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2986 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2988 		if (!sc->kbuff_arr[i]) {
2989 			device_printf(sc->mfi_dev,
2990 			    "Could not allocate memory for kbuff_arr info\n");
2993 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2994 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
	/* Mirror the address into the frame's native SGL format. */
2996 		if (sizeof(bus_addr_t) == 8) {
2997 			cm->cm_frame->stp.sgl.sg64[i].addr =
2998 			    kern_sge[i].phys_addr;
2999 			cm->cm_frame->stp.sgl.sg64[i].len =
3000 			    ioc->mfi_sgl[i].iov_len;
3002 			cm->cm_frame->stp.sgl.sg32[i].addr =
3003 			    kern_sge[i].phys_addr;
3004 			cm->cm_frame->stp.sgl.sg32[i].len =
3005 			    ioc->mfi_sgl[i].iov_len;
3008 		error = copyin(ioc->mfi_sgl[i].iov_base,
3010 		    ioc->mfi_sgl[i].iov_len);
3012 			device_printf(sc->mfi_dev, "Copy in failed\n");
3017 	cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * mfi_user_command: execute a userland DCMD pass-through
 * (MFIIO_PASSTHRU).  Copies in the optional data buffer (capped at
 * 1MB), runs the command synchronously with pre/post checks, then
 * copies the frame and buffer back out.
 */
3022 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3024 	struct mfi_command *cm;
3025 	struct mfi_dcmd_frame *dcmd;
3026 	void *ioc_buf = NULL;
3028 	int error = 0, locked;
3031 	if (ioc->buf_size > 0) {
	/* Reject oversized user buffers (> 1MB). */
3032 		if (ioc->buf_size > 1024 * 1024)
3034 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3035 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3037 			device_printf(sc->mfi_dev, "failed to copyin\n");
3038 			free(ioc_buf, M_MFIBUF);
3043 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3045 	mtx_lock(&sc->mfi_io_lock);
	/* Sleep until a free command is available. */
3046 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3047 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3049 	/* Save context for later */
3050 	context = cm->cm_frame->header.context;
3052 	dcmd = &cm->cm_frame->dcmd;
3053 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3055 	cm->cm_sg = &dcmd->sgl;
3056 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3057 	cm->cm_data = ioc_buf;
3058 	cm->cm_len = ioc->buf_size;
3060 	/* restore context */
3061 	cm->cm_frame->header.context = context;
3063 	/* Cheat since we don't know if we're writing or reading */
3064 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3066 	error = mfi_check_command_pre(sc, cm);
3070 	error = mfi_wait_command(sc, cm);
3072 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
	/* Reflect completion status back into the user's frame copy. */
3075 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3076 	mfi_check_command_post(sc, cm);
3078 	mfi_release_command(cm);
3079 	mtx_unlock(&sc->mfi_io_lock);
3080 	mfi_config_unlock(sc, locked);
3081 	if (ioc->buf_size > 0)
3082 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3084 	free(ioc_buf, M_MFIBUF);
3088 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * mfi_ioctl: main cdev ioctl dispatcher.  Handles stat queries, disk
 * queries, the raw frame pass-through (MFI_CMD, with 32-bit compat),
 * AEN registration, Linux ioctl shims, and MFIIO_PASSTHRU[32].
 * NOTE(review): heavily elided listing — case labels, error paths and
 * braces between the visible lines are not shown here.
 */
3091 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3093 	struct mfi_softc *sc;
3094 	union mfi_statrequest *ms;
3095 	struct mfi_ioc_packet *ioc;
3096 #ifdef COMPAT_FREEBSD32
3097 	struct mfi_ioc_packet32 *ioc32;
3099 	struct mfi_ioc_aen *aen;
3100 	struct mfi_command *cm = NULL;
3101 	uint32_t context = 0;
3102 	union mfi_sense_ptr sense_ptr;
3103 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3106 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3107 #ifdef COMPAT_FREEBSD32
3108 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3109 	struct mfi_ioc_passthru iop_swab;
	/* Refuse ioctls while the controller is in a fatal state. */
3119 	if (sc->hw_crit_error)
3122 	if (sc->issuepend_done == 0)
3127 		ms = (union mfi_statrequest *)arg;
3128 		switch (ms->ms_item) {
3133 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3134 			    sizeof(struct mfi_qstat));
3141 	case MFIIO_QUERY_DISK:
3143 		struct mfi_query_disk *qd;
3144 		struct mfi_disk *ld;
3146 		qd = (struct mfi_query_disk *)arg;
3147 		mtx_lock(&sc->mfi_io_lock);
3148 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3149 			if (ld->ld_id == qd->array_id)
3154 			mtx_unlock(&sc->mfi_io_lock);
3158 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3160 		bzero(qd->devname, SPECNAMELEN + 1);
3161 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3162 		mtx_unlock(&sc->mfi_io_lock);
3166 #ifdef COMPAT_FREEBSD32
	/* Raw frame pass-through (MFI_CMD / MFI_CMD32). */
3170 		devclass_t devclass;
3171 		ioc = (struct mfi_ioc_packet *)arg;
	/* Allow addressing a different adapter than the opened one. */
3174 		adapter = ioc->mfi_adapter_no;
3175 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3176 			devclass = devclass_find("mfi");
3177 			sc = devclass_get_softc(devclass, adapter);
3179 		mtx_lock(&sc->mfi_io_lock);
3180 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3181 			mtx_unlock(&sc->mfi_io_lock);
3184 		mtx_unlock(&sc->mfi_io_lock);
3188 		 * save off original context since copying from user
3189 		 * will clobber some data
3191 		context = cm->cm_frame->header.context;
3192 		cm->cm_frame->header.context = cm->cm_index;
3194 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3195 		    2 * MEGAMFI_FRAME_SIZE);
3196 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3197 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3198 		cm->cm_frame->header.scsi_status = 0;
3199 		cm->cm_frame->header.pad0 = 0;
3200 		if (ioc->mfi_sge_count) {
3202 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3206 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3207 			cm->cm_flags |= MFI_CMD_DATAIN;
3208 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3209 			cm->cm_flags |= MFI_CMD_DATAOUT;
3210 		/* Legacy app shim */
3211 		if (cm->cm_flags == 0)
3212 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3213 		cm->cm_len = cm->cm_frame->header.data_len;
	/* STP frames carry an extra header chunk in the first iovec. */
3214 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3215 #ifdef COMPAT_FREEBSD32
3216 			if (cmd == MFI_CMD) {
3219 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3220 #ifdef COMPAT_FREEBSD32
3222 				/* 32bit on 64bit */
3223 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3224 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3227 			cm->cm_len += cm->cm_stp_len;
3230 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3231 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3237 		/* restore header context */
3238 		cm->cm_frame->header.context = context;
3240 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3241 			res = mfi_stp_cmd(sc, cm, arg);
	/* Copy user data in for DATAOUT / STP commands. */
3246 		if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3247 		    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3248 			for (i = 0; i < ioc->mfi_sge_count; i++) {
3249 #ifdef COMPAT_FREEBSD32
3250 				if (cmd == MFI_CMD) {
3253 					addr = ioc->mfi_sgl[i].iov_base;
3254 					len = ioc->mfi_sgl[i].iov_len;
3255 #ifdef COMPAT_FREEBSD32
3257 					/* 32bit on 64bit */
3258 					ioc32 = (struct mfi_ioc_packet32 *)ioc;
3259 					addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3260 					len = ioc32->mfi_sgl[i].iov_len;
3263 				error = copyin(addr, temp, len);
3265 					device_printf(sc->mfi_dev,
3266 					    "Copy in failed\n");
3274 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3275 			locked = mfi_config_lock(sc,
3276 			    cm->cm_frame->dcmd.opcode);
	/* Point pass-through frames at the per-command sense buffer. */
3278 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3279 			cm->cm_frame->pass.sense_addr_lo =
3280 			    (uint32_t)cm->cm_sense_busaddr;
3281 			cm->cm_frame->pass.sense_addr_hi =
3282 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3284 		mtx_lock(&sc->mfi_io_lock);
3285 		skip_pre_post = mfi_check_for_sscd (sc, cm);
3286 		if (!skip_pre_post) {
3287 			error = mfi_check_command_pre(sc, cm);
3289 				mtx_unlock(&sc->mfi_io_lock);
3293 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3294 			device_printf(sc->mfi_dev,
3295 			    "Controller polled failed\n");
3296 			mtx_unlock(&sc->mfi_io_lock);
3299 		if (!skip_pre_post) {
3300 			mfi_check_command_post(sc, cm);
3302 		mtx_unlock(&sc->mfi_io_lock);
	/* Copy results back out for DATAIN / STP commands. */
3304 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3306 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3307 		    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3308 			for (i = 0; i < ioc->mfi_sge_count; i++) {
3309 #ifdef COMPAT_FREEBSD32
3310 				if (cmd == MFI_CMD) {
3313 					addr = ioc->mfi_sgl[i].iov_base;
3314 					len = ioc->mfi_sgl[i].iov_len;
3315 #ifdef COMPAT_FREEBSD32
3317 					/* 32bit on 64bit */
3318 					ioc32 = (struct mfi_ioc_packet32 *)ioc;
3319 					addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3320 					len = ioc32->mfi_sgl[i].iov_len;
3323 				error = copyout(temp, addr, len);
3325 					device_printf(sc->mfi_dev,
3326 					    "Copy out failed\n");
3334 		if (ioc->mfi_sense_len) {
3335 			/* get user-space sense ptr then copy out sense */
3336 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3337 			    &sense_ptr.sense_ptr_data[0],
3338 			    sizeof(sense_ptr.sense_ptr_data));
3339 #ifdef COMPAT_FREEBSD32
3340 			if (cmd != MFI_CMD) {
3342 				 * not 64bit native so zero out any address
3344 				sense_ptr.addr.high = 0;
3347 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3348 			    ioc->mfi_sense_len);
3350 				device_printf(sc->mfi_dev,
3351 				    "Copy out failed\n");
3356 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3358 		mfi_config_unlock(sc, locked);
3360 		free(data, M_MFIBUF);
	/* STP cleanup: unload/free/destroy the kernel bounce buffers. */
3361 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3362 			for (i = 0; i < 2; i++) {
3363 				if (sc->kbuff_arr[i]) {
3364 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3366 						    sc->mfi_kbuff_arr_dmat[i],
3367 						    sc->mfi_kbuff_arr_dmamap[i]
3369 					if (sc->kbuff_arr[i] != NULL)
3371 						    sc->mfi_kbuff_arr_dmat[i],
3373 						    sc->mfi_kbuff_arr_dmamap[i]
3375 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3376 						bus_dma_tag_destroy(
3377 						    sc->mfi_kbuff_arr_dmat[i]);
3382 		mtx_lock(&sc->mfi_io_lock);
3383 		mfi_release_command(cm);
3384 		mtx_unlock(&sc->mfi_io_lock);
	/* AEN registration. */
3390 		aen = (struct mfi_ioc_aen *)arg;
3391 		mtx_lock(&sc->mfi_io_lock);
3392 		error = mfi_aen_register(sc, aen->aen_seq_num,
3393 		    aen->aen_class_locale);
3394 		mtx_unlock(&sc->mfi_io_lock);
3397 	case MFI_LINUX_CMD_2:		/* Firmware Linux ioctl shim */
3399 		devclass_t devclass;
3400 		struct mfi_linux_ioc_packet l_ioc;
3403 		devclass = devclass_find("mfi");
3404 		if (devclass == NULL)
3407 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3410 		adapter = l_ioc.lioc_adapter_no;
3411 		sc = devclass_get_softc(devclass, adapter);
3414 		return (mfi_linux_ioctl_int(sc->mfi_cdev,
3415 		    cmd, arg, flag, td));
3418 	case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
3420 		devclass_t devclass;
3421 		struct mfi_linux_ioc_aen l_aen;
3424 		devclass = devclass_find("mfi");
3425 		if (devclass == NULL)
3428 		error = copyin(arg, &l_aen, sizeof(l_aen));
3431 		adapter = l_aen.laen_adapter_no;
3432 		sc = devclass_get_softc(devclass, adapter);
3435 		return (mfi_linux_ioctl_int(sc->mfi_cdev,
3436 		    cmd, arg, flag, td));
3439 #ifdef COMPAT_FREEBSD32
3440 	case MFIIO_PASSTHRU32:
3441 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
	/* Thunk 32-bit passthru request into the native layout. */
3445 		iop_swab.ioc_frame	= iop32->ioc_frame;
3446 		iop_swab.buf_size	= iop32->buf_size;
3447 		iop_swab.buf		= PTRIN(iop32->buf);
3451 	case MFIIO_PASSTHRU:
3452 		error = mfi_user_command(sc, iop);
3453 #ifdef COMPAT_FREEBSD32
3454 		if (cmd == MFIIO_PASSTHRU32)
3455 			iop32->ioc_frame = iop_swab.ioc_frame;
3459 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_linux_ioctl_int: back end for the Linux-compat ioctl shims.
 * Mirrors the native MFI_CMD pass-through path, but copies the
 * request structure in from userland (Linux layout) and assumes
 * 32-bit user pointers throughout.
 * NOTE(review): elided listing — some braces/paths are not visible.
 */
3468 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3470 	struct mfi_softc *sc;
3471 	struct mfi_linux_ioc_packet l_ioc;
3472 	struct mfi_linux_ioc_aen l_aen;
3473 	struct mfi_command *cm = NULL;
3474 	struct mfi_aen *mfi_aen_entry;
3475 	union mfi_sense_ptr sense_ptr;
3476 	uint32_t context = 0;
3477 	uint8_t *data = NULL, *temp;
3484 	case MFI_LINUX_CMD_2:		/* Firmware Linux ioctl shim */
3485 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
	/* Bound the SGE count before trusting it in loops below. */
3489 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3493 		mtx_lock(&sc->mfi_io_lock);
3494 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3495 			mtx_unlock(&sc->mfi_io_lock);
3498 		mtx_unlock(&sc->mfi_io_lock);
3502 		 * save off original context since copying from user
3503 		 * will clobber some data
3505 		context = cm->cm_frame->header.context;
3507 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3508 		    2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3509 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3510 		    * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3511 		cm->cm_frame->header.scsi_status = 0;
3512 		cm->cm_frame->header.pad0 = 0;
3513 		if (l_ioc.lioc_sge_count)
3515 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3517 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3518 			cm->cm_flags |= MFI_CMD_DATAIN;
3519 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3520 			cm->cm_flags |= MFI_CMD_DATAOUT;
3521 		cm->cm_len = cm->cm_frame->header.data_len;
3523 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3524 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3530 		/* restore header context */
3531 		cm->cm_frame->header.context = context;
	/* Gather user data into the kernel buffer for DATAOUT. */
3534 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3535 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3536 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3538 				    l_ioc.lioc_sgl[i].iov_len);
3540 					device_printf(sc->mfi_dev,
3541 					    "Copy in failed\n");
3544 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3548 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3549 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3551 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3552 			cm->cm_frame->pass.sense_addr_lo =
3553 			    (uint32_t)cm->cm_sense_busaddr;
3554 			cm->cm_frame->pass.sense_addr_hi =
3555 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3558 		mtx_lock(&sc->mfi_io_lock);
3559 		error = mfi_check_command_pre(sc, cm);
3561 			mtx_unlock(&sc->mfi_io_lock);
3565 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3566 			device_printf(sc->mfi_dev,
3567 			    "Controller polled failed\n");
3568 			mtx_unlock(&sc->mfi_io_lock);
3572 		mfi_check_command_post(sc, cm);
3573 		mtx_unlock(&sc->mfi_io_lock);
	/* Scatter results back to the user's iovecs for DATAIN. */
3576 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3577 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3578 				error = copyout(temp,
3579 				    PTRIN(l_ioc.lioc_sgl[i].iov_base),
3580 				    l_ioc.lioc_sgl[i].iov_len);
3582 					device_printf(sc->mfi_dev,
3583 					    "Copy out failed\n");
3586 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3590 		if (l_ioc.lioc_sense_len) {
3591 			/* get user-space sense ptr then copy out sense */
3592 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3593 			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
3594 			    &sense_ptr.sense_ptr_data[0],
3595 			    sizeof(sense_ptr.sense_ptr_data));
3598 			 * only 32bit Linux support so zero out any
3599 			 * address over 32bit
3601 			sense_ptr.addr.high = 0;
3603 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3604 			    l_ioc.lioc_sense_len);
3606 				device_printf(sc->mfi_dev,
3607 				    "Copy out failed\n");
	/* Hand the final command status back to the user frame. */
3612 		error = copyout(&cm->cm_frame->header.cmd_status,
3613 		    &((struct mfi_linux_ioc_packet*)arg)
3614 		    ->lioc_frame.hdr.cmd_status,
3617 			device_printf(sc->mfi_dev,
3618 			    "Copy out failed\n");
3623 		mfi_config_unlock(sc, locked);
3625 		free(data, M_MFIBUF);
3627 		mtx_lock(&sc->mfi_io_lock);
3628 		mfi_release_command(cm);
3629 		mtx_unlock(&sc->mfi_io_lock);
3633 	case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
3634 		error = copyin(arg, &l_aen, sizeof(l_aen));
3637 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3638 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3640 		mtx_lock(&sc->mfi_io_lock);
3641 		if (mfi_aen_entry != NULL) {
3642 			mfi_aen_entry->p = curproc;
3643 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3646 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3647 		    l_aen.laen_class_locale);
	/* Registration failed: unlink and free the new entry. */
3650 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3652 			free(mfi_aen_entry, M_MFIBUF);
3654 		mtx_unlock(&sc->mfi_io_lock);
3658 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_poll: cdev poll handler.  Reports readable when an AEN has
 * triggered (consuming the trigger); otherwise records the thread in
 * the selinfo so it is woken on the next AEN.
 */
3667 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3669 	struct mfi_softc *sc;
3674 	if (poll_events & (POLLIN | POLLRDNORM)) {
3675 		if (sc->mfi_aen_triggered != 0) {
3676 			revents |= poll_events & (POLLIN | POLLRDNORM);
3677 			sc->mfi_aen_triggered = 0;
3679 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
	/* Nothing pending: register for select/poll wakeup. */
3685 		if (poll_events & (POLLIN | POLLRDNORM)) {
3686 			sc->mfi_poll_waiting = 1;
3687 			selrecord(td, &sc->mfi_select);
/*
 * Body of a debug dump routine (function header elided from this
 * listing; presumably mfi_dump_all — confirm against full source).
 * Walks every mfi softc in the devclass and prints any busy command
 * older than mfi_cmd_timeout seconds.
 */
3697 	struct mfi_softc *sc;
3698 	struct mfi_command *cm;
3704 	dc = devclass_find("mfi");
3706 		printf("No mfi dev class\n");
	/* Iterate adapters until devclass_get_softc runs out (elided check). */
3710 	for (i = 0; ; i++) {
3711 		sc = devclass_get_softc(dc, i);
3714 		device_printf(sc->mfi_dev, "Dumping\n\n");
3716 		deadline = time_uptime - mfi_cmd_timeout;
3717 		mtx_lock(&sc->mfi_io_lock);
3718 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3719 			if (cm->cm_timestamp <= deadline) {
3720 				device_printf(sc->mfi_dev,
3721 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3722 				    cm, (int)(time_uptime - cm->cm_timestamp));
3733 		mtx_unlock(&sc->mfi_io_lock);
3740 mfi_timeout(void *data)
3742 struct mfi_softc *sc = (struct mfi_softc *)data;
3743 struct mfi_command *cm, *tmp;
3747 deadline = time_uptime - mfi_cmd_timeout;
3748 if (sc->adpreset == 0) {
3749 if (!mfi_tbolt_reset(sc)) {
3750 callout_reset(&sc->mfi_watchdog_callout,
3751 mfi_cmd_timeout * hz, mfi_timeout, sc);
3755 mtx_lock(&sc->mfi_io_lock);
3756 TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3757 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3759 if (cm->cm_timestamp <= deadline) {
3760 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3761 cm->cm_timestamp = time_uptime;
3763 device_printf(sc->mfi_dev,
3764 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3765 cm, (int)(time_uptime - cm->cm_timestamp)
3768 MFI_VALIDATE_CMD(sc, cm);
3770 * While commands can get stuck forever we do
3771 * not fail them as there is no way to tell if
3772 * the controller has actually processed them
3775 * In addition its very likely that force
3776 * failing a command here would cause a panic
3789 mtx_unlock(&sc->mfi_io_lock);
3791 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,