2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
4 * Copyright (c) 2006 IronPort Systems
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2007 LSI Corp.
30 * Copyright (c) 2007 Rajesh Prabhakaran.
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sysctl.h>
63 #include <sys/malloc.h>
64 #include <sys/kernel.h>
66 #include <sys/selinfo.h>
69 #include <sys/eventhandler.h>
72 #include <sys/ioccom.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
79 #include <machine/bus.h>
80 #include <machine/resource.h>
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
88 static int mfi_alloc_commands(struct mfi_softc *);
89 static int mfi_comms_init(struct mfi_softc *);
90 static int mfi_get_controller_info(struct mfi_softc *);
91 static int mfi_get_log_state(struct mfi_softc *,
92 struct mfi_evt_log_state **);
93 static int mfi_parse_entries(struct mfi_softc *, int, int);
94 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void mfi_startup(void *arg);
96 static void mfi_intr(void *arg);
97 static void mfi_ldprobe(struct mfi_softc *sc);
98 static void mfi_syspdprobe(struct mfi_softc *sc);
99 static void mfi_handle_evt(void *context, int pending);
100 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void mfi_aen_complete(struct mfi_command *);
102 static int mfi_add_ld(struct mfi_softc *sc, int);
103 static void mfi_add_ld_complete(struct mfi_command *);
104 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107 static void mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void mfi_timeout(void *);
115 static int mfi_user_command(struct mfi_softc *,
116 struct mfi_ioc_passthru *);
117 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
125 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
136 0, "event message locale");
138 static int mfi_event_class = MFI_EVT_CLASS_INFO;
139 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
140 0, "event message class");
142 static int mfi_max_cmds = 128;
143 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
144 0, "Max commands limit (-1 = controller limit)");
146 static int mfi_detect_jbod_change = 1;
147 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
148 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
150 int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
151 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
152 &mfi_polled_cmd_timeout, 0,
153 "Polled command timeout - used for firmware flash etc (in seconds)");
155 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
157 0, "Command timeout (in seconds)");
159 /* Management interface */
160 static d_open_t mfi_open;
161 static d_close_t mfi_close;
162 static d_ioctl_t mfi_ioctl;
163 static d_poll_t mfi_poll;
165 static struct cdevsw mfi_cdevsw = {
166 .d_version = D_VERSION,
169 .d_close = mfi_close,
170 .d_ioctl = mfi_ioctl,
175 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
177 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
178 struct mfi_skinny_dma_info mfi_skinny;
181 mfi_enable_intr_xscale(struct mfi_softc *sc)
183 MFI_WRITE4(sc, MFI_OMSK, 0x01);
187 mfi_enable_intr_ppc(struct mfi_softc *sc)
189 if (sc->mfi_flags & MFI_FLAGS_1078) {
190 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
191 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
193 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
194 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
195 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
197 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
198 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
203 mfi_read_fw_status_xscale(struct mfi_softc *sc)
205 return MFI_READ4(sc, MFI_OMSG0);
209 mfi_read_fw_status_ppc(struct mfi_softc *sc)
211 return MFI_READ4(sc, MFI_OSP0);
215 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
219 status = MFI_READ4(sc, MFI_OSTS);
220 if ((status & MFI_OSTS_INTR_VALID) == 0)
223 MFI_WRITE4(sc, MFI_OSTS, status);
228 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
232 status = MFI_READ4(sc, MFI_OSTS);
233 if (sc->mfi_flags & MFI_FLAGS_1078) {
234 if (!(status & MFI_1078_RM)) {
238 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
239 if (!(status & MFI_GEN2_RM)) {
243 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
244 if (!(status & MFI_SKINNY_RM)) {
248 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
249 MFI_WRITE4(sc, MFI_OSTS, status);
251 MFI_WRITE4(sc, MFI_ODCR0, status);
256 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
258 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
262 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
264 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
265 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
266 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
268 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
273 mfi_transition_firmware(struct mfi_softc *sc)
275 uint32_t fw_state, cur_state;
277 uint32_t cur_abs_reg_val = 0;
278 uint32_t prev_abs_reg_val = 0;
280 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
281 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
282 while (fw_state != MFI_FWSTATE_READY) {
284 device_printf(sc->mfi_dev, "Waiting for firmware to "
286 cur_state = fw_state;
288 case MFI_FWSTATE_FAULT:
289 device_printf(sc->mfi_dev, "Firmware fault\n");
291 case MFI_FWSTATE_WAIT_HANDSHAKE:
292 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
293 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
295 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
296 max_wait = MFI_RESET_WAIT_TIME;
298 case MFI_FWSTATE_OPERATIONAL:
299 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
300 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
302 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
303 max_wait = MFI_RESET_WAIT_TIME;
305 case MFI_FWSTATE_UNDEFINED:
306 case MFI_FWSTATE_BB_INIT:
307 max_wait = MFI_RESET_WAIT_TIME;
309 case MFI_FWSTATE_FW_INIT_2:
310 max_wait = MFI_RESET_WAIT_TIME;
312 case MFI_FWSTATE_FW_INIT:
313 case MFI_FWSTATE_FLUSH_CACHE:
314 max_wait = MFI_RESET_WAIT_TIME;
316 case MFI_FWSTATE_DEVICE_SCAN:
317 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
318 prev_abs_reg_val = cur_abs_reg_val;
320 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
321 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
322 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
324 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
325 max_wait = MFI_RESET_WAIT_TIME;
328 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
332 for (i = 0; i < (max_wait * 10); i++) {
333 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
334 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
335 if (fw_state == cur_state)
340 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
341 /* Check the device scanning progress */
342 if (prev_abs_reg_val != cur_abs_reg_val) {
346 if (fw_state == cur_state) {
347 device_printf(sc->mfi_dev, "Firmware stuck in state "
356 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
361 *addr = segs[0].ds_addr;
366 mfi_attach(struct mfi_softc *sc)
369 int error, commsz, framessz, sensesz;
370 int frames, unit, max_fw_sge, max_fw_cmds;
371 uint32_t tb_mem_size = 0;
377 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
380 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
381 sx_init(&sc->mfi_config_lock, "MFI config");
382 TAILQ_INIT(&sc->mfi_ld_tqh);
383 TAILQ_INIT(&sc->mfi_syspd_tqh);
384 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
385 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
386 TAILQ_INIT(&sc->mfi_evt_queue);
387 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
388 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
389 TAILQ_INIT(&sc->mfi_aen_pids);
390 TAILQ_INIT(&sc->mfi_cam_ccbq);
398 sc->last_seq_num = 0;
399 sc->disableOnlineCtrlReset = 1;
400 sc->issuepend_done = 1;
401 sc->hw_crit_error = 0;
403 if (sc->mfi_flags & MFI_FLAGS_1064R) {
404 sc->mfi_enable_intr = mfi_enable_intr_xscale;
405 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
406 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
407 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
408 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
409 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
410 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
411 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
412 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
413 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
414 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
416 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
418 sc->mfi_enable_intr = mfi_enable_intr_ppc;
419 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
420 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
421 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
425 /* Before we get too far, see if the firmware is working */
426 if ((error = mfi_transition_firmware(sc)) != 0) {
427 device_printf(sc->mfi_dev, "Firmware not in READY state, "
428 "error %d\n", error);
432 /* Start: LSIP200113393 */
433 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
434 1, 0, /* algnmnt, boundary */
435 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
436 BUS_SPACE_MAXADDR, /* highaddr */
437 NULL, NULL, /* filter, filterarg */
438 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
440 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
442 NULL, NULL, /* lockfunc, lockarg */
443 &sc->verbuf_h_dmat)) {
444 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
447 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
448 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
449 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
452 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
453 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
454 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
455 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
456 /* End: LSIP200113393 */
459 * Get information needed for sizing the contiguous memory for the
460 * frame pool. Size down the sgl parameter since we know that
461 * we will never need more than what's required for MAXPHYS.
462 * It would be nice if these constants were available at runtime
463 * instead of compile time.
465 status = sc->mfi_read_fw_status(sc);
466 max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
467 if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
468 device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
469 max_fw_cmds, mfi_max_cmds);
470 sc->mfi_max_fw_cmds = mfi_max_cmds;
472 sc->mfi_max_fw_cmds = max_fw_cmds;
474 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
475 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
477 /* ThunderBolt Support get the contiguous memory */
479 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
480 mfi_tbolt_init_globals(sc);
481 device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
482 "MaxSgl = %d, state = %#x\n", max_fw_cmds,
483 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
484 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
486 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
487 1, 0, /* algnmnt, boundary */
488 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
489 BUS_SPACE_MAXADDR, /* highaddr */
490 NULL, NULL, /* filter, filterarg */
491 tb_mem_size, /* maxsize */
493 tb_mem_size, /* maxsegsize */
495 NULL, NULL, /* lockfunc, lockarg */
497 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
500 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
501 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
502 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
505 bzero(sc->request_message_pool, tb_mem_size);
506 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
507 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
509 /* For ThunderBolt memory init */
510 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
511 0x100, 0, /* alignmnt, boundary */
512 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
513 BUS_SPACE_MAXADDR, /* highaddr */
514 NULL, NULL, /* filter, filterarg */
515 MFI_FRAME_SIZE, /* maxsize */
517 MFI_FRAME_SIZE, /* maxsegsize */
519 NULL, NULL, /* lockfunc, lockarg */
520 &sc->mfi_tb_init_dmat)) {
521 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
524 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
525 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
526 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
529 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
530 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
531 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
532 &sc->mfi_tb_init_busaddr, 0);
533 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
535 device_printf(sc->mfi_dev,
536 "Thunderbolt pool preparation error\n");
541 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
542 we are taking it different from what we have allocated for Request
543 and reply descriptors to avoid confusion later
545 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
546 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
547 1, 0, /* algnmnt, boundary */
548 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
549 BUS_SPACE_MAXADDR, /* highaddr */
550 NULL, NULL, /* filter, filterarg */
551 tb_mem_size, /* maxsize */
553 tb_mem_size, /* maxsegsize */
555 NULL, NULL, /* lockfunc, lockarg */
556 &sc->mfi_tb_ioc_init_dmat)) {
557 device_printf(sc->mfi_dev,
558 "Cannot allocate comms DMA tag\n");
561 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
562 (void **)&sc->mfi_tb_ioc_init_desc,
563 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
564 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
567 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
568 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
569 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
570 &sc->mfi_tb_ioc_init_busaddr, 0);
573 * Create the dma tag for data buffers. Used both for block I/O
574 * and for various internal data queries.
576 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
577 1, 0, /* algnmnt, boundary */
578 BUS_SPACE_MAXADDR, /* lowaddr */
579 BUS_SPACE_MAXADDR, /* highaddr */
580 NULL, NULL, /* filter, filterarg */
581 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
582 sc->mfi_max_sge, /* nsegments */
583 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
584 BUS_DMA_ALLOCNOW, /* flags */
585 busdma_lock_mutex, /* lockfunc */
586 &sc->mfi_io_lock, /* lockfuncarg */
587 &sc->mfi_buffer_dmat)) {
588 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
593 * Allocate DMA memory for the comms queues. Keep it under 4GB for
594 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
595 * entry, so the calculated size here will be will be 1 more than
596 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
598 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
599 sizeof(struct mfi_hwcomms);
600 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
601 1, 0, /* algnmnt, boundary */
602 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
603 BUS_SPACE_MAXADDR, /* highaddr */
604 NULL, NULL, /* filter, filterarg */
605 commsz, /* maxsize */
607 commsz, /* maxsegsize */
609 NULL, NULL, /* lockfunc, lockarg */
610 &sc->mfi_comms_dmat)) {
611 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
614 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
615 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
616 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
619 bzero(sc->mfi_comms, commsz);
620 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
621 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
623 * Allocate DMA memory for the command frames. Keep them in the
624 * lower 4GB for efficiency. Calculate the size of the commands at
625 * the same time; each command is one 64 byte frame plus a set of
626 * additional frames for holding sg lists or other data.
627 * The assumption here is that the SG list will start at the second
628 * frame and not use the unused bytes in the first frame. While this
629 * isn't technically correct, it simplifies the calculation and allows
630 * for command frames that might be larger than an mfi_io_frame.
632 if (sizeof(bus_addr_t) == 8) {
633 sc->mfi_sge_size = sizeof(struct mfi_sg64);
634 sc->mfi_flags |= MFI_FLAGS_SG64;
636 sc->mfi_sge_size = sizeof(struct mfi_sg32);
638 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
639 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
640 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
641 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
642 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
643 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
644 64, 0, /* algnmnt, boundary */
645 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
646 BUS_SPACE_MAXADDR, /* highaddr */
647 NULL, NULL, /* filter, filterarg */
648 framessz, /* maxsize */
650 framessz, /* maxsegsize */
652 NULL, NULL, /* lockfunc, lockarg */
653 &sc->mfi_frames_dmat)) {
654 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
657 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
658 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
659 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
662 bzero(sc->mfi_frames, framessz);
663 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
664 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
666 * Allocate DMA memory for the frame sense data. Keep them in the
667 * lower 4GB for efficiency
669 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
670 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
671 4, 0, /* algnmnt, boundary */
672 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
673 BUS_SPACE_MAXADDR, /* highaddr */
674 NULL, NULL, /* filter, filterarg */
675 sensesz, /* maxsize */
677 sensesz, /* maxsegsize */
679 NULL, NULL, /* lockfunc, lockarg */
680 &sc->mfi_sense_dmat)) {
681 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
684 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
685 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
686 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
689 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
690 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
691 if ((error = mfi_alloc_commands(sc)) != 0)
694 /* Before moving the FW to operational state, check whether
695 * hostmemory is required by the FW or not
698 /* ThunderBolt MFI_IOC2 INIT */
699 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
700 sc->mfi_disable_intr(sc);
701 mtx_lock(&sc->mfi_io_lock);
702 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
703 device_printf(sc->mfi_dev,
704 "TB Init has failed with error %d\n",error);
705 mtx_unlock(&sc->mfi_io_lock);
708 mtx_unlock(&sc->mfi_io_lock);
710 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
712 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
713 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
715 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
718 sc->mfi_intr_ptr = mfi_intr_tbolt;
719 sc->mfi_enable_intr(sc);
721 if ((error = mfi_comms_init(sc)) != 0)
724 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
725 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
726 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
729 sc->mfi_intr_ptr = mfi_intr;
730 sc->mfi_enable_intr(sc);
732 if ((error = mfi_get_controller_info(sc)) != 0)
734 sc->disableOnlineCtrlReset = 0;
736 /* Register a config hook to probe the bus for arrays */
737 sc->mfi_ich.ich_func = mfi_startup;
738 sc->mfi_ich.ich_arg = sc;
739 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
740 device_printf(sc->mfi_dev, "Cannot establish configuration "
744 mtx_lock(&sc->mfi_io_lock);
745 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
746 mtx_unlock(&sc->mfi_io_lock);
749 mtx_unlock(&sc->mfi_io_lock);
752 * Register a shutdown handler.
754 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
755 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
756 device_printf(sc->mfi_dev, "Warning: shutdown event "
757 "registration failed\n");
761 * Create the control device for doing management
763 unit = device_get_unit(sc->mfi_dev);
764 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
765 0640, "mfi%d", unit);
767 make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
768 sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
769 if (sc->mfi_cdev != NULL)
770 sc->mfi_cdev->si_drv1 = sc;
771 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
772 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
773 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
774 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
775 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
776 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
777 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
778 &sc->mfi_keep_deleted_volumes, 0,
779 "Don't detach the mfid device for a busy volume that is deleted");
781 device_add_child(sc->mfi_dev, "mfip", -1);
782 bus_generic_attach(sc->mfi_dev);
784 /* Start the timeout watchdog */
785 callout_init(&sc->mfi_watchdog_callout, 1);
786 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
789 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
790 mtx_lock(&sc->mfi_io_lock);
791 mfi_tbolt_sync_map_info(sc);
792 mtx_unlock(&sc->mfi_io_lock);
799 mfi_alloc_commands(struct mfi_softc *sc)
801 struct mfi_command *cm;
805 * XXX Should we allocate all the commands up front, or allocate on
806 * demand later like 'aac' does?
808 sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
809 sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
811 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
812 cm = &sc->mfi_commands[i];
813 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
814 sc->mfi_cmd_size * i);
815 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
816 sc->mfi_cmd_size * i;
817 cm->cm_frame->header.context = i;
818 cm->cm_sense = &sc->mfi_sense[i];
819 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
822 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
823 &cm->cm_dmamap) == 0) {
824 mtx_lock(&sc->mfi_io_lock);
825 mfi_release_command(cm);
826 mtx_unlock(&sc->mfi_io_lock);
828 device_printf(sc->mfi_dev, "Failed to allocate %d "
829 "command blocks, only allocated %d\n",
830 sc->mfi_max_fw_cmds, i - 1);
831 for (j = 0; j < i; j++) {
832 cm = &sc->mfi_commands[i];
833 bus_dmamap_destroy(sc->mfi_buffer_dmat,
836 free(sc->mfi_commands, M_MFIBUF);
837 sc->mfi_commands = NULL;
847 mfi_release_command(struct mfi_command *cm)
849 struct mfi_frame_header *hdr;
852 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
855 * Zero out the important fields of the frame, but make sure the
856 * context field is preserved. For efficiency, handle the fields
857 * as 32 bit words. Clear out the first S/G entry too for safety.
859 hdr = &cm->cm_frame->header;
860 if (cm->cm_data != NULL && hdr->sg_count) {
861 cm->cm_sg->sg32[0].len = 0;
862 cm->cm_sg->sg32[0].addr = 0;
866 * Command may be on other queues e.g. busy queue depending on the
867 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
870 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
872 if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
873 mfi_remove_ready(cm);
875 /* We're not expecting it to be on any other queue but check */
876 if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
877 panic("Command %p is still on another queue, flags = %#x",
882 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
883 mfi_tbolt_return_cmd(cm->cm_sc,
884 cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
888 hdr_data = (uint32_t *)cm->cm_frame;
889 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
890 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
891 hdr_data[4] = 0; /* flags, timeout */
892 hdr_data[5] = 0; /* data_len */
894 cm->cm_extra_frames = 0;
896 cm->cm_complete = NULL;
897 cm->cm_private = NULL;
900 cm->cm_total_frame_size = 0;
901 cm->retry_for_fw_reset = 0;
903 mfi_enqueue_free(cm);
907 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
908 uint32_t opcode, void **bufp, size_t bufsize)
910 struct mfi_command *cm;
911 struct mfi_dcmd_frame *dcmd;
913 uint32_t context = 0;
915 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
917 cm = mfi_dequeue_free(sc);
921 /* Zero out the MFI frame */
922 context = cm->cm_frame->header.context;
923 bzero(cm->cm_frame, sizeof(union mfi_frame));
924 cm->cm_frame->header.context = context;
926 if ((bufsize > 0) && (bufp != NULL)) {
928 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
930 mfi_release_command(cm);
939 dcmd = &cm->cm_frame->dcmd;
940 bzero(dcmd->mbox, MFI_MBOX_SIZE);
941 dcmd->header.cmd = MFI_CMD_DCMD;
942 dcmd->header.timeout = 0;
943 dcmd->header.flags = 0;
944 dcmd->header.data_len = bufsize;
945 dcmd->header.scsi_status = 0;
946 dcmd->opcode = opcode;
947 cm->cm_sg = &dcmd->sgl;
948 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
951 cm->cm_private = buf;
952 cm->cm_len = bufsize;
955 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
961 mfi_comms_init(struct mfi_softc *sc)
963 struct mfi_command *cm;
964 struct mfi_init_frame *init;
965 struct mfi_init_qinfo *qinfo;
967 uint32_t context = 0;
969 mtx_lock(&sc->mfi_io_lock);
970 if ((cm = mfi_dequeue_free(sc)) == NULL) {
971 mtx_unlock(&sc->mfi_io_lock);
975 /* Zero out the MFI frame */
976 context = cm->cm_frame->header.context;
977 bzero(cm->cm_frame, sizeof(union mfi_frame));
978 cm->cm_frame->header.context = context;
981 * Abuse the SG list area of the frame to hold the init_qinfo
984 init = &cm->cm_frame->init;
985 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
987 bzero(qinfo, sizeof(struct mfi_init_qinfo));
988 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
989 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
990 offsetof(struct mfi_hwcomms, hw_reply_q);
991 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
992 offsetof(struct mfi_hwcomms, hw_pi);
993 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
994 offsetof(struct mfi_hwcomms, hw_ci);
996 init->header.cmd = MFI_CMD_INIT;
997 init->header.data_len = sizeof(struct mfi_init_qinfo);
998 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
1000 cm->cm_flags = MFI_CMD_POLLED;
1002 if ((error = mfi_mapcmd(sc, cm)) != 0)
1003 device_printf(sc->mfi_dev, "failed to send init command\n");
1004 mfi_release_command(cm);
1005 mtx_unlock(&sc->mfi_io_lock);
1011 mfi_get_controller_info(struct mfi_softc *sc)
1013 struct mfi_command *cm = NULL;
1014 struct mfi_ctrl_info *ci = NULL;
1015 uint32_t max_sectors_1, max_sectors_2;
1018 mtx_lock(&sc->mfi_io_lock);
1019 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1020 (void **)&ci, sizeof(*ci));
1023 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1025 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1026 device_printf(sc->mfi_dev, "Failed to get controller info\n");
1027 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1033 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1034 BUS_DMASYNC_POSTREAD);
1035 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1037 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1038 max_sectors_2 = ci->max_request_size;
1039 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1040 sc->disableOnlineCtrlReset =
1041 ci->properties.OnOffProperties.disableOnlineCtrlReset;
1047 mfi_release_command(cm);
1048 mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the firmware event-log state (sequence numbers) with a polled
 * MFI_DCMD_CTRL_EVENT_GETINFO DCMD into *log_state.  Caller must hold
 * mfi_io_lock and owns the returned buffer (frees with M_MFIBUF).
 */
1053 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1055 struct mfi_command *cm = NULL;
1058 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1059 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1060 (void **)log_state, sizeof(**log_state));
/* Polled DATAIN transfer: firmware fills in the log-state buffer. */
1063 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1065 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1066 device_printf(sc->mfi_dev, "Failed to get log state\n");
1070 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1071 BUS_DMASYNC_POSTREAD);
1072 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1076 mfi_release_command(cm);
/*
 * Arm asynchronous event notification (AEN).  When seq_start is 0, read
 * the firmware log state, replay events logged between shutdown and the
 * newest entry, and continue from the newest sequence number; otherwise
 * register starting at seq_start.  Caller holds mfi_io_lock.
 */
1082 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1084 struct mfi_evt_log_state *log_state = NULL;
1085 union mfi_evt class_locale;
1089 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Build the class/locale filter from the driver-wide tunables. */
1091 class_locale.members.reserved = 0;
1092 class_locale.members.locale = mfi_event_locale;
1093 class_locale.members.evt_class = mfi_event_class;
1095 if (seq_start == 0) {
1096 if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1098 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1101 * Walk through any events that fired since the last
1104 if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1105 log_state->newest_seq_num)) != 0)
1107 seq = log_state->newest_seq_num;
1110 error = mfi_aen_register(sc, seq, class_locale.word);
1112 free(log_state, M_MFIBUF);
/*
 * Queue a command and sleep on it until it completes; returns cm_error.
 * Caller holds mfi_io_lock (msleep drops/reacquires it).
 */
1118 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1121 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1122 cm->cm_complete = NULL;
1125 * MegaCli can issue a DCMD of 0. In this case do nothing
1126 * and return 0 to it as status
1128 if (cm->cm_frame->dcmd.opcode == 0) {
1129 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1131 return (cm->cm_error);
1133 mfi_enqueue_ready(cm);
/* Sleep until the interrupt path marks the command completed. */
1135 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1136 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1137 return (cm->cm_error);
/*
 * Tear down all driver resources: watchdog callout, control device,
 * command array and per-command DMA maps, interrupt, then each DMA
 * allocation in unload -> free -> tag-destroy order, the ThunderBolt
 * pools (when MFI_FLAGS_TBOLT), and finally the locks.  Each teardown
 * is guarded so this is safe to call from a failed-attach path.
 */
1141 mfi_free(struct mfi_softc *sc)
1143 struct mfi_command *cm;
1146 callout_drain(&sc->mfi_watchdog_callout);
1148 if (sc->mfi_cdev != NULL)
1149 destroy_dev(sc->mfi_cdev);
/* Destroy the per-command DMA maps before freeing the command array. */
1151 if (sc->mfi_commands != NULL) {
1152 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1153 cm = &sc->mfi_commands[i];
1154 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1156 free(sc->mfi_commands, M_MFIBUF);
1157 sc->mfi_commands = NULL;
1161 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1162 if (sc->mfi_irq != NULL)
1163 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffers: unload map, free memory, destroy tag. */
1166 if (sc->mfi_sense_busaddr != 0)
1167 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1168 if (sc->mfi_sense != NULL)
1169 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1170 sc->mfi_sense_dmamap);
1171 if (sc->mfi_sense_dmat != NULL)
1172 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Command frames. */
1174 if (sc->mfi_frames_busaddr != 0)
1175 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1176 if (sc->mfi_frames != NULL)
1177 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1178 sc->mfi_frames_dmamap);
1179 if (sc->mfi_frames_dmat != NULL)
1180 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Communications (reply queue / producer-consumer) area. */
1182 if (sc->mfi_comms_busaddr != 0)
1183 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1184 if (sc->mfi_comms != NULL)
1185 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1186 sc->mfi_comms_dmamap);
1187 if (sc->mfi_comms_dmat != NULL)
1188 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1190 /* ThunderBolt contiguous memory free here */
1191 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1192 if (sc->mfi_tb_busaddr != 0)
1193 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1194 if (sc->request_message_pool != NULL)
1195 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1197 if (sc->mfi_tb_dmat != NULL)
1198 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1200 /* Version buffer memory free */
1201 /* Start LSIP200113393 */
1202 if (sc->verbuf_h_busaddr != 0)
1203 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1204 if (sc->verbuf != NULL)
1205 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1206 sc->verbuf_h_dmamap);
1207 if (sc->verbuf_h_dmat != NULL)
1208 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1210 /* End LSIP200113393 */
1211 /* ThunderBolt INIT packet memory Free */
1212 if (sc->mfi_tb_init_busaddr != 0)
1213 bus_dmamap_unload(sc->mfi_tb_init_dmat,
1214 sc->mfi_tb_init_dmamap);
1215 if (sc->mfi_tb_init != NULL)
1216 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1217 sc->mfi_tb_init_dmamap);
1218 if (sc->mfi_tb_init_dmat != NULL)
1219 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1221 /* ThunderBolt IOC Init Desc memory free here */
1222 if (sc->mfi_tb_ioc_init_busaddr != 0)
1223 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1224 sc->mfi_tb_ioc_init_dmamap);
1225 if (sc->mfi_tb_ioc_init_desc != NULL)
1226 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1227 sc->mfi_tb_ioc_init_desc,
1228 sc->mfi_tb_ioc_init_dmamap);
1229 if (sc->mfi_tb_ioc_init_dmat != NULL)
1230 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Per-command ThunderBolt pool entries, then the pool array itself. */
1231 if (sc->mfi_cmd_pool_tbolt != NULL) {
1232 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1233 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1234 free(sc->mfi_cmd_pool_tbolt[i],
1236 sc->mfi_cmd_pool_tbolt[i] = NULL;
1239 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1240 sc->mfi_cmd_pool_tbolt = NULL;
1242 if (sc->request_desc_pool != NULL) {
1243 free(sc->request_desc_pool, M_MFIBUF);
1244 sc->request_desc_pool = NULL;
1247 if (sc->mfi_buffer_dmat != NULL)
1248 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1249 if (sc->mfi_parent_dmat != NULL)
1250 bus_dma_tag_destroy(sc->mfi_parent_dmat);
/* Locks go last; only destroy if attach got far enough to init them. */
1252 if (mtx_initialized(&sc->mfi_io_lock)) {
1253 mtx_destroy(&sc->mfi_io_lock);
1254 sx_destroy(&sc->mfi_config_lock);
/*
 * Deferred-attach hook (config_intrhook): enable controller interrupts,
 * probe for devices under the config and io locks, then disestablish
 * the hook so boot can proceed.
 */
1261 mfi_startup(void *arg)
1263 struct mfi_softc *sc;
1265 sc = (struct mfi_softc *)arg;
1267 sc->mfi_enable_intr(sc);
1268 sx_xlock(&sc->mfi_config_lock);
1269 mtx_lock(&sc->mfi_io_lock);
/* SKINNY controllers take a different probe path here. */
1271 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1273 mtx_unlock(&sc->mfi_io_lock);
1274 sx_xunlock(&sc->mfi_config_lock);
1276 config_intrhook_disestablish(&sc->mfi_ich);
/*
 * Interrupt handler (mfi_intr; signature line lost in extraction):
 * drain the hardware reply queue between consumer (ci) and producer
 * (pi) indexes, completing each finished command, then unfreeze the
 * queue and restart deferred I/O.
 */
1282 struct mfi_softc *sc;
1283 struct mfi_command *cm;
1284 uint32_t pi, ci, context;
1286 sc = (struct mfi_softc *)arg;
/* Not our interrupt (or nothing to do) — bail early. */
1288 if (sc->mfi_check_clear_intr(sc))
1292 pi = sc->mfi_comms->hw_pi;
1293 ci = sc->mfi_comms->hw_ci;
1294 mtx_lock(&sc->mfi_io_lock);
1296 context = sc->mfi_comms->hw_reply_q[ci];
1297 if (context < sc->mfi_max_fw_cmds) {
1298 cm = &sc->mfi_commands[context];
1299 mfi_remove_busy(cm);
1301 mfi_complete(sc, cm);
/* Reply queue is circular with mfi_max_fw_cmds + 1 slots. */
1303 if (++ci == (sc->mfi_max_fw_cmds + 1))
1307 sc->mfi_comms->hw_ci = ci;
1309 /* Give deferred I/O a chance to run */
1310 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1312 mtx_unlock(&sc->mfi_io_lock);
1315 * Dummy read to flush the bus; this ensures that the indexes are up
1316 * to date. Restart processing if more commands have come in.
1318 (void)sc->mfi_read_fw_status(sc);
1319 if (pi != sc->mfi_comms->hw_pi)
/*
 * Shut the controller down cleanly: abort any outstanding AEN and map-sync
 * commands, then issue a polled MFI_DCMD_CTRL_SHUTDOWN DCMD.
 */
1326 mfi_shutdown(struct mfi_softc *sc)
1328 struct mfi_dcmd_frame *dcmd;
1329 struct mfi_command *cm;
/* Cancel the long-running AEN command, if armed. */
1333 if (sc->mfi_aen_cm != NULL) {
1334 sc->cm_aen_abort = 1;
1335 mfi_abort(sc, &sc->mfi_aen_cm);
/* Likewise the periodic LD map sync command. */
1338 if (sc->mfi_map_sync_cm != NULL) {
1339 sc->cm_map_abort = 1;
1340 mfi_abort(sc, &sc->mfi_map_sync_cm);
1343 mtx_lock(&sc->mfi_io_lock);
1344 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1346 mtx_unlock(&sc->mfi_io_lock);
/* No data phase for the shutdown DCMD; issue it polled. */
1350 dcmd = &cm->cm_frame->dcmd;
1351 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1352 cm->cm_flags = MFI_CMD_POLLED;
1355 if ((error = mfi_mapcmd(sc, cm)) != 0)
1356 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1358 mfi_release_command(cm);
1359 mtx_unlock(&sc->mfi_io_lock);
/*
 * Reconcile the set of attached system PDs (JBOD drives) with the
 * firmware's current PD list: query host-exposed PDs, attach any new
 * ones, and detach children whose PD is gone or no longer SYSTEM.
 * Caller holds both mfi_config_lock (exclusive) and mfi_io_lock.
 */
1364 mfi_syspdprobe(struct mfi_softc *sc)
1366 struct mfi_frame_header *hdr;
1367 struct mfi_command *cm = NULL;
1368 struct mfi_pd_list *pdlist = NULL;
1369 struct mfi_system_pd *syspd, *tmp;
1370 struct mfi_system_pending *syspd_pend;
1371 int error, i, found;
1373 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1374 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1375 /* Add SYSTEM PD's */
1376 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1377 (void **)&pdlist, sizeof(*pdlist));
1379 device_printf(sc->mfi_dev,
1380 "Error while forming SYSTEM PD list\n");
/* Polled DATAIN query restricted to PDs exposed to the host. */
1384 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1385 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1386 cm->cm_frame->dcmd.mbox[1] = 0;
1387 if (mfi_mapcmd(sc, cm) != 0) {
1388 device_printf(sc->mfi_dev,
1389 "Failed to get syspd device listing\n");
1392 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1393 BUS_DMASYNC_POSTREAD);
1394 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1395 hdr = &cm->cm_frame->header;
1396 if (hdr->cmd_status != MFI_STAT_OK) {
1397 device_printf(sc->mfi_dev,
1398 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1401 /* Get each PD and add it to the system */
1402 for (i = 0; i < pdlist->count; i++) {
/* Skip enclosure entries (device id equals enclosure device id). */
1403 if (pdlist->addr[i].device_id ==
1404 pdlist->addr[i].encl_device_id)
/* Skip PDs already attached or already pending attach. */
1407 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1408 if (syspd->pd_id == pdlist->addr[i].device_id)
1411 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1412 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1416 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1418 /* Delete SYSPD's whose state has been changed */
1419 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1421 for (i = 0; i < pdlist->count; i++) {
1422 if (syspd->pd_id == pdlist->addr[i].device_id) {
/* device_delete_child may sleep; drop the io lock around it. */
1429 mtx_unlock(&sc->mfi_io_lock);
1431 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1433 mtx_lock(&sc->mfi_io_lock);
1438 free(pdlist, M_MFIBUF);
1440 mfi_release_command(cm);
/*
 * Enumerate logical drives via MFI_DCMD_LD_GET_LIST (sleeping command)
 * and attach any target not already present or pending.  Caller holds
 * mfi_config_lock (exclusive) and mfi_io_lock.
 */
1446 mfi_ldprobe(struct mfi_softc *sc)
1448 struct mfi_frame_header *hdr;
1449 struct mfi_command *cm = NULL;
1450 struct mfi_ld_list *list = NULL;
1451 struct mfi_disk *ld;
1452 struct mfi_disk_pending *ld_pend;
1455 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1456 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1458 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1459 (void **)&list, sizeof(*list));
/* Non-polled: sleep for completion via mfi_wait_command(). */
1463 cm->cm_flags = MFI_CMD_DATAIN;
1464 if (mfi_wait_command(sc, cm) != 0) {
1465 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1469 hdr = &cm->cm_frame->header;
1470 if (hdr->cmd_status != MFI_STAT_OK) {
1471 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Attach each listed target unless already attached or pending. */
1476 for (i = 0; i < list->ld_count; i++) {
1477 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1478 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1481 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1482 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1485 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1490 free(list, M_MFIBUF);
1492 mfi_release_command(cm);
/*
 * Format an event timestamp for logging.  Returns a pointer to a static
 * buffer (not thread-safe; fine for the single event-decode path).
 */
1498 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1499 * the bits in 24-31 are all set, then it is the number of seconds since
1503 format_timestamp(uint32_t timestamp)
1505 static char buffer[32];
/* 0xffxxxxxx marks a boot-relative timestamp. */
1507 if ((timestamp & 0xff000000) == 0xff000000)
1508 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1511 snprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an event class code to a short human-readable string; unknown
 * codes are rendered numerically into a static buffer (not thread-safe).
 */
1516 format_class(int8_t class)
1518 static char buffer[6];
1521 case MFI_EVT_CLASS_DEBUG:
1523 case MFI_EVT_CLASS_PROGRESS:
1524 return ("progress");
1525 case MFI_EVT_CLASS_INFO:
1527 case MFI_EVT_CLASS_WARNING:
1529 case MFI_EVT_CLASS_CRITICAL:
1531 case MFI_EVT_CLASS_FATAL:
1533 case MFI_EVT_CLASS_DEAD:
/* Unknown class: print the raw value. */
1536 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log an AEN event and act on it: rescan for JBOD changes on host-bus
 * scan requests and PD insert/remove, and detach a logical disk that
 * went offline.  Events older than the boot sequence number, or events
 * arriving while detaching, are logged but otherwise ignored.
 */
1542 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1544 struct mfi_system_pd *syspd = NULL;
1546 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1547 format_timestamp(detail->time), detail->evt_class.members.locale,
1548 format_class(detail->evt_class.members.evt_class),
1549 detail->description);
1551 /* Don't act on old AEN's or while shutting down */
1552 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1555 switch (detail->arg_type) {
1556 case MR_EVT_ARGS_NONE:
1557 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1558 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1559 if (mfi_detect_jbod_change) {
1561 * Probe for new SYSPD's and Delete
1564 sx_xlock(&sc->mfi_config_lock);
1565 mtx_lock(&sc->mfi_io_lock);
1567 mtx_unlock(&sc->mfi_io_lock);
1568 sx_xunlock(&sc->mfi_config_lock);
1572 case MR_EVT_ARGS_LD_STATE:
1573 /* During load time driver reads all the events starting
1574 * from the one that has been logged after shutdown. Avoid
/* LD went offline: find its mfi_disk and detach the child device. */
1577 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1579 struct mfi_disk *ld;
1580 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1582 detail->args.ld_state.ld.target_id)
1586 Fix: for kernel panics when SSCD is removed
1587 KASSERT(ld != NULL, ("volume dissappeared"));
1591 device_delete_child(sc->mfi_dev, ld->ld_dev);
1596 case MR_EVT_ARGS_PD:
1597 if (detail->code == MR_EVT_PD_REMOVED) {
1598 if (mfi_detect_jbod_change) {
1600 * If the removed device is a SYSPD then
1603 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1606 detail->args.pd.device_id) {
1608 device_delete_child(
1617 if (detail->code == MR_EVT_PD_INSERTED) {
1618 if (mfi_detect_jbod_change) {
1619 /* Probe for new SYSPD's */
1620 sx_xlock(&sc->mfi_config_lock);
1621 mtx_lock(&sc->mfi_io_lock);
1623 mtx_unlock(&sc->mfi_io_lock);
1624 sx_xunlock(&sc->mfi_config_lock);
/* Let the CAM layer rescan as well on PD insert/remove. */
1627 if (sc->mfi_cam_rescan_cb != NULL &&
1628 (detail->code == MR_EVT_PD_INSERTED ||
1629 detail->code == MR_EVT_PD_REMOVED)) {
1630 sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
/*
 * Copy an event detail onto the softc event queue and schedule the
 * swi taskqueue to process it.  Allocation is M_NOWAIT since this can
 * run from the completion path; caller holds mfi_io_lock.
 */
1637 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1639 struct mfi_evt_queue_elm *elm;
1641 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1642 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1645 memcpy(&elm->detail, detail, sizeof(*detail));
1646 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1647 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: splice the queued events onto a local list under
 * the io lock, then decode and free each one without holding the lock
 * (mfi_decode_evt may sleep / take other locks).
 */
1651 mfi_handle_evt(void *context, int pending)
1653 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1654 struct mfi_softc *sc;
1655 struct mfi_evt_queue_elm *elm;
1659 mtx_lock(&sc->mfi_io_lock);
1660 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1661 mtx_unlock(&sc->mfi_io_lock);
1662 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1663 TAILQ_REMOVE(&queue, elm, link);
1664 mfi_decode_evt(sc, &elm->detail);
1665 free(elm, M_MFIBUF);
/*
 * Register an asynchronous event notification command with the firmware.
 * If an AEN command is already outstanding, either the existing filter
 * already covers the requested class/locale (nothing to do) or the old
 * command is aborted and re-registered with the merged filter.
 * Caller holds mfi_io_lock.
 */
1670 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1672 struct mfi_command *cm;
1673 struct mfi_dcmd_frame *dcmd;
1674 union mfi_evt current_aen, prior_aen;
1675 struct mfi_evt_detail *ed = NULL;
1678 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1680 current_aen.word = locale;
1681 if (sc->mfi_aen_cm != NULL) {
/* Recover the filter the outstanding AEN was registered with. */
1683 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1684 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1685 !((prior_aen.members.locale & current_aen.members.locale)
1686 ^current_aen.members.locale)) {
/* Merge filters, keep the broader (lower) class, and re-arm. */
1689 prior_aen.members.locale |= current_aen.members.locale;
1690 if (prior_aen.members.evt_class
1691 < current_aen.members.evt_class)
1692 current_aen.members.evt_class =
1693 prior_aen.members.evt_class;
1694 mfi_abort(sc, &sc->mfi_aen_cm);
1698 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1699 (void **)&ed, sizeof(*ed));
/* mbox[0] = starting sequence number, mbox[1] = class/locale filter. */
1703 dcmd = &cm->cm_frame->dcmd;
1704 ((uint32_t *)&dcmd->mbox)[0] = seq;
1705 ((uint32_t *)&dcmd->mbox)[1] = locale;
1706 cm->cm_flags = MFI_CMD_DATAIN;
1707 cm->cm_complete = mfi_aen_complete;
1709 sc->last_seq_num = seq;
1710 sc->mfi_aen_cm = cm;
1712 mfi_enqueue_ready(cm);
/*
 * Completion handler for the AEN command: unless aborted, queue the
 * delivered event, wake poll/select waiters, signal registered AEN
 * consumer processes, then release the command and re-arm AEN at the
 * next sequence number.  Runs with mfi_io_lock held.
 */
1720 mfi_aen_complete(struct mfi_command *cm)
1722 struct mfi_frame_header *hdr;
1723 struct mfi_softc *sc;
1724 struct mfi_evt_detail *detail;
1725 struct mfi_aen *mfi_aen_entry, *tmp;
1726 int seq = 0, aborted = 0;
1729 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1731 if (sc->mfi_aen_cm == NULL)
1734 hdr = &cm->cm_frame->header;
/* Aborted or invalid status: don't treat the payload as an event. */
1736 if (sc->cm_aen_abort ||
1737 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1738 sc->cm_aen_abort = 0;
1741 sc->mfi_aen_triggered = 1;
1742 if (sc->mfi_poll_waiting) {
1743 sc->mfi_poll_waiting = 0;
1744 selwakeup(&sc->mfi_select);
1746 detail = cm->cm_data;
1747 mfi_queue_evt(sc, detail);
/* Next registration resumes just past this event. */
1748 seq = detail->seq + 1;
/* SIGIO every process that registered for AEN delivery. */
1749 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1751 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1753 PROC_LOCK(mfi_aen_entry->p);
1754 kern_psignal(mfi_aen_entry->p, SIGIO);
1755 PROC_UNLOCK(mfi_aen_entry->p);
1756 free(mfi_aen_entry, M_MFIBUF);
1760 free(cm->cm_data, M_MFIBUF);
1761 wakeup(&sc->mfi_aen_cm);
1762 sc->mfi_aen_cm = NULL;
1763 mfi_release_command(cm);
1765 /* set it up again so the driver can catch more events */
1767 mfi_aen_setup(sc, seq);
1770 #define MAX_EVENTS 15
/*
 * Replay the firmware event log from start_seq up to (but not including)
 * stop_seq, fetching up to MAX_EVENTS entries per polled
 * MFI_DCMD_CTRL_EVENT_GET command and queueing each for decoding.
 * Handles the circular log (stop point may precede start point).
 * Caller holds mfi_io_lock.
 */
1773 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1775 struct mfi_command *cm;
1776 struct mfi_dcmd_frame *dcmd;
1777 struct mfi_evt_list *el;
1778 union mfi_evt class_locale;
1779 int error, i, seq, size;
1781 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1783 class_locale.members.reserved = 0;
1784 class_locale.members.locale = mfi_event_locale;
1785 class_locale.members.evt_class = mfi_event_class;
/* Buffer sized for the list header plus MAX_EVENTS details. */
1787 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1789 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1793 for (seq = start_seq;;) {
1794 if ((cm = mfi_dequeue_free(sc)) == NULL) {
/* Build the EVENT_GET DCMD by hand: mbox[0]=seq, mbox[1]=filter. */
1799 dcmd = &cm->cm_frame->dcmd;
1800 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1801 dcmd->header.cmd = MFI_CMD_DCMD;
1802 dcmd->header.timeout = 0;
1803 dcmd->header.data_len = size;
1804 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1805 ((uint32_t *)&dcmd->mbox)[0] = seq;
1806 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1807 cm->cm_sg = &dcmd->sgl;
1808 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1809 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1813 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1814 device_printf(sc->mfi_dev,
1815 "Failed to get controller entries\n");
1816 mfi_release_command(cm);
1820 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1821 BUS_DMASYNC_POSTREAD);
1822 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we've drained the log: normal termination. */
1824 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1825 mfi_release_command(cm);
1828 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1829 device_printf(sc->mfi_dev,
1830 "Error %d fetching controller entries\n",
1831 dcmd->header.cmd_status);
1832 mfi_release_command(cm);
1836 mfi_release_command(cm);
1838 for (i = 0; i < el->count; i++) {
1840 * If this event is newer than 'stop_seq' then
1841 * break out of the loop. Note that the log
1842 * is a circular buffer so we have to handle
1843 * the case that our stop point is earlier in
1844 * the buffer than our start point.
1846 if (el->event[i].seq >= stop_seq) {
1847 if (start_seq <= stop_seq)
1849 else if (el->event[i].seq < start_seq)
1852 mfi_queue_evt(sc, &el->event[i]);
/* Continue the fetch loop just past the last entry returned. */
1854 seq = el->event[el->count - 1].seq + 1;
/*
 * Attach logical drive 'id': record it as pending, fetch its info with
 * MFI_DCMD_LD_GET_INFO (sleeping command), and, unless it is an SSCD
 * (CacheCade) volume, hand off to mfi_add_ld_complete() to create the
 * child device.  Caller holds mfi_io_lock.
 */
1862 mfi_add_ld(struct mfi_softc *sc, int id)
1864 struct mfi_command *cm;
1865 struct mfi_dcmd_frame *dcmd = NULL;
1866 struct mfi_ld_info *ld_info = NULL;
1867 struct mfi_disk_pending *ld_pend;
1870 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Mark the target pending so concurrent probes don't add it twice. */
1872 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1873 if (ld_pend != NULL) {
1874 ld_pend->ld_id = id;
1875 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1878 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1879 (void **)&ld_info, sizeof(*ld_info));
1881 device_printf(sc->mfi_dev,
1882 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1884 free(ld_info, M_MFIBUF);
1887 cm->cm_flags = MFI_CMD_DATAIN;
1888 dcmd = &cm->cm_frame->dcmd;
1890 if (mfi_wait_command(sc, cm) != 0) {
1891 device_printf(sc->mfi_dev,
1892 "Failed to get logical drive: %d\n", id);
1893 free(ld_info, M_MFIBUF);
/* SSCD volumes are not exposed as disks. */
1896 if (ld_info->ld_config.params.isSSCD != 1)
1897 mfi_add_ld_complete(cm);
1899 mfi_release_command(cm);
1900 if (ld_info) /* SSCD drives ld_info free here */
1901 free(ld_info, M_MFIBUF);
/*
 * Second half of logical-drive attach: validate command status, then
 * create and attach the "mfid" child device carrying ld_info as ivars.
 * Drops mfi_io_lock around the newbus calls, which may sleep.
 */
1907 mfi_add_ld_complete(struct mfi_command *cm)
1909 struct mfi_frame_header *hdr;
1910 struct mfi_ld_info *ld_info;
1911 struct mfi_softc *sc;
1915 hdr = &cm->cm_frame->header;
1916 ld_info = cm->cm_private;
/* Abort in progress or command failed: discard and wake waiters. */
1918 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1919 free(ld_info, M_MFIBUF);
1920 wakeup(&sc->mfi_map_sync_cm);
1921 mfi_release_command(cm);
1924 wakeup(&sc->mfi_map_sync_cm);
1925 mfi_release_command(cm);
1927 mtx_unlock(&sc->mfi_io_lock);
1929 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1930 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1931 free(ld_info, M_MFIBUF);
1933 mtx_lock(&sc->mfi_io_lock);
/* ld_info ownership passes to the child via ivars. */
1937 device_set_ivars(child, ld_info);
1938 device_set_desc(child, "MFI Logical Disk");
1939 bus_generic_attach(sc->mfi_dev);
1941 mtx_lock(&sc->mfi_io_lock);
/*
 * Attach system PD 'id': record it as pending, fetch its info with a
 * polled MFI_DCMD_PD_GET_INFO, and hand off to mfi_add_sys_pd_complete()
 * to create the child device.  Caller holds mfi_io_lock.
 */
1944 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1946 struct mfi_command *cm;
1947 struct mfi_dcmd_frame *dcmd = NULL;
1948 struct mfi_pd_info *pd_info = NULL;
1949 struct mfi_system_pending *syspd_pend;
1952 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Mark the PD pending so concurrent probes don't add it twice. */
1954 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1955 if (syspd_pend != NULL) {
1956 syspd_pend->pd_id = id;
1957 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1960 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1961 (void **)&pd_info, sizeof(*pd_info));
1963 device_printf(sc->mfi_dev,
1964 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1967 free(pd_info, M_MFIBUF);
1970 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1971 dcmd = &cm->cm_frame->dcmd;
1973 dcmd->header.scsi_status = 0;
1974 dcmd->header.pad0 = 0;
1975 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1976 device_printf(sc->mfi_dev,
1977 "Failed to get physical drive info %d\n", id);
1978 free(pd_info, M_MFIBUF);
1979 mfi_release_command(cm);
1982 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1983 BUS_DMASYNC_POSTREAD);
1984 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1985 mfi_add_sys_pd_complete(cm);
/*
 * Second half of system-PD attach: validate command status and that the
 * drive is in SYSTEM (JBOD) state, then create and attach the
 * "mfisyspd" child carrying pd_info as ivars.  Drops mfi_io_lock
 * around the newbus calls, which may sleep.
 */
1990 mfi_add_sys_pd_complete(struct mfi_command *cm)
1992 struct mfi_frame_header *hdr;
1993 struct mfi_pd_info *pd_info;
1994 struct mfi_softc *sc;
1998 hdr = &cm->cm_frame->header;
1999 pd_info = cm->cm_private;
2001 if (hdr->cmd_status != MFI_STAT_OK) {
2002 free(pd_info, M_MFIBUF);
2003 mfi_release_command(cm);
/* Only drives exposed as SYSTEM PDs become children. */
2006 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2007 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2008 pd_info->ref.v.device_id);
2009 free(pd_info, M_MFIBUF);
2010 mfi_release_command(cm);
2013 mfi_release_command(cm);
2015 mtx_unlock(&sc->mfi_io_lock);
2017 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2018 device_printf(sc->mfi_dev, "Failed to add system pd\n");
2019 free(pd_info, M_MFIBUF);
2021 mtx_lock(&sc->mfi_io_lock);
/* pd_info ownership passes to the child via ivars. */
2025 device_set_ivars(child, pd_info);
2026 device_set_desc(child, "MFI System PD");
2027 bus_generic_attach(sc->mfi_dev);
2029 mtx_lock(&sc->mfi_io_lock);
/*
 * Pull the next bio off the driver bioq and build an MFI command for it
 * (LD I/O or system-PD passthrough, chosen by bio_driver2).  Returns
 * NULL when resources are short; a bio whose build failed is requeued.
 */
2032 static struct mfi_command *
2033 mfi_bio_command(struct mfi_softc *sc)
2036 struct mfi_command *cm = NULL;
2038 /*reserving two commands to avoid starvation for IOCTL*/
2039 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2042 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2045 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2046 cm = mfi_build_ldio(sc, bio);
2047 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2048 cm = mfi_build_syspdio(sc, bio);
/* Build failed: put the bio back for a later attempt. */
2051 mfi_enqueue_bio(sc, bio);
/*
 * Build a READ/WRITE SCSI CDB for the given LBA and block count,
 * choosing the smallest CDB (6/10/12/16 bytes) that can encode both.
 * Returns the CDB length; 'cdb' must have room for 16 bytes.
 */
2056 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2060 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
/* 6-byte CDB: 21-bit LBA, 8-bit count (and, presumably, byte2 == 0). */
2064 if (((lba & 0x1fffff) == lba)
2065 && ((block_count & 0xff) == block_count)
2067 /* We can fit in a 6 byte cdb */
2068 struct scsi_rw_6 *scsi_cmd;
2070 scsi_cmd = (struct scsi_rw_6 *)cdb;
2071 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2072 scsi_ulto3b(lba, scsi_cmd->addr);
2073 scsi_cmd->length = block_count & 0xff;
2074 scsi_cmd->control = 0;
2075 cdb_len = sizeof(*scsi_cmd);
2076 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2077 /* Need a 10 byte CDB */
2078 struct scsi_rw_10 *scsi_cmd;
2080 scsi_cmd = (struct scsi_rw_10 *)cdb;
2081 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2082 scsi_cmd->byte2 = byte2;
2083 scsi_ulto4b(lba, scsi_cmd->addr);
2084 scsi_cmd->reserved = 0;
2085 scsi_ulto2b(block_count, scsi_cmd->length);
2086 scsi_cmd->control = 0;
2087 cdb_len = sizeof(*scsi_cmd);
2088 } else if (((block_count & 0xffffffff) == block_count) &&
2089 ((lba & 0xffffffff) == lba)) {
2090 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2091 struct scsi_rw_12 *scsi_cmd;
2093 scsi_cmd = (struct scsi_rw_12 *)cdb;
2094 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2095 scsi_cmd->byte2 = byte2;
2096 scsi_ulto4b(lba, scsi_cmd->addr);
2097 scsi_cmd->reserved = 0;
2098 scsi_ulto4b(block_count, scsi_cmd->length);
2099 scsi_cmd->control = 0;
2100 cdb_len = sizeof(*scsi_cmd);
2103 * 16 byte CDB. We'll only get here if the LBA is larger
2106 struct scsi_rw_16 *scsi_cmd;
2108 scsi_cmd = (struct scsi_rw_16 *)cdb;
2109 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2110 scsi_cmd->byte2 = byte2;
2111 scsi_u64to8b(lba, scsi_cmd->addr);
2112 scsi_cmd->reserved = 0;
2113 scsi_ulto4b(block_count, scsi_cmd->length);
2114 scsi_cmd->control = 0;
2115 cdb_len = sizeof(*scsi_cmd);
2121 extern char *unmapped_buf;
/*
 * Build a PD SCSI passthrough command for a system-PD bio: zero the
 * frame (preserving the context), construct the CDB, and fill in the
 * pass frame headers.  Data mapping is deferred (cm_data points at
 * unmapped_buf; the bio is carried in cm_private with MFI_CMD_BIO).
 * Returns NULL when no free command is available or the bio op is
 * unsupported (the bio is then finished with EOPNOTSUPP).
 */
2123 static struct mfi_command *
2124 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2126 struct mfi_command *cm;
2127 struct mfi_pass_frame *pass;
2128 uint32_t context = 0;
2129 int flags = 0, blkcount = 0, readop;
2132 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2134 if ((cm = mfi_dequeue_free(sc)) == NULL)
2137 /* Zero out the MFI frame */
2138 context = cm->cm_frame->header.context;
2139 bzero(cm->cm_frame, sizeof(union mfi_frame));
2140 cm->cm_frame->header.context = context;
2141 pass = &cm->cm_frame->pass;
2142 bzero(pass->cdb, 16);
2143 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2144 switch (bio->bio_cmd) {
2146 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2150 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2154 /* TODO: what about BIO_DELETE??? */
2155 biofinish(bio, NULL, EOPNOTSUPP);
2156 mfi_enqueue_free(cm);
2160 /* Cheat with the sector length to avoid a non-constant division */
2161 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2162 /* Fill the LBA and Transfer length in CDB */
2163 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
/* Target id was stashed in bio_driver1 by the disk layer. */
2165 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2166 pass->header.lun_id = 0;
2167 pass->header.timeout = 0;
2168 pass->header.flags = 0;
2169 pass->header.scsi_status = 0;
2170 pass->header.sense_len = MFI_SENSE_LEN;
2171 pass->header.data_len = bio->bio_bcount;
2172 pass->header.cdb_len = cdb_len;
2173 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2174 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2175 cm->cm_complete = mfi_bio_complete;
2176 cm->cm_private = bio;
2177 cm->cm_data = unmapped_buf;
2178 cm->cm_len = bio->bio_bcount;
2179 cm->cm_sg = &pass->sgl;
2180 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2181 cm->cm_flags = flags;
/*
 * Build a logical-drive READ/WRITE command for a bio: zero the frame
 * (preserving the context) and fill in the io frame with the 64-bit
 * LBA split across lba_hi/lba_lo.  Data mapping is deferred (cm_data
 * points at unmapped_buf; the bio is carried in cm_private with
 * MFI_CMD_BIO).  Returns NULL when no free command is available or the
 * bio op is unsupported (the bio is then finished with EOPNOTSUPP).
 */
2186 static struct mfi_command *
2187 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2189 struct mfi_io_frame *io;
2190 struct mfi_command *cm;
2193 uint32_t context = 0;
2195 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2197 if ((cm = mfi_dequeue_free(sc)) == NULL)
2200 /* Zero out the MFI frame */
2201 context = cm->cm_frame->header.context;
2202 bzero(cm->cm_frame, sizeof(union mfi_frame));
2203 cm->cm_frame->header.context = context;
2204 io = &cm->cm_frame->io;
2205 switch (bio->bio_cmd) {
2207 io->header.cmd = MFI_CMD_LD_READ;
2208 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2211 io->header.cmd = MFI_CMD_LD_WRITE;
2212 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2215 /* TODO: what about BIO_DELETE??? */
2216 biofinish(bio, NULL, EOPNOTSUPP);
2217 mfi_enqueue_free(cm);
2221 /* Cheat with the sector length to avoid a non-constant division */
2222 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
/* Target id was stashed in bio_driver1 by the disk layer. */
2223 io->header.target_id = (uintptr_t)bio->bio_driver1;
2224 io->header.timeout = 0;
2225 io->header.flags = 0;
2226 io->header.scsi_status = 0;
2227 io->header.sense_len = MFI_SENSE_LEN;
2228 io->header.data_len = blkcount;
2229 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2230 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2231 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2232 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2233 cm->cm_complete = mfi_bio_complete;
2234 cm->cm_private = bio;
2235 cm->cm_data = unmapped_buf;
2236 cm->cm_len = bio->bio_bcount;
2237 cm->cm_sg = &io->sgl;
2238 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2239 cm->cm_flags = flags;
/*
 * Completion handler for bio-backed commands: translate firmware/SCSI
 * or driver errors into BIO_ERROR/EIO on the bio, release the command,
 * and finish the bio via the disk layer.
 */
2245 mfi_bio_complete(struct mfi_command *cm)
2248 struct mfi_frame_header *hdr;
2249 struct mfi_softc *sc;
2251 bio = cm->cm_private;
2252 hdr = &cm->cm_frame->header;
/* Firmware or SCSI-level failure: log it with the sense data. */
2255 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2256 bio->bio_flags |= BIO_ERROR;
2257 bio->bio_error = EIO;
2258 device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2259 "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2260 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2261 } else if (cm->cm_error != 0) {
2262 bio->bio_flags |= BIO_ERROR;
2263 bio->bio_error = cm->cm_error;
2264 device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2268 mfi_release_command(cm);
2269 mfi_disk_complete(bio);
/*
 * I/O dispatch loop: while the queue isn't frozen, take work in priority
 * order — already-prepared ready commands, then CAM CCBs, then bios —
 * and hand each to mfi_mapcmd(); on dispatch failure requeue and stop.
 */
2273 mfi_startio(struct mfi_softc *sc)
2275 struct mfi_command *cm;
2276 struct ccb_hdr *ccbh;
2279 /* Don't bother if we're short on resources */
2280 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2283 /* Try a command that has already been prepared */
2284 cm = mfi_dequeue_ready(sc);
/* Next, pending CAM pass-through CCBs. */
2287 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2288 cm = sc->mfi_cam_start(ccbh);
2291 /* Nope, so look for work on the bioq */
2293 cm = mfi_bio_command(sc);
2295 /* No work available, so exit */
2299 /* Send the command to the controller */
2300 if (mfi_mapcmd(sc, cm) != 0) {
2301 device_printf(sc->mfi_dev, "Failed to startio\n");
2302 mfi_requeue_ready(cm);
/*
 * Map a command's data for DMA (by CCB, bio, or plain buffer, per
 * cm_flags) and send it; commands with no data (or MFI_CMD_STP frames)
 * are sent directly.  EINPROGRESS from a deferred busdma load freezes
 * the queue — mfi_data_cb will finish the send later.
 */
2309 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2313 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2315 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
/* Polled commands must not defer the DMA load. */
2316 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2317 if (cm->cm_flags & MFI_CMD_CCB)
2318 error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2319 cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2321 else if (cm->cm_flags & MFI_CMD_BIO)
2322 error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2323 cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2326 error = bus_dmamap_load(sc->mfi_buffer_dmat,
2327 cm->cm_dmamap, cm->cm_data, cm->cm_len,
2328 mfi_data_cb, cm, polled);
2329 if (error == EINPROGRESS) {
2330 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2334 error = mfi_send_frame(sc, cm);
/*
 * busdma load callback: build the scatter/gather list for the command
 * (IEEE "skinny" SGEs for I/O on SKINNY controllers, otherwise 32- or
 * 64-bit SGEs, with special first-segment handling for MFI_CMD_STP),
 * set the transfer-direction flags, sync the map, compute the extra
 * frame count, and send the frame.  May run with or without
 * mfi_io_lock held; acquires it if needed and restores the prior state.
 */
2341 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2343 struct mfi_frame_header *hdr;
2344 struct mfi_command *cm;
2346 struct mfi_softc *sc;
2347 int i, j, first, dir;
2348 int sge_size, locked;
2350 cm = (struct mfi_command *)arg;
2352 hdr = &cm->cm_frame->header;
2356 * We need to check if we have the lock as this is async
2357 * callback so even though our caller mfi_mapcmd asserts
2358 * it has the lock, there is no guarantee that hasn't been
2359 * dropped if bus_dmamap_load returned prior to our
2362 if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2363 mtx_lock(&sc->mfi_io_lock);
/* Load failure: record the error and complete the command. */
2366 printf("error %d in callback\n", error);
2367 cm->cm_error = error;
2368 mfi_complete(sc, cm);
2371 /* Use IEEE sgl only for IO's on a SKINNY controller
2372 * For other commands on a SKINNY controller use either
2373 * sg32 or sg64 based on the sizeof(bus_addr_t).
2374 * Also calculate the total frame size based on the type
2377 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2378 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2379 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2380 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2381 for (i = 0; i < nsegs; i++) {
2382 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2383 sgl->sg_skinny[i].len = segs[i].ds_len;
2384 sgl->sg_skinny[i].flag = 0;
2386 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2387 sge_size = sizeof(struct mfi_sg_skinny);
2388 hdr->sg_count = nsegs;
/* STP: the first cm_stp_len bytes get their own leading SGE. */
2391 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2392 first = cm->cm_stp_len;
2393 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2394 sgl->sg32[j].addr = segs[0].ds_addr;
2395 sgl->sg32[j++].len = first;
2397 sgl->sg64[j].addr = segs[0].ds_addr;
2398 sgl->sg64[j++].len = first;
2402 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2403 for (i = 0; i < nsegs; i++) {
2404 sgl->sg32[j].addr = segs[i].ds_addr + first;
2405 sgl->sg32[j++].len = segs[i].ds_len - first;
2409 for (i = 0; i < nsegs; i++) {
2410 sgl->sg64[j].addr = segs[i].ds_addr + first;
2411 sgl->sg64[j++].len = segs[i].ds_len - first;
2414 hdr->flags |= MFI_FRAME_SGL64;
2417 sge_size = sc->mfi_sge_size;
/* Set DMA sync direction and frame direction flags from cm_flags. */
2421 if (cm->cm_flags & MFI_CMD_DATAIN) {
2422 dir |= BUS_DMASYNC_PREREAD;
2423 hdr->flags |= MFI_FRAME_DIR_READ;
2425 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2426 dir |= BUS_DMASYNC_PREWRITE;
2427 hdr->flags |= MFI_FRAME_DIR_WRITE;
2429 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2430 cm->cm_flags |= MFI_CMD_MAPPED;
2433 * Instead of calculating the total number of frames in the
2434 * compound frame, it's already assumed that there will be at
2435 * least 1 frame, so don't compensate for the modulo of the
2436 * following division.
2438 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2439 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2441 if ((error = mfi_send_frame(sc, cm)) != 0) {
2442 printf("error %d in callback from mfi_send_frame\n", error);
2443 cm->cm_error = error;
2444 mfi_complete(sc, cm);
2449 /* leave the lock in the state we found it */
2451 mtx_unlock(&sc->mfi_io_lock);
/*
 * Submit a prepared frame to the adapter, dispatching to the ThunderBolt
 * path when MFA is enabled and the standard path otherwise.  Requires the
 * io lock.  If submission fails and the command was placed on the busy
 * queue, take it back off so it is not orphaned there.
 */
2457 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2461 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2463 if (sc->MFA_enabled)
2464 error = mfi_tbolt_send_frame(sc, cm);
2466 error = mfi_std_send_frame(sc, cm);
2468 if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2469 mfi_remove_busy(cm);
/*
 * Standard (non-ThunderBolt) frame submission.  Non-polled commands are
 * timestamped and queued busy for the watchdog; polled commands are marked
 * so the firmware skips the reply queue, then busy-waited below for up to
 * mfi_polled_cmd_timeout seconds (tm is in milliseconds).
 */
2475 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2477 struct mfi_frame_header *hdr;
2478 int tm = mfi_polled_cmd_timeout * 1000;
2480 hdr = &cm->cm_frame->header;
2482 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2483 cm->cm_timestamp = time_uptime;
2484 mfi_enqueue_busy(cm);
/* Polled path: sentinel status lets the busy-wait loop detect completion. */
2486 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2487 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2491 * The bus address of the command is aligned on a 64 byte boundary,
2492 * leaving the least 6 bits as zero. For whatever reason, the
2493 * hardware wants the address shifted right by three, leaving just
2494 * 3 zero bits. These three bits are then used as a prefetching
2495 * hint for the hardware to predict how many frames need to be
2496 * fetched across the bus. If a command has more than 8 frames
2497 * then the 3 bits are set to 0x7 and the firmware uses other
2498 * information in the command to determine the total amount to fetch.
2499 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2500 * is enough for both 32bit and 64bit systems.
2502 if (cm->cm_extra_frames > 7)
2503 cm->cm_extra_frames = 7;
2505 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2507 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2510 /* This is a polled command, so busy-wait for it to complete. */
2511 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
/* Still the sentinel after the wait loop => the polled command timed out. */
2518 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2519 device_printf(sc->mfi_dev, "Frame %p timed out "
2520 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Final completion processing for a command: post-sync and unload its DMA
 * map if one was loaded, mark it completed, and invoke its completion
 * callback if set.  Requires the io lock.
 */
2529 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2532 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2534 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP frames are post-read synced unconditionally, like DATAIN. */
2536 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2537 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2538 dir |= BUS_DMASYNC_POSTREAD;
2539 if (cm->cm_flags & MFI_CMD_DATAOUT)
2540 dir |= BUS_DMASYNC_POSTWRITE;
2542 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2543 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2544 cm->cm_flags &= ~MFI_CMD_MAPPED;
2547 cm->cm_flags |= MFI_CMD_COMPLETED;
2549 if (cm->cm_complete != NULL)
2550 cm->cm_complete(cm);
/*
 * Issue a polled MFI_CMD_ABORT for the command referenced by *cm_abort,
 * then wait (up to 5 tsleep periods) for the target command to complete.
 * If it never does, force its completion callback so the waiter is not
 * stuck forever.  cm_abort is a pointer-to-pointer so the completion path
 * can NULL it out; hence the re-checks of *cm_abort below.
 */
2556 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2558 struct mfi_command *cm;
2559 struct mfi_abort_frame *abort;
2561 uint32_t context = 0;
2563 mtx_lock(&sc->mfi_io_lock);
2564 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2565 mtx_unlock(&sc->mfi_io_lock);
2569 /* Zero out the MFI frame */
/* Preserve the per-command context across the bzero of the frame. */
2570 context = cm->cm_frame->header.context;
2571 bzero(cm->cm_frame, sizeof(union mfi_frame));
2572 cm->cm_frame->header.context = context;
2574 abort = &cm->cm_frame->abort;
2575 abort->header.cmd = MFI_CMD_ABORT;
2576 abort->header.flags = 0;
2577 abort->header.scsi_status = 0;
2578 abort->abort_context = (*cm_abort)->cm_frame->header.context;
2579 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2580 abort->abort_mfi_addr_hi =
2581 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2583 cm->cm_flags = MFI_CMD_POLLED;
2585 if ((error = mfi_mapcmd(sc, cm)) != 0)
2586 device_printf(sc->mfi_dev, "failed to abort command\n");
2587 mfi_release_command(cm);
2589 mtx_unlock(&sc->mfi_io_lock);
/* Give the aborted command a bounded window to complete on its own. */
2590 while (i < 5 && *cm_abort != NULL) {
2591 tsleep(cm_abort, 0, "mfiabort",
2595 if (*cm_abort != NULL) {
2596 /* Force a complete if command didn't abort */
2597 mtx_lock(&sc->mfi_io_lock);
2598 (*cm_abort)->cm_complete(*cm_abort);
2599 mtx_unlock(&sc->mfi_io_lock);
/*
 * Polled LD write of 'len' bytes at 'lba' on logical disk 'id', used by the
 * crash-dump path (no interrupts, no sleeping).  Builds an io frame, maps
 * and issues it polled, then unconditionally syncs/unloads the map and
 * releases the command.
 */
2606 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2609 struct mfi_command *cm;
2610 struct mfi_io_frame *io;
2612 uint32_t context = 0;
2614 if ((cm = mfi_dequeue_free(sc)) == NULL)
2617 /* Zero out the MFI frame */
2618 context = cm->cm_frame->header.context;
2619 bzero(cm->cm_frame, sizeof(union mfi_frame));
2620 cm->cm_frame->header.context = context;
2622 io = &cm->cm_frame->io;
2623 io->header.cmd = MFI_CMD_LD_WRITE;
2624 io->header.target_id = id;
2625 io->header.timeout = 0;
2626 io->header.flags = 0;
2627 io->header.scsi_status = 0;
2628 io->header.sense_len = MFI_SENSE_LEN;
/* data_len for io frames is a sector count, not a byte count. */
2629 io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2630 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2631 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2632 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2633 io->lba_lo = lba & 0xffffffff;
2636 cm->cm_sg = &io->sgl;
2637 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2638 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2640 if ((error = mfi_mapcmd(sc, cm)) != 0)
2641 device_printf(sc->mfi_dev, "failed dump blocks\n");
2642 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2643 BUS_DMASYNC_POSTWRITE);
2644 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2645 mfi_release_command(cm);
/*
 * Polled write to a system physical disk for the crash-dump path.  Unlike
 * mfi_dump_blocks() this uses a SCSI pass-through frame with a CDB built
 * by mfi_build_cdb(), since syspds are addressed as raw SCSI targets.
 */
2651 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2654 struct mfi_command *cm;
2655 struct mfi_pass_frame *pass;
2656 int error, readop, cdb_len;
2659 if ((cm = mfi_dequeue_free(sc)) == NULL)
2662 pass = &cm->cm_frame->pass;
2663 bzero(pass->cdb, 16);
2664 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* readop is 0 here (write); the CDB encodes lba and block count. */
2667 blkcount = howmany(len, MFI_SECTOR_LEN);
2668 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2669 pass->header.target_id = id;
2670 pass->header.timeout = 0;
2671 pass->header.flags = 0;
2672 pass->header.scsi_status = 0;
2673 pass->header.sense_len = MFI_SENSE_LEN;
2674 pass->header.data_len = len;
2675 pass->header.cdb_len = cdb_len;
2676 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2677 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2680 cm->cm_sg = &pass->sgl;
2681 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2682 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2684 if ((error = mfi_mapcmd(sc, cm)) != 0)
2685 device_printf(sc->mfi_dev, "failed dump blocks\n");
2686 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2687 BUS_DMASYNC_POSTWRITE);
2688 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2689 mfi_release_command(cm);
/*
 * cdev open entry point: refuse opens while the driver is detaching,
 * otherwise mark the control device open.
 */
2695 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2697 struct mfi_softc *sc;
2702 mtx_lock(&sc->mfi_io_lock);
2703 if (sc->mfi_detaching)
2706 sc->mfi_flags |= MFI_FLAGS_OPEN;
2709 mtx_unlock(&sc->mfi_io_lock);
/*
 * cdev close entry point: clear the open flag and drop any AEN
 * registrations belonging to the closing process.
 */
2715 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2717 struct mfi_softc *sc;
2718 struct mfi_aen *mfi_aen_entry, *tmp;
2722 mtx_lock(&sc->mfi_io_lock);
2723 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* _SAFE variant because entries are removed/freed during traversal. */
2725 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2726 if (mfi_aen_entry->p == curproc) {
2727 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2729 free(mfi_aen_entry, M_MFIBUF);
2732 mtx_unlock(&sc->mfi_io_lock);
/*
 * Take the exclusive configuration sx lock for DCMD opcodes that change
 * the array configuration; other opcodes take no lock.  The return value
 * (elided here) tells mfi_config_unlock() whether to release it.
 */
2737 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2741 case MFI_DCMD_LD_DELETE:
2742 case MFI_DCMD_CFG_ADD:
2743 case MFI_DCMD_CFG_CLEAR:
2744 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2745 sx_xlock(&sc->mfi_config_lock);
/* Release the config sx lock iff mfi_config_lock() reported taking it. */
2753 mfi_config_unlock(struct mfi_softc *sc, int locked)
2757 sx_xunlock(&sc->mfi_config_lock);
2761 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * Before a userland DCMD is issued: disable the mfid/mfisyspd disk(s) the
 * command is about to delete or reconfigure, so GEOM consumers are detached
 * first.  A non-zero return vetoes the command.  Requires the io lock.
 */
2765 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2767 struct mfi_disk *ld, *ld2;
2769 struct mfi_system_pd *syspd = NULL;
2773 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2775 switch (cm->cm_frame->dcmd.opcode) {
2776 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE. */
2777 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2778 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2784 error = mfi_disk_disable(ld);
2786 case MFI_DCMD_CFG_CLEAR:
/* CFG_CLEAR affects every LD: disable all; on failure re-enable
 * the ones already disabled (ld2 walks the prefix). */
2787 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2788 error = mfi_disk_disable(ld);
2793 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2796 mfi_disk_enable(ld2);
2800 case MFI_DCMD_PD_STATE_SET:
2801 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
/* mbox[2] is the requested PD state; only a transition to
 * UNCONFIGURED_GOOD needs the syspd disabled first. */
2803 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2804 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2805 if (syspd->pd_id == syspd_id)
2812 error = mfi_syspd_disable(syspd);
2820 /* Perform post-issue checks on commands from userland. */
/*
 * After a userland DCMD completes: on success, delete the child device(s)
 * for removed LDs (dropping the io lock around newbus calls); on failure,
 * re-enable whatever mfi_check_command_pre() disabled.
 */
2822 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2824 struct mfi_disk *ld, *ldn;
2825 struct mfi_system_pd *syspd = NULL;
2829 switch (cm->cm_frame->dcmd.opcode) {
2830 case MFI_DCMD_LD_DELETE:
2831 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2832 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2835 KASSERT(ld != NULL, ("volume dissappeared"));
2836 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* device_delete_child() may sleep: must drop the io mutex. */
2837 mtx_unlock(&sc->mfi_io_lock);
2839 device_delete_child(sc->mfi_dev, ld->ld_dev);
2841 mtx_lock(&sc->mfi_io_lock);
2843 mfi_disk_enable(ld);
2845 case MFI_DCMD_CFG_CLEAR:
2846 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2847 mtx_unlock(&sc->mfi_io_lock);
2849 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2850 device_delete_child(sc->mfi_dev, ld->ld_dev);
2853 mtx_lock(&sc->mfi_io_lock);
2855 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2856 mfi_disk_enable(ld);
2859 case MFI_DCMD_CFG_ADD:
2862 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2865 case MFI_DCMD_PD_STATE_SET:
2866 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2868 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2869 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2870 if (syspd->pd_id == syspd_id)
2876 /* If the transition fails then enable the syspd again */
2877 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2878 mfi_syspd_enable(syspd);
/*
 * Detect whether a CFG_ADD / LD_DELETE userland command targets a CacheCade
 * SSC disk (isSSCD).  For LD_DELETE this issues a synchronous
 * MFI_DCMD_LD_GET_INFO to inspect the target LD's config.  The (elided)
 * return value tells mfi_ioctl() to skip the pre/post disk checks for SSCDs.
 */
2884 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2886 struct mfi_config_data *conf_data;
2887 struct mfi_command *ld_cm = NULL;
2888 struct mfi_ld_info *ld_info = NULL;
2889 struct mfi_ld_config *ld;
2893 conf_data = (struct mfi_config_data *)cm->cm_data;
2895 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
/* The LD config record sits after the array descriptors in the
 * config buffer; step over array_size * array_count bytes. */
2896 p = (char *)conf_data->array;
2897 p += conf_data->array_size * conf_data->array_count;
2898 ld = (struct mfi_ld_config *)p;
2899 if (ld->params.isSSCD == 1)
2901 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2902 error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2903 (void **)&ld_info, sizeof(*ld_info));
2905 device_printf(sc->mfi_dev, "Failed to allocate"
2906 "MFI_DCMD_LD_GET_INFO %d", error);
2908 free(ld_info, M_MFIBUF);
/* Query the same LD id the delete targets (mbox[0]). */
2911 ld_cm->cm_flags = MFI_CMD_DATAIN;
2912 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2913 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2914 if (mfi_wait_command(sc, ld_cm) != 0) {
2915 device_printf(sc->mfi_dev, "failed to get log drv\n");
2916 mfi_release_command(ld_cm);
2917 free(ld_info, M_MFIBUF);
2921 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2922 free(ld_info, M_MFIBUF);
2923 mfi_release_command(ld_cm);
2927 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2929 if (ld_info->ld_config.params.isSSCD == 1)
2932 mfi_release_command(ld_cm);
2933 free(ld_info, M_MFIBUF);
/*
 * Set up kernel bounce buffers for an MFI_CMD_STP ioctl: for each user SG
 * entry, create a DMA tag/map, allocate DMA-able memory, record its bus
 * address in both the megasas SG array and the frame's sg32/sg64 list, and
 * copy the user data in.  Marks the command MFI_CMD_MAPPED on success.
 */
2940 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2943 struct mfi_ioc_packet *ioc;
2944 ioc = (struct mfi_ioc_packet *)arg;
2945 int sge_size, error;
2946 struct megasas_sge *kern_sge;
2948 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* kern_sge points into the frame at the user-specified SGL offset. */
2949 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2950 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2952 if (sizeof(bus_addr_t) == 8) {
2953 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2954 cm->cm_extra_frames = 2;
2955 sge_size = sizeof(struct mfi_sg64);
2957 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2958 sge_size = sizeof(struct mfi_sg32);
2961 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2962 for (i = 0; i < ioc->mfi_sge_count; i++) {
2963 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2964 1, 0, /* algnmnt, boundary */
2965 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2966 BUS_SPACE_MAXADDR, /* highaddr */
2967 NULL, NULL, /* filter, filterarg */
2968 ioc->mfi_sgl[i].iov_len,/* maxsize */
2970 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2971 BUS_DMA_ALLOCNOW, /* flags */
2972 NULL, NULL, /* lockfunc, lockarg */
2973 &sc->mfi_kbuff_arr_dmat[i])) {
2974 device_printf(sc->mfi_dev,
2975 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2979 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2980 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2981 &sc->mfi_kbuff_arr_dmamap[i])) {
2982 device_printf(sc->mfi_dev,
2983 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
/* mfi_addr_cb stores the single-segment bus address into
 * mfi_kbuff_arr_busaddr[i]. */
2987 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2988 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2989 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2990 &sc->mfi_kbuff_arr_busaddr[i], 0);
2992 if (!sc->kbuff_arr[i]) {
2993 device_printf(sc->mfi_dev,
2994 "Could not allocate memory for kbuff_arr info\n");
2997 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2998 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
/* Mirror the entry into the frame's native SG list (64- or 32-bit). */
3000 if (sizeof(bus_addr_t) == 8) {
3001 cm->cm_frame->stp.sgl.sg64[i].addr =
3002 kern_sge[i].phys_addr;
3003 cm->cm_frame->stp.sgl.sg64[i].len =
3004 ioc->mfi_sgl[i].iov_len;
3006 cm->cm_frame->stp.sgl.sg32[i].addr =
3007 kern_sge[i].phys_addr;
3008 cm->cm_frame->stp.sgl.sg32[i].len =
3009 ioc->mfi_sgl[i].iov_len;
3012 error = copyin(ioc->mfi_sgl[i].iov_base,
3014 ioc->mfi_sgl[i].iov_len);
3016 device_printf(sc->mfi_dev, "Copy in failed\n");
3021 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * Execute a userland DCMD passthru (MFIIO_PASSTHRU): copy in the optional
 * data buffer (capped at 1MB), run the frame through the normal command
 * path with pre/post checks, then copy the frame and data back out.
 */
3026 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3028 struct mfi_command *cm;
3029 struct mfi_dcmd_frame *dcmd;
3030 void *ioc_buf = NULL;
3032 int error = 0, locked;
3035 if (ioc->buf_size > 0) {
/* Reject absurd user sizes before the M_WAITOK allocation. */
3036 if (ioc->buf_size > 1024 * 1024)
3038 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3039 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3041 device_printf(sc->mfi_dev, "failed to copyin\n");
3042 free(ioc_buf, M_MFIBUF);
3047 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3049 mtx_lock(&sc->mfi_io_lock);
/* Sleep until a free command is available; wake channel is this function. */
3050 while ((cm = mfi_dequeue_free(sc)) == NULL)
3051 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3053 /* Save context for later */
3054 context = cm->cm_frame->header.context;
3056 dcmd = &cm->cm_frame->dcmd;
3057 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3059 cm->cm_sg = &dcmd->sgl;
3060 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3061 cm->cm_data = ioc_buf;
3062 cm->cm_len = ioc->buf_size;
3064 /* restore context */
3065 cm->cm_frame->header.context = context;
3067 /* Cheat since we don't know if we're writing or reading */
3068 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3070 error = mfi_check_command_pre(sc, cm);
3074 error = mfi_wait_command(sc, cm);
3076 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the (possibly firmware-updated) frame to the caller. */
3079 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3080 mfi_check_command_post(sc, cm);
3082 mfi_release_command(cm);
3083 mtx_unlock(&sc->mfi_io_lock);
3084 mfi_config_unlock(sc, locked);
3085 if (ioc->buf_size > 0)
3086 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3088 free(ioc_buf, M_MFIBUF);
/* Widen a 32-bit user pointer value back to a kernel void *. */
3092 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * Main ioctl dispatcher for the mfi control device.  Handles queue stats,
 * disk queries, raw MFI frame passthru (native and 32-bit compat), AEN
 * registration, Linux ioctl shims, and DCMD passthru.  The frame-passthru
 * path copies the user frame in, bounces each SG entry through a kernel
 * buffer, runs the command, and copies data/sense/status back out.
 */
3095 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3097 struct mfi_softc *sc;
3098 union mfi_statrequest *ms;
3099 struct mfi_ioc_packet *ioc;
3100 #ifdef COMPAT_FREEBSD32
3101 struct mfi_ioc_packet32 *ioc32;
3103 struct mfi_ioc_aen *aen;
3104 struct mfi_command *cm = NULL;
3105 uint32_t context = 0;
3106 union mfi_sense_ptr sense_ptr;
3107 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3110 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3111 #ifdef COMPAT_FREEBSD32
3112 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3113 struct mfi_ioc_passthru iop_swab;
/* Refuse ioctls while the controller is in a critical-error state or
 * still replaying pended commands after an adapter reset. */
3123 if (sc->hw_crit_error)
3126 if (sc->issuepend_done == 0)
3131 ms = (union mfi_statrequest *)arg;
3132 switch (ms->ms_item) {
3137 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3138 sizeof(struct mfi_qstat));
3145 case MFIIO_QUERY_DISK:
3147 struct mfi_query_disk *qd;
3148 struct mfi_disk *ld;
3150 qd = (struct mfi_query_disk *)arg;
3151 mtx_lock(&sc->mfi_io_lock);
3152 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3153 if (ld->ld_id == qd->array_id)
3158 mtx_unlock(&sc->mfi_io_lock);
3162 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3164 bzero(qd->devname, SPECNAMELEN + 1);
3165 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3166 mtx_unlock(&sc->mfi_io_lock);
3170 #ifdef COMPAT_FREEBSD32
3174 devclass_t devclass;
3175 ioc = (struct mfi_ioc_packet *)arg;
/* The ioctl may target a different adapter number; redirect the
 * softc lookup through the devclass if so. */
3178 adapter = ioc->mfi_adapter_no;
3179 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3180 devclass = devclass_find("mfi");
3181 sc = devclass_get_softc(devclass, adapter);
3183 mtx_lock(&sc->mfi_io_lock);
3184 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3185 mtx_unlock(&sc->mfi_io_lock);
3188 mtx_unlock(&sc->mfi_io_lock);
3192 * save off original context since copying from user
3193 * will clobber some data
3195 context = cm->cm_frame->header.context;
3196 cm->cm_frame->header.context = cm->cm_index;
3198 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3199 2 * MEGAMFI_FRAME_SIZE);
3200 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3201 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3202 cm->cm_frame->header.scsi_status = 0;
3203 cm->cm_frame->header.pad0 = 0;
3204 if (ioc->mfi_sge_count) {
3206 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3210 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3211 cm->cm_flags |= MFI_CMD_DATAIN;
3212 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3213 cm->cm_flags |= MFI_CMD_DATAOUT;
3214 /* Legacy app shim */
3215 if (cm->cm_flags == 0)
3216 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3217 cm->cm_len = cm->cm_frame->header.data_len;
/* STP frames prepend the first SG entry's length to cm_len; the
 * 32-bit compat path reads it from the 32-bit packet layout. */
3218 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3219 #ifdef COMPAT_FREEBSD32
3220 if (cmd == MFI_CMD) {
3223 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3224 #ifdef COMPAT_FREEBSD32
3226 /* 32bit on 64bit */
3227 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3228 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3231 cm->cm_len += cm->cm_stp_len;
3234 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3235 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3241 /* restore header context */
3242 cm->cm_frame->header.context = context;
3244 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3245 res = mfi_stp_cmd(sc, cm, arg);
/* Copy user SG data into the kernel bounce buffer for outbound
 * (and STP) commands before issuing. */
3250 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3251 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3252 for (i = 0; i < ioc->mfi_sge_count; i++) {
3253 #ifdef COMPAT_FREEBSD32
3254 if (cmd == MFI_CMD) {
3257 addr = ioc->mfi_sgl[i].iov_base;
3258 len = ioc->mfi_sgl[i].iov_len;
3259 #ifdef COMPAT_FREEBSD32
3261 /* 32bit on 64bit */
3262 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3263 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3264 len = ioc32->mfi_sgl[i].iov_len;
3267 error = copyin(addr, temp, len);
3269 device_printf(sc->mfi_dev,
3270 "Copy in failed\n");
3278 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3279 locked = mfi_config_lock(sc,
3280 cm->cm_frame->dcmd.opcode);
/* Point pass-through frames' sense buffer at this command's
 * preallocated DMA-able sense area. */
3282 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3283 cm->cm_frame->pass.sense_addr_lo =
3284 (uint32_t)cm->cm_sense_busaddr;
3285 cm->cm_frame->pass.sense_addr_hi =
3286 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3288 mtx_lock(&sc->mfi_io_lock);
/* SSCD (CacheCade) targets skip the LD disable/enable checks. */
3289 skip_pre_post = mfi_check_for_sscd (sc, cm);
3290 if (!skip_pre_post) {
3291 error = mfi_check_command_pre(sc, cm);
3293 mtx_unlock(&sc->mfi_io_lock);
3297 if ((error = mfi_wait_command(sc, cm)) != 0) {
3298 device_printf(sc->mfi_dev,
3299 "Controller polled failed\n");
3300 mtx_unlock(&sc->mfi_io_lock);
3303 if (!skip_pre_post) {
3304 mfi_check_command_post(sc, cm);
3306 mtx_unlock(&sc->mfi_io_lock);
/* Copy results back to the user's SG list for inbound commands. */
3308 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3310 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3311 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3312 for (i = 0; i < ioc->mfi_sge_count; i++) {
3313 #ifdef COMPAT_FREEBSD32
3314 if (cmd == MFI_CMD) {
3317 addr = ioc->mfi_sgl[i].iov_base;
3318 len = ioc->mfi_sgl[i].iov_len;
3319 #ifdef COMPAT_FREEBSD32
3321 /* 32bit on 64bit */
3322 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3323 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3324 len = ioc32->mfi_sgl[i].iov_len;
3327 error = copyout(temp, addr, len);
3329 device_printf(sc->mfi_dev,
3330 "Copy out failed\n");
3338 if (ioc->mfi_sense_len) {
3339 /* get user-space sense ptr then copy out sense */
3340 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3341 &sense_ptr.sense_ptr_data[0],
3342 sizeof(sense_ptr.sense_ptr_data));
3343 #ifdef COMPAT_FREEBSD32
3344 if (cmd != MFI_CMD) {
3346 * not 64bit native so zero out any address
3348 sense_ptr.addr.high = 0;
3351 error = copyout(cm->cm_sense, sense_ptr.user_space,
3352 ioc->mfi_sense_len);
3354 device_printf(sc->mfi_dev,
3355 "Copy out failed\n");
3360 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3362 mfi_config_unlock(sc, locked);
3364 free(data, M_MFIBUF);
/* Tear down the STP bounce buffers built by mfi_stp_cmd(). */
3365 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3366 for (i = 0; i < 2; i++) {
3367 if (sc->kbuff_arr[i]) {
3368 if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3370 sc->mfi_kbuff_arr_dmat[i],
3371 sc->mfi_kbuff_arr_dmamap[i]
3373 if (sc->kbuff_arr[i] != NULL)
3375 sc->mfi_kbuff_arr_dmat[i],
3377 sc->mfi_kbuff_arr_dmamap[i]
3379 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3380 bus_dma_tag_destroy(
3381 sc->mfi_kbuff_arr_dmat[i]);
3386 mtx_lock(&sc->mfi_io_lock);
3387 mfi_release_command(cm);
3388 mtx_unlock(&sc->mfi_io_lock);
3394 aen = (struct mfi_ioc_aen *)arg;
3395 mtx_lock(&sc->mfi_io_lock);
3396 error = mfi_aen_register(sc, aen->aen_seq_num,
3397 aen->aen_class_locale);
3398 mtx_unlock(&sc->mfi_io_lock);
3401 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3403 devclass_t devclass;
3404 struct mfi_linux_ioc_packet l_ioc;
3407 devclass = devclass_find("mfi");
3408 if (devclass == NULL)
3411 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3414 adapter = l_ioc.lioc_adapter_no;
3415 sc = devclass_get_softc(devclass, adapter);
3418 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3419 cmd, arg, flag, td));
3422 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3424 devclass_t devclass;
3425 struct mfi_linux_ioc_aen l_aen;
3428 devclass = devclass_find("mfi");
3429 if (devclass == NULL)
3432 error = copyin(arg, &l_aen, sizeof(l_aen));
3435 adapter = l_aen.laen_adapter_no;
3436 sc = devclass_get_softc(devclass, adapter);
3439 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3440 cmd, arg, flag, td));
3443 #ifdef COMPAT_FREEBSD32
3444 case MFIIO_PASSTHRU32:
/* 32-bit passthru: reject unless the process is actually ILP32,
 * then widen the request into iop_swab and fall through. */
3445 if (!SV_CURPROC_FLAG(SV_ILP32)) {
3449 iop_swab.ioc_frame = iop32->ioc_frame;
3450 iop_swab.buf_size = iop32->buf_size;
3451 iop_swab.buf = PTRIN(iop32->buf);
3455 case MFIIO_PASSTHRU:
3456 error = mfi_user_command(sc, iop);
3457 #ifdef COMPAT_FREEBSD32
3458 if (cmd == MFIIO_PASSTHRU32)
3459 iop32->ioc_frame = iop_swab.ioc_frame;
3463 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Linux-compatibility ioctl handler (32-bit Linux megaraid_sas layout).
 * Mirrors the native frame-passthru path: copy in the Linux ioc packet,
 * bounce SG data through a kernel buffer, run the command with config
 * pre/post checks, then copy data, sense, and status back out.
 */
3472 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3474 struct mfi_softc *sc;
3475 struct mfi_linux_ioc_packet l_ioc;
3476 struct mfi_linux_ioc_aen l_aen;
3477 struct mfi_command *cm = NULL;
3478 struct mfi_aen *mfi_aen_entry;
3479 union mfi_sense_ptr sense_ptr;
3480 uint32_t context = 0;
3481 uint8_t *data = NULL, *temp;
3488 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3489 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the user-supplied SG count before trusting it below. */
3493 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3497 mtx_lock(&sc->mfi_io_lock);
3498 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3499 mtx_unlock(&sc->mfi_io_lock);
3502 mtx_unlock(&sc->mfi_io_lock);
3506 * save off original context since copying from user
3507 * will clobber some data
3509 context = cm->cm_frame->header.context;
3511 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3512 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3513 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3514 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3515 cm->cm_frame->header.scsi_status = 0;
3516 cm->cm_frame->header.pad0 = 0;
3517 if (l_ioc.lioc_sge_count)
3519 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3521 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3522 cm->cm_flags |= MFI_CMD_DATAIN;
3523 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3524 cm->cm_flags |= MFI_CMD_DATAOUT;
3525 cm->cm_len = cm->cm_frame->header.data_len;
3527 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3528 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3534 /* restore header context */
3535 cm->cm_frame->header.context = context;
/* Gather user SG data into the contiguous kernel buffer. */
3538 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3539 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3540 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3542 l_ioc.lioc_sgl[i].iov_len);
3544 device_printf(sc->mfi_dev,
3545 "Copy in failed\n");
3548 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3552 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3553 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3555 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3556 cm->cm_frame->pass.sense_addr_lo =
3557 (uint32_t)cm->cm_sense_busaddr;
3558 cm->cm_frame->pass.sense_addr_hi =
3559 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3562 mtx_lock(&sc->mfi_io_lock);
3563 error = mfi_check_command_pre(sc, cm);
3565 mtx_unlock(&sc->mfi_io_lock);
3569 if ((error = mfi_wait_command(sc, cm)) != 0) {
3570 device_printf(sc->mfi_dev,
3571 "Controller polled failed\n");
3572 mtx_unlock(&sc->mfi_io_lock);
3576 mfi_check_command_post(sc, cm);
3577 mtx_unlock(&sc->mfi_io_lock);
/* Scatter the results back to the user's SG list. */
3580 if (cm->cm_flags & MFI_CMD_DATAIN) {
3581 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3582 error = copyout(temp,
3583 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3584 l_ioc.lioc_sgl[i].iov_len);
3586 device_printf(sc->mfi_dev,
3587 "Copy out failed\n");
3590 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3594 if (l_ioc.lioc_sense_len) {
3595 /* get user-space sense ptr then copy out sense */
3596 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3597 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3598 &sense_ptr.sense_ptr_data[0],
3599 sizeof(sense_ptr.sense_ptr_data));
3602 * only 32bit Linux support so zero out any
3603 * address over 32bit
3605 sense_ptr.addr.high = 0;
3607 error = copyout(cm->cm_sense, sense_ptr.user_space,
3608 l_ioc.lioc_sense_len);
3610 device_printf(sc->mfi_dev,
3611 "Copy out failed\n");
/* Write the final command status directly into the user packet. */
3616 error = copyout(&cm->cm_frame->header.cmd_status,
3617 &((struct mfi_linux_ioc_packet*)arg)
3618 ->lioc_frame.hdr.cmd_status,
3621 device_printf(sc->mfi_dev,
3622 "Copy out failed\n");
3627 mfi_config_unlock(sc, locked);
3629 free(data, M_MFIBUF);
3631 mtx_lock(&sc->mfi_io_lock);
3632 mfi_release_command(cm);
3633 mtx_unlock(&sc->mfi_io_lock);
3637 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3638 error = copyin(arg, &l_aen, sizeof(l_aen));
3641 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3642 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3644 mtx_lock(&sc->mfi_io_lock);
3645 if (mfi_aen_entry != NULL) {
3646 mfi_aen_entry->p = curproc;
3647 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
/* On registration failure, undo the pid-list insertion. */
3650 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3651 l_aen.laen_class_locale);
3654 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3656 free(mfi_aen_entry, M_MFIBUF);
3658 mtx_unlock(&sc->mfi_io_lock);
3662 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * poll(2) handler for the control device: readable when an AEN has fired;
 * otherwise record the selector so a future AEN can wake the caller.
 */
3671 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3673 struct mfi_softc *sc;
3678 if (poll_events & (POLLIN | POLLRDNORM)) {
3679 if (sc->mfi_aen_triggered != 0) {
3680 revents |= poll_events & (POLLIN | POLLRDNORM);
/* Consume the trigger so each AEN wakes pollers only once. */
3681 sc->mfi_aen_triggered = 0;
3683 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3689 if (poll_events & (POLLIN | POLLRDNORM)) {
3690 sc->mfi_poll_waiting = 1;
3691 selrecord(td, &sc->mfi_select);
/*
 * Debug helper: walk every mfi softc in the devclass and report busy
 * commands older than mfi_cmd_timeout seconds.
 * NOTE(review): the function header line is elided from this listing —
 * presumably `static void mfi_dump_all(void)`; confirm against the full file.
 */
3701 struct mfi_softc *sc;
3702 struct mfi_command *cm;
3708 dc = devclass_find("mfi");
3710 printf("No mfi dev class\n");
/* Iterate adapter units until devclass_get_softc() runs out. */
3714 for (i = 0; ; i++) {
3715 sc = devclass_get_softc(dc, i);
3718 device_printf(sc->mfi_dev, "Dumping\n\n");
3720 deadline = time_uptime - mfi_cmd_timeout;
3721 mtx_lock(&sc->mfi_io_lock);
3722 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3723 if (cm->cm_timestamp <= deadline) {
3724 device_printf(sc->mfi_dev,
3725 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3726 cm, (int)(time_uptime - cm->cm_timestamp));
3737 mtx_unlock(&sc->mfi_io_lock);
3744 mfi_timeout(void *data)
3746 struct mfi_softc *sc = (struct mfi_softc *)data;
3747 struct mfi_command *cm, *tmp;
3751 deadline = time_uptime - mfi_cmd_timeout;
3752 if (sc->adpreset == 0) {
3753 if (!mfi_tbolt_reset(sc)) {
3754 callout_reset(&sc->mfi_watchdog_callout,
3755 mfi_cmd_timeout * hz, mfi_timeout, sc);
3759 mtx_lock(&sc->mfi_io_lock);
3760 TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3761 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3763 if (cm->cm_timestamp <= deadline) {
3764 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3765 cm->cm_timestamp = time_uptime;
3767 device_printf(sc->mfi_dev,
3768 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3769 cm, (int)(time_uptime - cm->cm_timestamp)
3772 MFI_VALIDATE_CMD(sc, cm);
3774 * While commands can get stuck forever we do
3775 * not fail them as there is no way to tell if
3776 * the controller has actually processed them
3779 * In addition its very likely that force
3780 * failing a command here would cause a panic
3793 mtx_unlock(&sc->mfi_io_lock);
3795 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,