2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
4 * Copyright (c) 2006 IronPort Systems
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2007 LSI Corp.
30 * Copyright (c) 2007 Rajesh Prabhakaran.
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sysctl.h>
63 #include <sys/malloc.h>
64 #include <sys/kernel.h>
66 #include <sys/selinfo.h>
69 #include <sys/eventhandler.h>
72 #include <sys/ioccom.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
79 #include <machine/bus.h>
80 #include <machine/resource.h>
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
88 static int mfi_alloc_commands(struct mfi_softc *);
89 static int mfi_comms_init(struct mfi_softc *);
90 static int mfi_get_controller_info(struct mfi_softc *);
91 static int mfi_get_log_state(struct mfi_softc *,
92 struct mfi_evt_log_state **);
93 static int mfi_parse_entries(struct mfi_softc *, int, int);
94 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void mfi_startup(void *arg);
96 static void mfi_intr(void *arg);
97 static void mfi_ldprobe(struct mfi_softc *sc);
98 static void mfi_syspdprobe(struct mfi_softc *sc);
99 static void mfi_handle_evt(void *context, int pending);
100 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void mfi_aen_complete(struct mfi_command *);
102 static int mfi_add_ld(struct mfi_softc *sc, int);
103 static void mfi_add_ld_complete(struct mfi_command *);
104 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107 static void mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void mfi_timeout(void *);
115 static int mfi_user_command(struct mfi_softc *,
116 struct mfi_ioc_passthru *);
117 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
125 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
/*
 * Loader/sysctl tunables under hw.mfi.  NOTE(review): some lines are elided
 * in this view (original line numbering skips).
 */
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
134 "MFI driver parameters");
/* Which event locales are reported via AEN (default: all). */
135 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
137 0, "event message locale");
/* Minimum event class that is reported (default: informational). */
139 static int mfi_event_class = MFI_EVT_CLASS_INFO;
140 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
141 0, "event message class");
/* Driver-imposed command cap; firmware limit still applies (see mfi_attach). */
143 static int mfi_max_cmds = 128;
144 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
145 0, "Max commands limit (-1 = controller limit)");
147 static int mfi_detect_jbod_change = 1;
148 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
149 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
/* Non-static: presumably shared with other mfi source files — TODO confirm. */
151 int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
152 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
153 &mfi_polled_cmd_timeout, 0,
154 "Polled command timeout - used for firmware flash etc (in seconds)");
/* Watchdog period used by the callout armed at the end of mfi_attach(). */
156 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
157 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
158 0, "Command timeout (in seconds)");
160 /* Management interface */
161 static d_open_t mfi_open;
162 static d_close_t mfi_close;
163 static d_ioctl_t mfi_ioctl;
164 static d_poll_t mfi_poll;
/*
 * Character-device switch for the /dev/mfi%d management node created in
 * mfi_attach().  NOTE(review): initializer lines (e.g. .d_open, .d_poll,
 * .d_name) and the closing brace are elided in this view — confirm against
 * the full source.
 */
166 static struct cdevsw mfi_cdevsw = {
167 .d_version = D_VERSION,
170 .d_close = mfi_close,
171 .d_ioctl = mfi_ioctl,
176 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
178 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/* DMA bookkeeping for "skinny" controllers; global, single-instance. */
179 struct mfi_skinny_dma_info mfi_skinny;
/*
 * Enable interrupt delivery on xscale (1064R) controllers by programming
 * the outbound interrupt mask register.
 */
182 mfi_enable_intr_xscale(struct mfi_softc *sc)
184 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on PPC-class controllers.  Clears any pending doorbell
 * state (ODCR0) and then unmasks the generation-specific interrupt bit.
 * NOTE(review): the fall-through/else path for other flag values is elided
 * in this view.
 */
188 mfi_enable_intr_ppc(struct mfi_softc *sc)
190 	if (sc->mfi_flags & MFI_FLAGS_1078) {
191 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
192 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
194 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
195 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
196 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
/* Read the raw firmware status word from the xscale outbound message reg. */
204 mfi_read_fw_status_xscale(struct mfi_softc *sc)
206 	return MFI_READ4(sc, MFI_OMSG0);
/* Read the raw firmware status word from the PPC outbound scratch pad reg. */
210 mfi_read_fw_status_ppc(struct mfi_softc *sc)
212 	return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether the xscale controller raised a valid interrupt and, if so,
 * acknowledge it by writing the status back.  Return value semantics (the
 * early-return path for "no interrupt") are elided in this view.
 */
216 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
220 	status = MFI_READ4(sc, MFI_OSTS);
221 	if ((status & MFI_OSTS_INTR_VALID) == 0)
224 	MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check for and acknowledge an interrupt on PPC-class controllers.  Each
 * generation tests its own "reply message" bit; skinny parts ack via OSTS,
 * the others via the doorbell clear register ODCR0.  NOTE(review): the
 * early-return bodies inside the if-blocks are elided in this view.
 */
229 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
233 	status = MFI_READ4(sc, MFI_OSTS);
234 	if (sc->mfi_flags & MFI_FLAGS_1078) {
235 		if (!(status & MFI_1078_RM)) {
239 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
240 		if (!(status & MFI_GEN2_RM)) {
244 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
245 		if (!(status & MFI_SKINNY_RM)) {
	/* Ack path: skinny uses OSTS, everything else uses ODCR0. */
249 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
250 		MFI_WRITE4(sc, MFI_OSTS, status);
252 		MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to the xscale inbound queue port.  The frame bus address
 * is shifted right 3 and OR'd with the extra-frame count, per the
 * controller's inbound queue format.
 */
257 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
259 	MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
/*
 * Post a command on PPC-class controllers: address OR'd with the frame
 * count (shifted) plus a low "valid" bit.  Skinny parts use the 64-bit
 * IQPL/IQPH pair (high word zeroed); others use the single IQP register.
 */
263 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
265 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
266 		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
267 		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
269 		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
/*
 * Drive the firmware state machine toward MFI_FWSTATE_READY, issuing the
 * doorbell write appropriate to the current state (handshake clear, ready
 * request, hotplug ack) and then polling the status register for up to
 * max_wait seconds for a state change.  Skinny/ThunderBolt parts use the
 * MFI_SKINNY_IDB doorbell; older parts use MFI_IDB.
 * NOTE(review): switch header, break statements, error returns and the
 * DELAY/polling details are elided in this view.
 */
274 mfi_transition_firmware(struct mfi_softc *sc)
276 	uint32_t fw_state, cur_state;
278 	uint32_t cur_abs_reg_val = 0;
279 	uint32_t prev_abs_reg_val = 0;
281 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
282 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
283 	while (fw_state != MFI_FWSTATE_READY) {
285 		device_printf(sc->mfi_dev, "Waiting for firmware to "
287 		cur_state = fw_state;
289 		case MFI_FWSTATE_FAULT:
290 			device_printf(sc->mfi_dev, "Firmware fault\n");
292 		case MFI_FWSTATE_WAIT_HANDSHAKE:
293 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
294 				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
296 				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
297 			max_wait = MFI_RESET_WAIT_TIME;
299 		case MFI_FWSTATE_OPERATIONAL:
300 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
	/* 7 = MFI_FWINIT_READY|MFI_FWINIT_MFIMODE|MFI_FWINIT_ABORT — presumably; confirm against mfireg.h. */
301 				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
303 				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
304 			max_wait = MFI_RESET_WAIT_TIME;
306 		case MFI_FWSTATE_UNDEFINED:
307 		case MFI_FWSTATE_BB_INIT:
308 			max_wait = MFI_RESET_WAIT_TIME;
310 		case MFI_FWSTATE_FW_INIT_2:
311 			max_wait = MFI_RESET_WAIT_TIME;
313 		case MFI_FWSTATE_FW_INIT:
314 		case MFI_FWSTATE_FLUSH_CACHE:
315 			max_wait = MFI_RESET_WAIT_TIME;
317 		case MFI_FWSTATE_DEVICE_SCAN:
318 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
319 			prev_abs_reg_val = cur_abs_reg_val;
321 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
322 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
323 				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
325 				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
326 			max_wait = MFI_RESET_WAIT_TIME;
329 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
	/* Poll ~10x per second for up to max_wait seconds for a state change. */
333 		for (i = 0; i < (max_wait * 10); i++) {
334 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
335 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
336 			if (fw_state == cur_state)
341 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
342 			/* Check the device scanning progress */
343 			if (prev_abs_reg_val != cur_abs_reg_val) {
347 		if (fw_state == cur_state) {
348 			device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load() callback: store the single segment's bus address into
 * the caller-supplied bus_addr_t (passed via arg).  Used for all of the
 * driver's single-segment contiguous allocations.
 */
357 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
362 	*addr = segs[0].ds_addr;
/*
 * mfi_attach, part 1: initialize locks, queues and deferred tasks; select
 * the per-generation register access methods; bring the firmware to READY;
 * allocate the version-buffer DMA area (LSIP200113393 workaround); and size
 * the command/SG limits from the firmware status word.
 * NOTE(review): error-return lines after the printfs are elided in this
 * view (original line numbering skips).
 */
367 mfi_attach(struct mfi_softc *sc)
370 	int error, commsz, framessz, sensesz;
371 	int frames, unit, max_fw_sge, max_fw_cmds;
372 	uint32_t tb_mem_size = 0;
378 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
381 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
382 	sx_init(&sc->mfi_config_lock, "MFI config");
383 	TAILQ_INIT(&sc->mfi_ld_tqh);
384 	TAILQ_INIT(&sc->mfi_syspd_tqh);
385 	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
386 	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
387 	TAILQ_INIT(&sc->mfi_evt_queue);
388 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
389 	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
390 	TAILQ_INIT(&sc->mfi_aen_pids);
391 	TAILQ_INIT(&sc->mfi_cam_ccbq);
399 	sc->last_seq_num = 0;
	/* Stays 1 until controller info proves OCR support (cleared later). */
400 	sc->disableOnlineCtrlReset = 1;
401 	sc->issuepend_done = 1;
402 	sc->hw_crit_error = 0;
	/* Bind register-access methods based on controller generation. */
404 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
405 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
406 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
407 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
408 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
409 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
410 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
411 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
412 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
413 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
414 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
415 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
417 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
419 		sc->mfi_enable_intr =  mfi_enable_intr_ppc;
420 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
421 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
422 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
426 	/* Before we get too far, see if the firmware is working */
427 	if ((error = mfi_transition_firmware(sc)) != 0) {
428 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
429 		    "error %d\n", error);
433 	/* Start: LSIP200113393 */
434 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
435 				1, 0,			/* algnmnt, boundary */
436 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
437 				BUS_SPACE_MAXADDR,	/* highaddr */
438 				NULL, NULL,		/* filter, filterarg */
439 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
441 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
443 				NULL, NULL,		/* lockfunc, lockarg */
444 				&sc->verbuf_h_dmat)) {
445 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
448 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
449 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
450 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
453 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
454 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
455 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
456 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
457 	/* End: LSIP200113393 */
460 	 * Get information needed for sizing the contiguous memory for the
461 	 * frame pool.  Size down the sgl parameter since we know that
462 	 * we will never need more than what's required for MAXPHYS.
463 	 * It would be nice if these constants were available at runtime
464 	 * instead of compile time.
466 	status = sc->mfi_read_fw_status(sc);
467 	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	/* Honor the hw.mfi.max_cmds tunable but never exceed the FW limit. */
468 	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
469 		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
470 		    max_fw_cmds, mfi_max_cmds);
471 		sc->mfi_max_fw_cmds = mfi_max_cmds;
473 		sc->mfi_max_fw_cmds = max_fw_cmds;
475 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
476 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
	/*
	 * mfi_attach, part 2 (ThunderBolt only): allocate the contiguous
	 * request/reply message pool, the 0x100-aligned init frame, and a
	 * separate buffer for the MPI2 IOC INIT descriptor.  Each allocation
	 * follows the same tag-create / alloc / bzero / load pattern.
	 * NOTE(review): several bus_dma_tag_create() argument lines
	 * (nsegments, flags, tag pointer) and the error-return lines are
	 * elided in this view.
	 */
478 	/* ThunderBolt Support get the contiguous memory */
480 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
481 		mfi_tbolt_init_globals(sc);
482 		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
483 		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
484 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
485 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
487 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
488 				1, 0,			/* algnmnt, boundary */
489 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 				BUS_SPACE_MAXADDR,	/* highaddr */
491 				NULL, NULL,		/* filter, filterarg */
492 				tb_mem_size,		/* maxsize */
494 				tb_mem_size,		/* maxsegsize */
496 				NULL, NULL,		/* lockfunc, lockarg */
498 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
501 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
502 		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
503 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
506 		bzero(sc->request_message_pool, tb_mem_size);
507 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
508 		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
510 		/* For ThunderBolt memory init */
511 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
512 				0x100, 0,		/* alignmnt, boundary */
513 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
514 				BUS_SPACE_MAXADDR,	/* highaddr */
515 				NULL, NULL,		/* filter, filterarg */
516 				MFI_FRAME_SIZE,		/* maxsize */
518 				MFI_FRAME_SIZE,		/* maxsegsize */
520 				NULL, NULL,		/* lockfunc, lockarg */
521 				&sc->mfi_tb_init_dmat)) {
522 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
525 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
526 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
527 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
530 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
531 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
532 		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
533 		    &sc->mfi_tb_init_busaddr, 0);
534 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
536 			device_printf(sc->mfi_dev,
537 			    "Thunderbolt pool preparation error\n");
542 		  Allocate DMA memory mapping for MPI2 IOC Init descriptor,
543 		  we are taking it different from what we have allocated for Request
544 		  and reply descriptors to avoid confusion later
546 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
547 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
548 				1, 0,			/* algnmnt, boundary */
549 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
550 				BUS_SPACE_MAXADDR,	/* highaddr */
551 				NULL, NULL,		/* filter, filterarg */
552 				tb_mem_size,		/* maxsize */
554 				tb_mem_size,		/* maxsegsize */
556 				NULL, NULL,		/* lockfunc, lockarg */
557 				&sc->mfi_tb_ioc_init_dmat)) {
558 			device_printf(sc->mfi_dev,
559 			    "Cannot allocate comms DMA tag\n");
562 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
563 		    (void **)&sc->mfi_tb_ioc_init_desc,
564 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
565 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
568 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
569 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
570 		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
571 		    &sc->mfi_tb_ioc_init_busaddr, 0);
	/*
	 * mfi_attach, part 3: create the data-buffer DMA tag (serialized by
	 * mfi_io_lock via busdma_lock_mutex) and allocate the three
	 * contiguous below-4GB pools: the reply/producer/consumer comms
	 * area, the command frame pool, and the per-command sense buffers.
	 * NOTE(review): some tag arguments and error returns are elided in
	 * this view.
	 */
574 	 * Create the dma tag for data buffers.  Used both for block I/O
575 	 * and for various internal data queries.
577 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
578 				1, 0,			/* algnmnt, boundary */
579 				BUS_SPACE_MAXADDR,	/* lowaddr */
580 				BUS_SPACE_MAXADDR,	/* highaddr */
581 				NULL, NULL,		/* filter, filterarg */
582 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
583 				sc->mfi_max_sge,	/* nsegments */
584 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
585 				BUS_DMA_ALLOCNOW,	/* flags */
586 				busdma_lock_mutex,	/* lockfunc */
587 				&sc->mfi_io_lock,	/* lockfuncarg */
588 				&sc->mfi_buffer_dmat)) {
589 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
594 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
595 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
596 	 * entry, so the calculated size here will be 1 more than
597 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
599 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
600 	    sizeof(struct mfi_hwcomms);
601 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
602 				1, 0,			/* algnmnt, boundary */
603 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
604 				BUS_SPACE_MAXADDR,	/* highaddr */
605 				NULL, NULL,		/* filter, filterarg */
606 				commsz,			/* maxsize */
608 				commsz,			/* maxsegsize */
610 				NULL, NULL,		/* lockfunc, lockarg */
611 				&sc->mfi_comms_dmat)) {
612 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
615 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
616 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
617 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
620 	bzero(sc->mfi_comms, commsz);
621 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
622 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
624 	 * Allocate DMA memory for the command frames.  Keep them in the
625 	 * lower 4GB for efficiency.  Calculate the size of the commands at
626 	 * the same time; each command is one 64 byte frame plus a set of
627 	 * additional frames for holding sg lists or other data.
628 	 * The assumption here is that the SG list will start at the second
629 	 * frame and not use the unused bytes in the first frame.  While this
630 	 * isn't technically correct, it simplifies the calculation and allows
631 	 * for command frames that might be larger than an mfi_io_frame.
	/* 64-bit bus addresses need the wider SG entry; skinny has its own. */
633 	if (sizeof(bus_addr_t) == 8) {
634 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
635 		sc->mfi_flags |= MFI_FLAGS_SG64;
637 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
639 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
640 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
641 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
642 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
643 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
644 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
645 				64, 0,			/* algnmnt, boundary */
646 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
647 				BUS_SPACE_MAXADDR,	/* highaddr */
648 				NULL, NULL,		/* filter, filterarg */
649 				framessz,		/* maxsize */
651 				framessz,		/* maxsegsize */
653 				NULL, NULL,		/* lockfunc, lockarg */
654 				&sc->mfi_frames_dmat)) {
655 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
658 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
659 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
660 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
663 	bzero(sc->mfi_frames, framessz);
664 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
665 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
667 	 * Allocate DMA memory for the frame sense data.  Keep them in the
668 	 * lower 4GB for efficiency
670 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
671 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
672 				4, 0,			/* algnmnt, boundary */
673 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
674 				BUS_SPACE_MAXADDR,	/* highaddr */
675 				NULL, NULL,		/* filter, filterarg */
676 				sensesz,		/* maxsize */
678 				sensesz,		/* maxsegsize */
680 				NULL, NULL,		/* lockfunc, lockarg */
681 				&sc->mfi_sense_dmat)) {
682 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
685 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
686 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
687 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
690 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
691 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	/*
	 * mfi_attach, part 4: allocate command structures, initialize the
	 * firmware comms (ThunderBolt or standard path), hook up the
	 * interrupt handler, fetch controller info, register the config
	 * intrhook / shutdown handler, create the /dev management node and
	 * per-device sysctls, and start the watchdog.
	 */
692 	if ((error = mfi_alloc_commands(sc)) != 0)
695 	/* Before moving the FW to operational state, check whether
696 	 * hostmemory is required by the FW or not
699 	/* ThunderBolt MFI_IOC2 INIT */
700 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
701 		sc->mfi_disable_intr(sc);
702 		mtx_lock(&sc->mfi_io_lock);
703 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
704 			device_printf(sc->mfi_dev,
705 			    "TB Init has failed with error %d\n",error);
706 			mtx_unlock(&sc->mfi_io_lock);
709 		mtx_unlock(&sc->mfi_io_lock);
711 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
713 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
714 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
716 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
719 		sc->mfi_intr_ptr = mfi_intr_tbolt;
720 		sc->mfi_enable_intr(sc);
722 		if ((error = mfi_comms_init(sc)) != 0)
725 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
726 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
727 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
730 		sc->mfi_intr_ptr = mfi_intr;
731 		sc->mfi_enable_intr(sc);
733 	if ((error = mfi_get_controller_info(sc)) != 0)
	/* Controller answered GETINFO, so online controller reset is usable. */
735 	sc->disableOnlineCtrlReset = 0;
737 	/* Register a config hook to probe the bus for arrays */
738 	sc->mfi_ich.ich_func = mfi_startup;
739 	sc->mfi_ich.ich_arg = sc;
740 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
741 		device_printf(sc->mfi_dev, "Cannot establish configuration "
745 	mtx_lock(&sc->mfi_io_lock);
	/*
	 * BUG(review): the comma operator makes the tested expression
	 * ", 0", which is always 0, so a failing mfi_aen_setup() is
	 * silently ignored and the error branch below is dead code.
	 * Should read: if ((error = mfi_aen_setup(sc, 0)) != 0) {
	 */
746 	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
747 		mtx_unlock(&sc->mfi_io_lock);
750 	mtx_unlock(&sc->mfi_io_lock);
753 	 * Register a shutdown handler.
755 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
756 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
757 		device_printf(sc->mfi_dev, "Warning: shutdown event "
758 		    "registration failed\n");
762 	 * Create the control device for doing management
764 	unit = device_get_unit(sc->mfi_dev);
765 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
766 	    0640, "mfi%d", unit);
	/*
	 * NOTE(review): "&dev_t" uses a type name as an object — looks like
	 * a transcription artifact; the real argument is presumably a
	 * struct cdev ** alias variable.  Verify against the full source.
	 */
768 	make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
769 	    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
770 	if (sc->mfi_cdev != NULL)
771 		sc->mfi_cdev->si_drv1 = sc;
772 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
773 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
774 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
775 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
776 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
777 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
778 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
779 	    &sc->mfi_keep_deleted_volumes, 0,
780 	    "Don't detach the mfid device for a busy volume that is deleted");
782 	device_add_child(sc->mfi_dev, "mfip", -1);
783 	bus_generic_attach(sc->mfi_dev);
785 	/* Start the timeout watchdog */
786 	callout_init(&sc->mfi_watchdog_callout, 1);
787 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	/* ThunderBolt: prime the LD map sync before returning. */
790 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
791 		mtx_lock(&sc->mfi_io_lock);
792 		mfi_tbolt_sync_map_info(sc);
793 		mtx_unlock(&sc->mfi_io_lock);
/*
 * Allocate the mfi_command array and wire each command to its slice of the
 * preallocated frame and sense pools; create a busdma map per command and
 * place it on the free queue via mfi_release_command().  On dmamap-create
 * failure, tear down everything allocated so far.
 */
800 mfi_alloc_commands(struct mfi_softc *sc)
802 	struct mfi_command *cm;
806 	 * XXX Should we allocate all the commands up front, or allocate on
807 	 * demand later like 'aac' does?
809 	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
810 	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
812 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
813 		cm = &sc->mfi_commands[i];
814 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
815 		    sc->mfi_cmd_size * i);
816 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
817 		    sc->mfi_cmd_size * i;
	/* The context field lets the completion path map a frame back to i. */
818 		cm->cm_frame->header.context = i;
819 		cm->cm_sense = &sc->mfi_sense[i];
820 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
823 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
824 		    &cm->cm_dmamap) == 0) {
825 			mtx_lock(&sc->mfi_io_lock);
826 			mfi_release_command(cm);
827 			mtx_unlock(&sc->mfi_io_lock);
	/*
	 * BUG(review): diagnostic off-by-one — commands 0..i-1 (i total)
	 * were allocated, but the message reports i - 1.
	 */
829 			device_printf(sc->mfi_dev, "Failed to allocate %d "
830 			    "command blocks, only allocated %d\n",
831 			    sc->mfi_max_fw_cmds, i - 1);
	/*
	 * BUG(review): cleanup loop indexes with `i` instead of `j`, so
	 * it destroys the same (out-of-range, never-created) dmamap i
	 * times and leaks the maps actually created.  Should be
	 * &sc->mfi_commands[j].
	 */
832 			for (j = 0; j < i; j++) {
833 				cm = &sc->mfi_commands[i];
834 				bus_dmamap_destroy(sc->mfi_buffer_dmat,
837 			free(sc->mfi_commands, M_MFIBUF);
838 			sc->mfi_commands = NULL;
/*
 * Return a command to the free pool: clear its first S/G entry and the
 * mutable header words (preserving the context in word 2/3), detach it
 * from any queue it is still on, hand ThunderBolt companion commands back
 * to their pool, and enqueue on the free list.  Caller must hold
 * mfi_io_lock (asserted below).
 */
848 mfi_release_command(struct mfi_command *cm)
850 	struct mfi_frame_header *hdr;
853 	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
856 	 * Zero out the important fields of the frame, but make sure the
857 	 * context field is preserved.  For efficiency, handle the fields
858 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
860 	hdr = &cm->cm_frame->header;
861 	if (cm->cm_data != NULL && hdr->sg_count) {
862 		cm->cm_sg->sg32[0].len = 0;
863 		cm->cm_sg->sg32[0].addr = 0;
867 	 * Command may be on other queues e.g. busy queue depending on the
868 	 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
871 	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
873 	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
874 		mfi_remove_ready(cm);
876 	/* We're not expecting it to be on any other queue but check */
877 	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
878 		panic("Command %p is still on another queue, flags = %#x",
	/* ThunderBolt commands carry a companion from the tbolt pool. */
883 	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
884 		mfi_tbolt_return_cmd(cm->cm_sc,
885 		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
889 	hdr_data = (uint32_t *)cm->cm_frame;
890 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
891 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
892 	hdr_data[4] = 0;	/* flags, timeout */
893 	hdr_data[5] = 0;	/* data_len */
895 	cm->cm_extra_frames = 0;
897 	cm->cm_complete = NULL;
898 	cm->cm_private = NULL;
901 	cm->cm_total_frame_size = 0;
902 	cm->retry_for_fw_reset = 0;
904 	mfi_enqueue_free(cm);
/*
 * Build a DCMD command: dequeue a free command, zero its frame (keeping
 * the context), optionally allocate a zeroed data buffer of bufsize, and
 * fill in the DCMD header for the given opcode.  On success the command is
 * returned through cmp and the buffer (if any) through bufp; the caller
 * owns the M_MFIBUF buffer.  Caller must hold mfi_io_lock (asserted).
 * NOTE(review): error-return lines (free-queue empty, malloc failure) are
 * elided in this view.
 */
908 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
909     uint32_t opcode, void **bufp, size_t bufsize)
911 	struct mfi_command *cm;
912 	struct mfi_dcmd_frame *dcmd;
914 	uint32_t context = 0;
916 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
918 	cm = mfi_dequeue_free(sc);
922 	/* Zero out the MFI frame */
923 	context = cm->cm_frame->header.context;
924 	bzero(cm->cm_frame, sizeof(union mfi_frame));
925 	cm->cm_frame->header.context = context;
927 	if ((bufsize > 0) && (bufp != NULL)) {
	/* M_NOWAIT: we hold mfi_io_lock, so sleeping is not allowed here. */
929 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
931 				mfi_release_command(cm);
940 	dcmd =  &cm->cm_frame->dcmd;
941 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
942 	dcmd->header.cmd = MFI_CMD_DCMD;
943 	dcmd->header.timeout = 0;
944 	dcmd->header.flags = 0;
945 	dcmd->header.data_len = bufsize;
946 	dcmd->header.scsi_status = 0;
947 	dcmd->opcode = opcode;
948 	cm->cm_sg = &dcmd->sgl;
949 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
952 	cm->cm_private = buf;
953 	cm->cm_len = bufsize;
	/* Hand the freshly-allocated buffer back only if caller gave no buffer. */
956 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI INIT command that tells the firmware where the reply queue
 * and producer/consumer indices live.  The mfi_init_qinfo structure is
 * placed in the frame area immediately after the init frame (abusing the
 * S/G list space) so no separate DMA buffer is needed.  Polled; returns
 * mfi_mapcmd()'s status.
 */
962 mfi_comms_init(struct mfi_softc *sc)
964 	struct mfi_command *cm;
965 	struct mfi_init_frame *init;
966 	struct mfi_init_qinfo *qinfo;
968 	uint32_t context = 0;
970 	mtx_lock(&sc->mfi_io_lock);
971 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
972 		mtx_unlock(&sc->mfi_io_lock);
976 	/* Zero out the MFI frame */
977 	context = cm->cm_frame->header.context;
978 	bzero(cm->cm_frame, sizeof(union mfi_frame));
979 	cm->cm_frame->header.context = context;
982 	 * Abuse the SG list area of the frame to hold the init_qinfo
985 	init = &cm->cm_frame->init;
986 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
988 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	/* One more reply slot than commands — hardware requirement (see attach). */
989 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
990 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
991 	    offsetof(struct mfi_hwcomms, hw_reply_q);
992 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
993 	    offsetof(struct mfi_hwcomms, hw_pi);
994 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
995 	    offsetof(struct mfi_hwcomms, hw_ci);
997 	init->header.cmd = MFI_CMD_INIT;
998 	init->header.data_len = sizeof(struct mfi_init_qinfo);
999 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
1001 	cm->cm_flags = MFI_CMD_POLLED;
1003 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1004 		device_printf(sc->mfi_dev, "failed to send init command\n");
1005 	mfi_release_command(cm);
1006 	mtx_unlock(&sc->mfi_io_lock);
/*
 * Issue a polled MFI_DCMD_CTRL_GETINFO and derive sc->mfi_max_io from the
 * reported stripe/strip limits and max request size.  On command failure a
 * conservative mfi_max_io is computed from the SG limit instead.  Also
 * latches the firmware's disableOnlineCtrlReset capability.
 * NOTE(review): error-path gotos/returns and the ci free are elided in
 * this view.
 */
1012 mfi_get_controller_info(struct mfi_softc *sc)
1014 	struct mfi_command *cm = NULL;
1015 	struct mfi_ctrl_info *ci = NULL;
1016 	uint32_t max_sectors_1, max_sectors_2;
1019 	mtx_lock(&sc->mfi_io_lock);
1020 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1021 	    (void **)&ci, sizeof(*ci));
1024 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1026 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1027 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
	/* Fallback I/O size when GETINFO fails: bounded by the SG limit. */
1028 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1034 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1035 	    BUS_DMASYNC_POSTREAD);
1036 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1038 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1039 	max_sectors_2 = ci->max_request_size;
1040 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1041 	sc->disableOnlineCtrlReset =
1042 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
1048 	mfi_release_command(cm);
1049 	mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the firmware event-log state (boot/shutdown/newest sequence
 * numbers) via a polled MFI_DCMD_CTRL_EVENT_GETINFO.  The buffer is
 * allocated by mfi_dcmd_command() and returned through log_state; the
 * caller frees it.  Caller must hold mfi_io_lock (asserted).
 */
1054 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1056 	struct mfi_command *cm = NULL;
1059 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1060 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1061 	    (void **)log_state, sizeof(**log_state));
1064 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1066 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1067 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1071 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1072 	    BUS_DMASYNC_POSTREAD);
1073 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1077 	mfi_release_command(cm);
/*
 * Arm asynchronous event notification (AEN).  When seq_start is 0 the
 * controller log state is consulted: events logged between the last
 * shutdown and now are replayed via mfi_parse_entries(), and registration
 * resumes from the newest sequence number.  Otherwise registration starts
 * at the caller-supplied sequence.  Requires mfi_io_lock.
 */
1083 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1085 struct mfi_evt_log_state *log_state = NULL;
1086 union mfi_evt class_locale;
1090 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1092 class_locale.members.reserved = 0;
1093 class_locale.members.locale = mfi_event_locale;
1094 class_locale.members.evt_class = mfi_event_class;
1096 if (seq_start == 0) {
1097 if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1099 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1102 * Walk through any events that fired since the last
1105 if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1106 log_state->newest_seq_num)) != 0)
1108 seq = log_state->newest_seq_num;
1111 error = mfi_aen_register(sc, seq, class_locale.word);
/* log_state was allocated by mfi_get_log_state(); free it here. */
1113 free(log_state, M_MFIBUF);
/*
 * Queue a command and sleep on it until the interrupt path marks it
 * MFI_CMD_COMPLETED.  A DCMD opcode of 0 (issued by MegaCli) is completed
 * immediately with MFI_STAT_OK and never sent to hardware.  Returns the
 * command's cm_error.  Requires mfi_io_lock (msleep drops/retakes it).
 */
1119 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1122 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* No completion callback: completion is signalled by wakeup(cm) instead. */
1123 cm->cm_complete = NULL;
1126 * MegaCli can issue a DCMD of 0. In this case do nothing
1127 * and return 0 to it as status
1129 if (cm->cm_frame->dcmd.opcode == 0) {
1130 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1132 return (cm->cm_error);
1134 mfi_enqueue_ready(cm);
1136 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1137 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1138 return (cm->cm_error);
/*
 * Tear down all softc resources on detach/attach-failure: watchdog
 * callout, /dev node, per-command DMA maps, interrupt, then each DMA
 * allocation in unload -> free -> tag-destroy order (sense, frames,
 * comms, and — for ThunderBolt controllers — the contiguous request
 * pool, version buffer, INIT packet, and IOC init descriptor), and
 * finally the parent tags and locks.  Each step is guarded so the
 * function is safe to call on a partially initialized softc.
 */
1142 mfi_free(struct mfi_softc *sc)
1144 struct mfi_command *cm;
1147 callout_drain(&sc->mfi_watchdog_callout);
1149 if (sc->mfi_cdev != NULL)
1150 destroy_dev(sc->mfi_cdev);
1152 if (sc->mfi_commands != NULL) {
1153 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1154 cm = &sc->mfi_commands[i];
1155 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1157 free(sc->mfi_commands, M_MFIBUF);
1158 sc->mfi_commands = NULL;
1162 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1163 if (sc->mfi_irq != NULL)
1164 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer DMA resources. */
1167 if (sc->mfi_sense_busaddr != 0)
1168 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1169 if (sc->mfi_sense != NULL)
1170 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1171 sc->mfi_sense_dmamap);
1172 if (sc->mfi_sense_dmat != NULL)
1173 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Command frame DMA resources. */
1175 if (sc->mfi_frames_busaddr != 0)
1176 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1177 if (sc->mfi_frames != NULL)
1178 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1179 sc->mfi_frames_dmamap);
1180 if (sc->mfi_frames_dmat != NULL)
1181 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Reply-queue communication area DMA resources. */
1183 if (sc->mfi_comms_busaddr != 0)
1184 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1185 if (sc->mfi_comms != NULL)
1186 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1187 sc->mfi_comms_dmamap);
1188 if (sc->mfi_comms_dmat != NULL)
1189 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1191 /* ThunderBolt contiguous memory free here */
1192 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1193 if (sc->mfi_tb_busaddr != 0)
1194 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1195 if (sc->request_message_pool != NULL)
1196 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1198 if (sc->mfi_tb_dmat != NULL)
1199 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1201 /* Version buffer memory free */
1202 /* Start LSIP200113393 */
1203 if (sc->verbuf_h_busaddr != 0)
1204 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1205 if (sc->verbuf != NULL)
1206 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1207 sc->verbuf_h_dmamap);
1208 if (sc->verbuf_h_dmat != NULL)
1209 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1211 /* End LSIP200113393 */
1212 /* ThunderBolt INIT packet memory Free */
1213 if (sc->mfi_tb_init_busaddr != 0)
1214 bus_dmamap_unload(sc->mfi_tb_init_dmat,
1215 sc->mfi_tb_init_dmamap);
1216 if (sc->mfi_tb_init != NULL)
1217 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1218 sc->mfi_tb_init_dmamap);
1219 if (sc->mfi_tb_init_dmat != NULL)
1220 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1222 /* ThunderBolt IOC Init Desc memory free here */
1223 if (sc->mfi_tb_ioc_init_busaddr != 0)
1224 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1225 sc->mfi_tb_ioc_init_dmamap);
1226 if (sc->mfi_tb_ioc_init_desc != NULL)
1227 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1228 sc->mfi_tb_ioc_init_desc,
1229 sc->mfi_tb_ioc_init_dmamap);
1230 if (sc->mfi_tb_ioc_init_dmat != NULL)
1231 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1232 if (sc->mfi_cmd_pool_tbolt != NULL) {
1233 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1234 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1235 free(sc->mfi_cmd_pool_tbolt[i],
/* NULL after free defends against double-free on re-entry. */
1237 sc->mfi_cmd_pool_tbolt[i] = NULL;
1240 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1241 sc->mfi_cmd_pool_tbolt = NULL;
1243 if (sc->request_desc_pool != NULL) {
1244 free(sc->request_desc_pool, M_MFIBUF);
1245 sc->request_desc_pool = NULL;
1248 if (sc->mfi_buffer_dmat != NULL)
1249 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1250 if (sc->mfi_parent_dmat != NULL)
1251 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1253 if (mtx_initialized(&sc->mfi_io_lock)) {
1254 mtx_destroy(&sc->mfi_io_lock);
1255 sx_destroy(&sc->mfi_config_lock);
/*
 * Deferred attach hook (config_intrhook): enable controller interrupts,
 * then probe logical drives — and, on SKINNY controllers, system PDs —
 * under the config and io locks, before disestablishing the hook.
 */
1262 mfi_startup(void *arg)
1264 struct mfi_softc *sc;
1266 sc = (struct mfi_softc *)arg;
1268 sc->mfi_enable_intr(sc);
1269 sx_xlock(&sc->mfi_config_lock);
1270 mtx_lock(&sc->mfi_io_lock);
1272 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1274 mtx_unlock(&sc->mfi_io_lock);
1275 sx_xunlock(&sc->mfi_config_lock);
1277 config_intrhook_disestablish(&sc->mfi_ich);
/*
 * Interrupt handler body (the function's entry line is not visible in
 * this listing — presumably mfi_intr(void *arg); verify against the
 * full source).  Drains the hardware reply queue: for each valid
 * context index it completes the matching command, advances the
 * consumer index (wrapping at mfi_max_fw_cmds + 1), unfreezes the
 * queue, and restarts if the producer index moved meanwhile.
 */
1283 struct mfi_softc *sc;
1284 struct mfi_command *cm;
1285 uint32_t pi, ci, context;
1287 sc = (struct mfi_softc *)arg;
/* Shared-interrupt filter: bail if this device didn't interrupt. */
1289 if (sc->mfi_check_clear_intr(sc))
1293 pi = sc->mfi_comms->hw_pi;
1294 ci = sc->mfi_comms->hw_ci;
1295 mtx_lock(&sc->mfi_io_lock);
1297 context = sc->mfi_comms->hw_reply_q[ci];
1298 if (context < sc->mfi_max_fw_cmds) {
1299 cm = &sc->mfi_commands[context];
1300 mfi_remove_busy(cm);
1302 mfi_complete(sc, cm);
/* Reply queue has mfi_max_fw_cmds + 1 slots; wrap the consumer index. */
1304 if (++ci == (sc->mfi_max_fw_cmds + 1))
1308 sc->mfi_comms->hw_ci = ci;
1310 /* Give defered I/O a chance to run */
1311 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1313 mtx_unlock(&sc->mfi_io_lock);
1316 * Dummy read to flush the bus; this ensures that the indexes are up
1317 * to date. Restart processing if more commands have come it.
1319 (void)sc->mfi_read_fw_status(sc);
1320 if (pi != sc->mfi_comms->hw_pi)
/*
 * Orderly controller shutdown: abort the outstanding AEN and LD-map-sync
 * commands (flagging the aborts so their completion handlers don't
 * re-arm), then send a polled MFI_DCMD_CTRL_SHUTDOWN with no data phase.
 */
1327 mfi_shutdown(struct mfi_softc *sc)
1329 struct mfi_dcmd_frame *dcmd;
1330 struct mfi_command *cm;
1334 if (sc->mfi_aen_cm != NULL) {
1335 sc->cm_aen_abort = 1;
1336 mfi_abort(sc, &sc->mfi_aen_cm);
1339 if (sc->mfi_map_sync_cm != NULL) {
1340 sc->cm_map_abort = 1;
1341 mfi_abort(sc, &sc->mfi_map_sync_cm);
1344 mtx_lock(&sc->mfi_io_lock);
1345 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1347 mtx_unlock(&sc->mfi_io_lock);
/* The shutdown DCMD carries no payload. */
1351 dcmd = &cm->cm_frame->dcmd;
1352 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1353 cm->cm_flags = MFI_CMD_POLLED;
1356 if ((error = mfi_mapcmd(sc, cm)) != 0)
1357 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1359 mfi_release_command(cm);
1360 mtx_unlock(&sc->mfi_io_lock);
/*
 * Re-scan system (JBOD) physical drives.  Queries the host-exposed PD
 * list with a polled MFI_DCMD_PD_LIST_QUERY, attaches any PD not already
 * present or pending, and detaches child devices for PDs that vanished
 * from the list.  Requires both mfi_config_lock (exclusive) and
 * mfi_io_lock; the io lock is dropped around device_delete_child().
 */
1365 mfi_syspdprobe(struct mfi_softc *sc)
1367 struct mfi_frame_header *hdr;
1368 struct mfi_command *cm = NULL;
1369 struct mfi_pd_list *pdlist = NULL;
1370 struct mfi_system_pd *syspd, *tmp;
1371 struct mfi_system_pending *syspd_pend;
1372 int error, i, found;
1374 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1375 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1376 /* Add SYSTEM PD's */
1377 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1378 (void **)&pdlist, sizeof(*pdlist));
1380 device_printf(sc->mfi_dev,
1381 "Error while forming SYSTEM PD list\n");
1385 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1386 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1387 cm->cm_frame->dcmd.mbox[1] = 0;
1388 if (mfi_mapcmd(sc, cm) != 0) {
1389 device_printf(sc->mfi_dev,
1390 "Failed to get syspd device listing\n");
1393 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1394 BUS_DMASYNC_POSTREAD);
1395 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1396 hdr = &cm->cm_frame->header;
1397 if (hdr->cmd_status != MFI_STAT_OK) {
1398 device_printf(sc->mfi_dev,
1399 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1402 /* Get each PD and add it to the system */
1403 for (i = 0; i < pdlist->count; i++) {
/* A device_id equal to its enclosure id is the enclosure itself; skip. */
1404 if (pdlist->addr[i].device_id ==
1405 pdlist->addr[i].encl_device_id)
/* Skip PDs already attached or already queued for attach. */
1408 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1409 if (syspd->pd_id == pdlist->addr[i].device_id)
1412 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1413 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1417 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1419 /* Delete SYSPD's whose state has been changed */
1420 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1422 for (i = 0; i < pdlist->count; i++) {
1423 if (syspd->pd_id == pdlist->addr[i].device_id) {
/* Newbus requires the io mutex dropped while deleting the child. */
1430 mtx_unlock(&sc->mfi_io_lock);
1432 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1434 mtx_lock(&sc->mfi_io_lock);
1439 free(pdlist, M_MFIBUF);
1441 mfi_release_command(cm);
/*
 * Probe logical drives.  Issues MFI_DCMD_LD_GET_LIST (sleeping, via
 * mfi_wait_command) and calls mfi_add_ld() for every target not already
 * attached or pending.  Requires mfi_config_lock (exclusive) and
 * mfi_io_lock.
 */
1447 mfi_ldprobe(struct mfi_softc *sc)
1449 struct mfi_frame_header *hdr;
1450 struct mfi_command *cm = NULL;
1451 struct mfi_ld_list *list = NULL;
1452 struct mfi_disk *ld;
1453 struct mfi_disk_pending *ld_pend;
1456 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1457 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1459 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1460 (void **)&list, sizeof(*list));
1464 cm->cm_flags = MFI_CMD_DATAIN;
1465 if (mfi_wait_command(sc, cm) != 0) {
1466 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1470 hdr = &cm->cm_frame->header;
1471 if (hdr->cmd_status != MFI_STAT_OK) {
1472 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Skip targets already attached or queued; otherwise attach. */
1477 for (i = 0; i < list->ld_count; i++) {
1478 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1479 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1482 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1483 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1486 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1491 free(list, M_MFIBUF);
1493 mfi_release_command(cm);
1499 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1500 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp into a static buffer (not thread-safe; the
 * caller must consume the string before the next call).
 */
1504 format_timestamp(uint32_t timestamp)
1506 static char buffer[32];
1508 if ((timestamp & 0xff000000) == 0xff000000)
1509 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1512 snprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an event class code to a human-readable string; unknown codes are
 * formatted numerically into a static buffer (not thread-safe).
 */
1517 format_class(int8_t class)
1519 static char buffer[6];
1522 case MFI_EVT_CLASS_DEBUG:
1524 case MFI_EVT_CLASS_PROGRESS:
1525 return ("progress");
1526 case MFI_EVT_CLASS_INFO:
1528 case MFI_EVT_CLASS_WARNING:
1530 case MFI_EVT_CLASS_CRITICAL:
1532 case MFI_EVT_CLASS_FATAL:
1534 case MFI_EVT_CLASS_DEAD:
1537 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log an AEN event and react to it: host-bus-scan requests and PD
 * insert/remove trigger a syspd re-probe (when mfi_detect_jbod_change is
 * set), an LD going OFFLINE detaches the corresponding mfid child, and
 * PD insert/remove is also forwarded to the CAM rescan callback.  Events
 * older than the boot sequence number, or arriving during detach, are
 * ignored.
 */
1543 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1545 struct mfi_system_pd *syspd = NULL;
1547 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1548 format_timestamp(detail->time), detail->evt_class.members.locale,
1549 format_class(detail->evt_class.members.evt_class),
1550 detail->description);
1552 /* Don't act on old AEN's or while shutting down */
1553 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1556 switch (detail->arg_type) {
1557 case MR_EVT_ARGS_NONE:
1558 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1559 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1560 if (mfi_detect_jbod_change) {
1562 * Probe for new SYSPD's and Delete
1565 sx_xlock(&sc->mfi_config_lock);
1566 mtx_lock(&sc->mfi_io_lock);
1568 mtx_unlock(&sc->mfi_io_lock);
1569 sx_xunlock(&sc->mfi_config_lock);
1573 case MR_EVT_ARGS_LD_STATE:
1574 /* During load time driver reads all the events starting
1575 * from the one that has been logged after shutdown. Avoid
1578 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1580 struct mfi_disk *ld;
1581 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1583 detail->args.ld_state.ld.target_id)
1587 Fix: for kernel panics when SSCD is removed
1588 KASSERT(ld != NULL, ("volume dissappeared"));
1592 device_delete_child(sc->mfi_dev, ld->ld_dev);
1597 case MR_EVT_ARGS_PD:
1598 if (detail->code == MR_EVT_PD_REMOVED) {
1599 if (mfi_detect_jbod_change) {
1601 * If the removed device is a SYSPD then
1604 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1607 detail->args.pd.device_id) {
1609 device_delete_child(
1618 if (detail->code == MR_EVT_PD_INSERTED) {
1619 if (mfi_detect_jbod_change) {
1620 /* Probe for new SYSPD's */
1621 sx_xlock(&sc->mfi_config_lock);
1622 mtx_lock(&sc->mfi_io_lock);
1624 mtx_unlock(&sc->mfi_io_lock);
1625 sx_xunlock(&sc->mfi_config_lock);
/* Notify CAM (if attached) so the passthrough bus rescans this PD. */
1628 if (sc->mfi_cam_rescan_cb != NULL &&
1629 (detail->code == MR_EVT_PD_INSERTED ||
1630 detail->code == MR_EVT_PD_REMOVED)) {
1631 sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
/*
 * Copy an event detail into a freshly allocated queue element and hand it
 * to the swi taskqueue for deferred decoding in mfi_handle_evt().
 * M_NOWAIT: on allocation failure the event is silently dropped.
 * Requires mfi_io_lock.
 */
1638 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1640 struct mfi_evt_queue_elm *elm;
1642 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1643 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1646 memcpy(&elm->detail, detail, sizeof(*detail));
1647 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1648 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: atomically steal the pending event list onto a
 * local queue (so mfi_io_lock is not held while decoding), then decode
 * and free each element.
 */
1652 mfi_handle_evt(void *context, int pending)
1654 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1655 struct mfi_softc *sc;
1656 struct mfi_evt_queue_elm *elm;
1660 mtx_lock(&sc->mfi_io_lock);
1661 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1662 mtx_unlock(&sc->mfi_io_lock);
1663 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1664 TAILQ_REMOVE(&queue, elm, link);
1665 mfi_decode_evt(sc, &elm->detail);
1666 free(elm, M_MFIBUF);
/*
 * Register for asynchronous event notification starting at sequence
 * 'seq' with the given class/locale word.  If an AEN command is already
 * outstanding and it covers (or exceeds) the requested class/locale, the
 * new request is a no-op; otherwise the locales are merged, the broader
 * class kept, and the old command aborted before re-registering.  The
 * command completes asynchronously via mfi_aen_complete().  Requires
 * mfi_io_lock.
 */
1671 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1673 struct mfi_command *cm;
1674 struct mfi_dcmd_frame *dcmd;
1675 union mfi_evt current_aen, prior_aen;
1676 struct mfi_evt_detail *ed = NULL;
1679 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1681 current_aen.word = locale;
1682 if (sc->mfi_aen_cm != NULL) {
/* Recover the class/locale word the outstanding AEN was armed with. */
1684 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1685 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1686 !((prior_aen.members.locale & current_aen.members.locale)
1687 ^current_aen.members.locale)) {
1690 prior_aen.members.locale |= current_aen.members.locale;
1691 if (prior_aen.members.evt_class
1692 < current_aen.members.evt_class)
1693 current_aen.members.evt_class =
1694 prior_aen.members.evt_class;
1695 mfi_abort(sc, &sc->mfi_aen_cm);
1699 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1700 (void **)&ed, sizeof(*ed));
1704 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale filter. */
1705 ((uint32_t *)&dcmd->mbox)[0] = seq;
1706 ((uint32_t *)&dcmd->mbox)[1] = locale;
1707 cm->cm_flags = MFI_CMD_DATAIN;
1708 cm->cm_complete = mfi_aen_complete;
1710 sc->last_seq_num = seq;
1711 sc->mfi_aen_cm = cm;
1713 mfi_enqueue_ready(cm);
/*
 * Completion handler for the outstanding AEN command.  Unless the
 * command was aborted, it queues the received event for decoding, wakes
 * select() waiters, signals registered AEN listener processes with
 * SIGIO, and finally re-arms event notification at the next sequence
 * number via mfi_aen_setup().  Requires mfi_io_lock.
 */
1721 mfi_aen_complete(struct mfi_command *cm)
1723 struct mfi_frame_header *hdr;
1724 struct mfi_softc *sc;
1725 struct mfi_evt_detail *detail;
1726 struct mfi_aen *mfi_aen_entry, *tmp;
1727 int seq = 0, aborted = 0;
1730 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1732 if (sc->mfi_aen_cm == NULL)
1735 hdr = &cm->cm_frame->header;
1737 if (sc->cm_aen_abort ||
1738 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1739 sc->cm_aen_abort = 0;
1742 sc->mfi_aen_triggered = 1;
1743 if (sc->mfi_poll_waiting) {
1744 sc->mfi_poll_waiting = 0;
1745 selwakeup(&sc->mfi_select);
1747 detail = cm->cm_data;
1748 mfi_queue_evt(sc, detail);
/* Next registration resumes just past the event we received. */
1749 seq = detail->seq + 1;
1750 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1752 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1754 PROC_LOCK(mfi_aen_entry->p);
1755 kern_psignal(mfi_aen_entry->p, SIGIO);
1756 PROC_UNLOCK(mfi_aen_entry->p);
1757 free(mfi_aen_entry, M_MFIBUF);
1761 free(cm->cm_data, M_MFIBUF);
1762 wakeup(&sc->mfi_aen_cm);
1763 sc->mfi_aen_cm = NULL;
1764 mfi_release_command(cm);
1766 /* set it up again so the driver can catch more events */
1768 mfi_aen_setup(sc, seq);
1771 #define MAX_EVENTS 15
/*
 * Replay logged controller events in [start_seq, stop_seq).  Repeatedly
 * issues polled MFI_DCMD_CTRL_EVENT_GET requests, fetching up to
 * MAX_EVENTS entries at a time into a temporary list, and queues each
 * event for decoding until stop_seq is reached (handling wrap-around of
 * the circular firmware log) or the firmware reports NOT_FOUND.
 * Requires mfi_io_lock.
 */
1774 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1776 struct mfi_command *cm;
1777 struct mfi_dcmd_frame *dcmd;
1778 struct mfi_evt_list *el;
1779 union mfi_evt class_locale;
1780 int error, i, seq, size;
1782 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1784 class_locale.members.reserved = 0;
1785 class_locale.members.locale = mfi_event_locale;
1786 class_locale.members.evt_class = mfi_event_class;
1788 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1790 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1794 for (seq = start_seq;;) {
1795 if ((cm = mfi_dequeue_free(sc)) == NULL) {
/* Build the EVENT_GET DCMD by hand (no data buffer helper here). */
1800 dcmd = &cm->cm_frame->dcmd;
1801 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1802 dcmd->header.cmd = MFI_CMD_DCMD;
1803 dcmd->header.timeout = 0;
1804 dcmd->header.data_len = size;
1805 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1806 ((uint32_t *)&dcmd->mbox)[0] = seq;
1807 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1808 cm->cm_sg = &dcmd->sgl;
1809 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1810 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1814 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1815 device_printf(sc->mfi_dev,
1816 "Failed to get controller entries\n");
1817 mfi_release_command(cm);
1821 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1822 BUS_DMASYNC_POSTREAD);
1823 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we've drained the log; stop quietly. */
1825 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1826 mfi_release_command(cm);
1829 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1830 device_printf(sc->mfi_dev,
1831 "Error %d fetching controller entries\n",
1832 dcmd->header.cmd_status);
1833 mfi_release_command(cm);
1837 mfi_release_command(cm);
1839 for (i = 0; i < el->count; i++) {
1841 * If this event is newer than 'stop_seq' then
1842 * break out of the loop. Note that the log
1843 * is a circular buffer so we have to handle
1844 * the case that our stop point is earlier in
1845 * the buffer than our start point.
1847 if (el->event[i].seq >= stop_seq) {
1848 if (start_seq <= stop_seq)
1850 else if (el->event[i].seq < start_seq)
1853 mfi_queue_evt(sc, &el->event[i]);
/* Continue the next fetch just past the last entry we saw. */
1855 seq = el->event[el->count - 1].seq + 1;
/*
 * Attach logical drive 'id': record it on the pending list, fetch its
 * info via MFI_DCMD_LD_GET_INFO (sleeping), and — unless the LD is an
 * SSCD (CacheCade) volume — hand off to mfi_add_ld_complete() which
 * creates the mfid child device.  For SSCD volumes ld_info is freed
 * here instead of being handed to the child.  Requires mfi_io_lock.
 */
1863 mfi_add_ld(struct mfi_softc *sc, int id)
1865 struct mfi_command *cm;
1866 struct mfi_dcmd_frame *dcmd = NULL;
1867 struct mfi_ld_info *ld_info = NULL;
1868 struct mfi_disk_pending *ld_pend;
1871 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1873 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1874 if (ld_pend != NULL) {
1875 ld_pend->ld_id = id;
1876 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1879 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1880 (void **)&ld_info, sizeof(*ld_info));
1882 device_printf(sc->mfi_dev,
1883 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1885 free(ld_info, M_MFIBUF);
1888 cm->cm_flags = MFI_CMD_DATAIN;
1889 dcmd = &cm->cm_frame->dcmd;
1891 if (mfi_wait_command(sc, cm) != 0) {
1892 device_printf(sc->mfi_dev,
1893 "Failed to get logical drive: %d\n", id);
1894 free(ld_info, M_MFIBUF);
1897 if (ld_info->ld_config.params.isSSCD != 1)
1898 mfi_add_ld_complete(cm);
1900 mfi_release_command(cm);
1901 if (ld_info) /* SSCD drives ld_info free here */
1902 free(ld_info, M_MFIBUF);
/*
 * Second half of logical-drive attach: validate the LD_GET_INFO status,
 * then (with mfi_io_lock dropped) create an "mfid" child device carrying
 * ld_info as its ivars and attach it via bus_generic_attach().  On
 * failure ld_info is freed here; on success ownership passes to the
 * child.  NOTE(review): the wakeup(&sc->mfi_map_sync_cm) calls look
 * copied from the map-sync path — verify against the full source.
 */
1908 mfi_add_ld_complete(struct mfi_command *cm)
1910 struct mfi_frame_header *hdr;
1911 struct mfi_ld_info *ld_info;
1912 struct mfi_softc *sc;
1916 hdr = &cm->cm_frame->header;
1917 ld_info = cm->cm_private;
1919 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1920 free(ld_info, M_MFIBUF);
1921 wakeup(&sc->mfi_map_sync_cm);
1922 mfi_release_command(cm);
1925 wakeup(&sc->mfi_map_sync_cm);
1926 mfi_release_command(cm);
/* Newbus calls must be made without the io mutex held. */
1928 mtx_unlock(&sc->mfi_io_lock);
1930 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1931 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1932 free(ld_info, M_MFIBUF);
1934 mtx_lock(&sc->mfi_io_lock);
1938 device_set_ivars(child, ld_info);
1939 device_set_desc(child, "MFI Logical Disk");
1940 bus_generic_attach(sc->mfi_dev);
1942 mtx_lock(&sc->mfi_io_lock);
/*
 * Attach system physical drive 'id': record it on the pending list,
 * fetch its info with a polled MFI_DCMD_PD_GET_INFO, and hand off to
 * mfi_add_sys_pd_complete() which creates the mfisyspd child device.
 * Requires mfi_io_lock.
 */
1945 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1947 struct mfi_command *cm;
1948 struct mfi_dcmd_frame *dcmd = NULL;
1949 struct mfi_pd_info *pd_info = NULL;
1950 struct mfi_system_pending *syspd_pend;
1953 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1955 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1956 if (syspd_pend != NULL) {
1957 syspd_pend->pd_id = id;
1958 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1961 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1962 (void **)&pd_info, sizeof(*pd_info));
1964 device_printf(sc->mfi_dev,
1965 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1968 free(pd_info, M_MFIBUF);
1971 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1972 dcmd = &cm->cm_frame->dcmd;
1974 dcmd->header.scsi_status = 0;
1975 dcmd->header.pad0 = 0;
1976 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1977 device_printf(sc->mfi_dev,
1978 "Failed to get physical drive info %d\n", id);
1979 free(pd_info, M_MFIBUF);
1980 mfi_release_command(cm);
1983 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1984 BUS_DMASYNC_POSTREAD);
1985 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Hand the fetched pd_info to the completion path for child creation. */
1986 mfi_add_sys_pd_complete(cm);
/*
 * Second half of system-PD attach: verify the PD_GET_INFO status and
 * that the drive really is in SYSTEM state, then (with mfi_io_lock
 * dropped) create an "mfisyspd" child carrying pd_info as ivars and
 * attach it.  On any failure pd_info is freed; on success ownership
 * passes to the child.
 */
1991 mfi_add_sys_pd_complete(struct mfi_command *cm)
1993 struct mfi_frame_header *hdr;
1994 struct mfi_pd_info *pd_info;
1995 struct mfi_softc *sc;
1999 hdr = &cm->cm_frame->header;
2000 pd_info = cm->cm_private;
2002 if (hdr->cmd_status != MFI_STAT_OK) {
2003 free(pd_info, M_MFIBUF);
2004 mfi_release_command(cm);
2007 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2008 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2009 pd_info->ref.v.device_id);
2010 free(pd_info, M_MFIBUF);
2011 mfi_release_command(cm);
2014 mfi_release_command(cm);
/* Newbus calls must be made without the io mutex held. */
2016 mtx_unlock(&sc->mfi_io_lock);
2018 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2019 device_printf(sc->mfi_dev, "Failed to add system pd\n");
2020 free(pd_info, M_MFIBUF);
2022 mtx_lock(&sc->mfi_io_lock);
2026 device_set_ivars(child, pd_info);
2027 device_set_desc(child, "MFI System PD");
2028 bus_generic_attach(sc->mfi_dev);
2030 mtx_lock(&sc->mfi_io_lock);
/*
 * Pull the next bio off the driver's bio queue and build an MFI command
 * for it — LD I/O or system-PD passthrough depending on bio_driver2.
 * Keeps two free commands in reserve so ioctls cannot be starved.  If
 * command construction fails, the bio is re-queued.  Returns NULL when
 * there is nothing to do or no resources.
 */
2033 static struct mfi_command *
2034 mfi_bio_command(struct mfi_softc *sc)
2037 struct mfi_command *cm = NULL;
2039 /*reserving two commands to avoid starvation for IOCTL*/
2040 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2043 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2046 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2047 cm = mfi_build_ldio(sc, bio);
2048 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2049 cm = mfi_build_syspdio(sc, bio);
/* Construction failed: put the bio back for a later retry. */
2052 mfi_enqueue_bio(sc, bio);
2057 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
/*
 * Encode a READ/WRITE CDB into 'cdb', choosing the smallest CDB size
 * (6/10/12/16 bytes) that can hold the LBA and block count.  Returns the
 * CDB length chosen.
 */
2061 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2065 if (((lba & 0x1fffff) == lba)
2066 && ((block_count & 0xff) == block_count)
2068 /* We can fit in a 6 byte cdb */
2069 struct scsi_rw_6 *scsi_cmd;
2071 scsi_cmd = (struct scsi_rw_6 *)cdb;
2072 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2073 scsi_ulto3b(lba, scsi_cmd->addr);
2074 scsi_cmd->length = block_count & 0xff;
2075 scsi_cmd->control = 0;
2076 cdb_len = sizeof(*scsi_cmd);
2077 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2078 /* Need a 10 byte CDB */
2079 struct scsi_rw_10 *scsi_cmd;
2081 scsi_cmd = (struct scsi_rw_10 *)cdb;
2082 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2083 scsi_cmd->byte2 = byte2;
2084 scsi_ulto4b(lba, scsi_cmd->addr);
2085 scsi_cmd->reserved = 0;
2086 scsi_ulto2b(block_count, scsi_cmd->length);
2087 scsi_cmd->control = 0;
2088 cdb_len = sizeof(*scsi_cmd);
2089 } else if (((block_count & 0xffffffff) == block_count) &&
2090 ((lba & 0xffffffff) == lba)) {
2091 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2092 struct scsi_rw_12 *scsi_cmd;
2094 scsi_cmd = (struct scsi_rw_12 *)cdb;
2095 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2096 scsi_cmd->byte2 = byte2;
2097 scsi_ulto4b(lba, scsi_cmd->addr);
2098 scsi_cmd->reserved = 0;
2099 scsi_ulto4b(block_count, scsi_cmd->length);
2100 scsi_cmd->control = 0;
2101 cdb_len = sizeof(*scsi_cmd);
2104 * 16 byte CDB. We'll only get here if the LBA is larger
2107 struct scsi_rw_16 *scsi_cmd;
2109 scsi_cmd = (struct scsi_rw_16 *)cdb;
2110 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2111 scsi_cmd->byte2 = byte2;
2112 scsi_u64to8b(lba, scsi_cmd->addr);
2113 scsi_cmd->reserved = 0;
2114 scsi_ulto4b(block_count, scsi_cmd->length);
2115 scsi_cmd->control = 0;
2116 cdb_len = sizeof(*scsi_cmd);
2122 extern char *unmapped_buf;
/*
 * Build a SCSI passthrough (MFI_CMD_PD_SCSI_IO) command for a bio aimed
 * at a system PD.  Encodes a READ/WRITE CDB via mfi_build_cdb(), points
 * cm_data at unmapped_buf (the real pages are mapped later via
 * bus_dmamap_load_bio), and returns the prepared command, or NULL if no
 * free command is available.  Unsupported bio commands (e.g. BIO_DELETE)
 * are completed with EOPNOTSUPP.  Requires mfi_io_lock.
 */
2124 static struct mfi_command *
2125 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2127 struct mfi_command *cm;
2128 struct mfi_pass_frame *pass;
2129 uint32_t context = 0;
2130 int flags = 0, blkcount = 0, readop;
2133 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2135 if ((cm = mfi_dequeue_free(sc)) == NULL)
2138 /* Zero out the MFI frame */
/* Preserve the frame's context across the bzero — it identifies the slot. */
2139 context = cm->cm_frame->header.context;
2140 bzero(cm->cm_frame, sizeof(union mfi_frame));
2141 cm->cm_frame->header.context = context;
2142 pass = &cm->cm_frame->pass;
2143 bzero(pass->cdb, 16);
2144 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2145 switch (bio->bio_cmd) {
2147 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2151 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2155 /* TODO: what about BIO_DELETE??? */
2156 biofinish(bio, NULL, EOPNOTSUPP);
2157 mfi_enqueue_free(cm);
2161 /* Cheat with the sector length to avoid a non-constant division */
2162 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2163 /* Fill the LBA and Transfer length in CDB */
2164 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
/* Target id was stashed in bio_driver1 by the disk layer. */
2166 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2167 pass->header.lun_id = 0;
2168 pass->header.timeout = 0;
2169 pass->header.flags = 0;
2170 pass->header.scsi_status = 0;
2171 pass->header.sense_len = MFI_SENSE_LEN;
2172 pass->header.data_len = bio->bio_bcount;
2173 pass->header.cdb_len = cdb_len;
2174 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2175 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2176 cm->cm_complete = mfi_bio_complete;
2177 cm->cm_private = bio;
2178 cm->cm_data = unmapped_buf;
2179 cm->cm_len = bio->bio_bcount;
2180 cm->cm_sg = &pass->sgl;
2181 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2182 cm->cm_flags = flags;
/*
 * Build an LD read/write (MFI_CMD_LD_READ/WRITE) command for a bio.
 * data_len here is in blocks (unlike the passthrough path, which uses
 * bytes); the 64-bit LBA is split across lba_hi/lba_lo.  Returns the
 * prepared command, or NULL if no free command is available.
 * Unsupported bio commands are completed with EOPNOTSUPP.  Requires
 * mfi_io_lock.
 */
2187 static struct mfi_command *
2188 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2190 struct mfi_io_frame *io;
2191 struct mfi_command *cm;
2194 uint32_t context = 0;
2196 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2198 if ((cm = mfi_dequeue_free(sc)) == NULL)
2201 /* Zero out the MFI frame */
/* Preserve the frame's context across the bzero — it identifies the slot. */
2202 context = cm->cm_frame->header.context;
2203 bzero(cm->cm_frame, sizeof(union mfi_frame));
2204 cm->cm_frame->header.context = context;
2205 io = &cm->cm_frame->io;
2206 switch (bio->bio_cmd) {
2208 io->header.cmd = MFI_CMD_LD_READ;
2209 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2212 io->header.cmd = MFI_CMD_LD_WRITE;
2213 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2216 /* TODO: what about BIO_DELETE??? */
2217 biofinish(bio, NULL, EOPNOTSUPP);
2218 mfi_enqueue_free(cm);
2222 /* Cheat with the sector length to avoid a non-constant division */
2223 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2224 io->header.target_id = (uintptr_t)bio->bio_driver1;
2225 io->header.timeout = 0;
2226 io->header.flags = 0;
2227 io->header.scsi_status = 0;
2228 io->header.sense_len = MFI_SENSE_LEN;
/* LD I/O frames express data_len in blocks, not bytes. */
2229 io->header.data_len = blkcount;
2230 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2231 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2232 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2233 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2234 cm->cm_complete = mfi_bio_complete;
2235 cm->cm_private = bio;
2236 cm->cm_data = unmapped_buf;
2237 cm->cm_len = bio->bio_bcount;
2238 cm->cm_sg = &io->sgl;
2239 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2240 cm->cm_flags = flags;
/*
 * Completion handler for bio-backed commands: translate firmware or
 * driver errors into BIO_ERROR/EIO on the bio (printing sense data for
 * SCSI failures), release the command, and finish the bio through the
 * disk layer.
 */
2246 mfi_bio_complete(struct mfi_command *cm)
2249 struct mfi_frame_header *hdr;
2250 struct mfi_softc *sc;
2252 bio = cm->cm_private;
2253 hdr = &cm->cm_frame->header;
2256 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2257 bio->bio_flags |= BIO_ERROR;
2258 bio->bio_error = EIO;
2259 device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2260 "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2261 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2262 } else if (cm->cm_error != 0) {
2263 bio->bio_flags |= BIO_ERROR;
2264 bio->bio_error = cm->cm_error;
2265 device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2269 mfi_release_command(cm);
2270 mfi_disk_complete(bio);
/*
 * Pump the I/O queues: while the controller isn't frozen, take work in
 * priority order — already-prepared ready commands, then CAM ccbs, then
 * bios — and submit each via mfi_mapcmd().  On submission failure the
 * command is put back on the ready queue (the queue will typically be
 * frozen and retried later).
 */
2274 mfi_startio(struct mfi_softc *sc)
2276 struct mfi_command *cm;
2277 struct ccb_hdr *ccbh;
2280 /* Don't bother if we're short on resources */
2281 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2284 /* Try a command that has already been prepared */
2285 cm = mfi_dequeue_ready(sc);
2288 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2289 cm = sc->mfi_cam_start(ccbh);
2292 /* Nope, so look for work on the bioq */
2294 cm = mfi_bio_command(sc);
2296 /* No work available, so exit */
2300 /* Send the command to the controller */
2301 if (mfi_mapcmd(sc, cm) != 0) {
2302 device_printf(sc->mfi_dev, "Failed to startio\n");
2303 mfi_requeue_ready(cm);
/*
 * Map a command's data for DMA and send it.  Commands with data (except
 * MFI_CMD_STP) are loaded via the appropriate bus_dmamap_load variant
 * (ccb, bio, or plain buffer) with mfi_data_cb doing the actual send
 * from the callback; EINPROGRESS freezes the queue until the deferred
 * load completes.  Data-less commands go straight to mfi_send_frame().
 * Requires mfi_io_lock.
 */
2310 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2314 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2316 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
/* Polled commands must not sleep in the DMA load path. */
2317 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2318 if (cm->cm_flags & MFI_CMD_CCB)
2319 error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2320 cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2322 else if (cm->cm_flags & MFI_CMD_BIO)
2323 error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2324 cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2327 error = bus_dmamap_load(sc->mfi_buffer_dmat,
2328 cm->cm_dmamap, cm->cm_data, cm->cm_len,
2329 mfi_data_cb, cm, polled);
2330 if (error == EINPROGRESS) {
/* Deferred load: freeze the queue until the callback fires. */
2331 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2335 error = mfi_send_frame(sc, cm);
/*
 * bus_dma load callback: fill in the command's scatter/gather list from
 * the DMA segments and send the frame.  SKINNY controllers use IEEE
 * SGEs for I/O commands; STP commands embed the first cm_stp_len bytes
 * in the frame and offset the first segment accordingly; otherwise
 * plain sg32/sg64 entries are used per MFI_FLAGS_SG64.  Also performs
 * the PRE sync, grows cm_total_frame_size/cm_extra_frames to cover the
 * SGL, and completes the command with an error if the load failed.
 */
2342 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2344 struct mfi_frame_header *hdr;
2345 struct mfi_command *cm;
2347 struct mfi_softc *sc;
2348 int i, j, first, dir;
2349 int sge_size, locked;
2351 cm = (struct mfi_command *)arg;
2353 hdr = &cm->cm_frame->header;
2357 * We need to check if we have the lock as this is async
2358 * callback so even though our caller mfi_mapcmd asserts
2359 * it has the lock, there is no guarantee that hasn't been
2360 * dropped if bus_dmamap_load returned prior to our
2363 if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2364 mtx_lock(&sc->mfi_io_lock);
2367 printf("error %d in callback\n", error);
2368 cm->cm_error = error;
2369 mfi_complete(sc, cm);
2372 /* Use IEEE sgl only for IO's on a SKINNY controller
2373 * For other commands on a SKINNY controller use either
2374 * sg32 or sg64 based on the sizeof(bus_addr_t).
2375 * Also calculate the total frame size based on the type
2378 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2379 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2380 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2381 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2382 for (i = 0; i < nsegs; i++) {
2383 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2384 sgl->sg_skinny[i].len = segs[i].ds_len;
2385 sgl->sg_skinny[i].flag = 0;
2387 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2388 sge_size = sizeof(struct mfi_sg_skinny);
2389 hdr->sg_count = nsegs;
2392 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
/* STP: first cm_stp_len bytes ride in the frame; offset segment 0. */
2393 first = cm->cm_stp_len;
2394 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2395 sgl->sg32[j].addr = segs[0].ds_addr;
2396 sgl->sg32[j++].len = first;
2398 sgl->sg64[j].addr = segs[0].ds_addr;
2399 sgl->sg64[j++].len = first;
2403 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2404 for (i = 0; i < nsegs; i++) {
2405 sgl->sg32[j].addr = segs[i].ds_addr + first;
2406 sgl->sg32[j++].len = segs[i].ds_len - first;
2410 for (i = 0; i < nsegs; i++) {
2411 sgl->sg64[j].addr = segs[i].ds_addr + first;
2412 sgl->sg64[j++].len = segs[i].ds_len - first;
2415 hdr->flags |= MFI_FRAME_SGL64;
2418 sge_size = sc->mfi_sge_size;
2422 if (cm->cm_flags & MFI_CMD_DATAIN) {
2423 dir |= BUS_DMASYNC_PREREAD;
2424 hdr->flags |= MFI_FRAME_DIR_READ;
2426 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2427 dir |= BUS_DMASYNC_PREWRITE;
2428 hdr->flags |= MFI_FRAME_DIR_WRITE;
2430 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2431 cm->cm_flags |= MFI_CMD_MAPPED;
2434 * Instead of calculating the total number of frames in the
2435 * compound frame, it's already assumed that there will be at
2436 * least 1 frame, so don't compensate for the modulo of the
2437 * following division.
2439 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2440 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2442 if ((error = mfi_send_frame(sc, cm)) != 0) {
2443 printf("error %d in callback from mfi_send_frame\n", error);
2444 cm->cm_error = error;
2445 mfi_complete(sc, cm);
2450 /* leave the lock in the state we found it */
2452 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_send_frame: issue a frame to the adapter, dispatching to the
 * Thunderbolt path when MFA is enabled, otherwise the standard path.
 * On failure, a command still on the busy queue is removed from it.
 * Requires the io lock.  (Excerpt listing; some lines elided.)
 */
2458 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2462 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2464 if (sc->MFA_enabled)
2465 error = mfi_tbolt_send_frame(sc, cm);
2467 error = mfi_std_send_frame(sc, cm);
2469 if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2470 mfi_remove_busy(cm);
/*
 * mfi_std_send_frame: post a frame to a non-Thunderbolt controller.
 * Non-polled commands are timestamped and put on the busy queue;
 * polled commands busy-wait for a status update, bounded by
 * mfi_polled_cmd_timeout.  (Excerpt listing; some lines elided.)
 */
2476 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2478 struct mfi_frame_header *hdr;
/* Polling budget in milliseconds (timeout is configured in seconds). */
2479 int tm = mfi_polled_cmd_timeout * 1000;
2481 hdr = &cm->cm_frame->header;
2483 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2484 cm->cm_timestamp = time_uptime;
2485 mfi_enqueue_busy(cm);
/* Polled: sentinel status lets us detect completion by busy-waiting. */
2487 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2488 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2492 * The bus address of the command is aligned on a 64 byte boundary,
2493 * leaving the least 6 bits as zero. For whatever reason, the
2494 * hardware wants the address shifted right by three, leaving just
2495 * 3 zero bits. These three bits are then used as a prefetching
2496 * hint for the hardware to predict how many frames need to be
2497 * fetched across the bus. If a command has more than 8 frames
2498 * then the 3 bits are set to 0x7 and the firmware uses other
2499 * information in the command to determine the total amount to fetch.
2500 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2501 * is enough for both 32bit and 64bit systems.
2503 if (cm->cm_extra_frames > 7)
2504 cm->cm_extra_frames = 7;
2506 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2508 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2511 /* This is a polled command, so busy-wait for it to complete. */
2512 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
/* Still at the sentinel value after the wait loop => the frame timed out. */
2519 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2520 device_printf(sc->mfi_dev, "Frame %p timed out "
2521 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * mfi_complete: finish a command — post-sync and unload its DMA map if
 * it was mapped, mark it completed, and invoke its completion callback
 * when one is set.  Requires the io lock.  (Excerpt; some lines elided.)
 */
2530 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2533 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2535 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP commands are treated as reads for the post-sync as well. */
2537 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2538 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2539 dir |= BUS_DMASYNC_POSTREAD;
2540 if (cm->cm_flags & MFI_CMD_DATAOUT)
2541 dir |= BUS_DMASYNC_POSTWRITE;
2543 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2544 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2545 cm->cm_flags &= ~MFI_CMD_MAPPED;
2548 cm->cm_flags |= MFI_CMD_COMPLETED;
2550 if (cm->cm_complete != NULL)
2551 cm->cm_complete(cm);
/*
 * mfi_abort: build and issue a polled MFI_CMD_ABORT frame targeting
 * *cm_abort, then wait (bounded retries) for the aborted command to be
 * cleared; if it never clears, force its completion callback.
 * (Excerpt listing; some lines elided.)
 */
2557 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2559 struct mfi_command *cm;
2560 struct mfi_abort_frame *abort;
2562 uint32_t context = 0;
2564 mtx_lock(&sc->mfi_io_lock);
2565 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2566 mtx_unlock(&sc->mfi_io_lock);
2570 /* Zero out the MFI frame */
/* Preserve the per-command context across the bzero of the frame. */
2571 context = cm->cm_frame->header.context;
2572 bzero(cm->cm_frame, sizeof(union mfi_frame));
2573 cm->cm_frame->header.context = context;
2575 abort = &cm->cm_frame->abort;
2576 abort->header.cmd = MFI_CMD_ABORT;
2577 abort->header.flags = 0;
2578 abort->header.scsi_status = 0;
/* Identify the victim by its context and frame bus address (lo/hi split). */
2579 abort->abort_context = (*cm_abort)->cm_frame->header.context;
2580 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2581 abort->abort_mfi_addr_hi =
2582 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2584 cm->cm_flags = MFI_CMD_POLLED;
2586 if ((error = mfi_mapcmd(sc, cm)) != 0)
2587 device_printf(sc->mfi_dev, "failed to abort command\n");
2588 mfi_release_command(cm);
2590 mtx_unlock(&sc->mfi_io_lock);
/* Give the firmware up to 5 sleep periods to retire the aborted command. */
2591 while (i < 5 && *cm_abort != NULL) {
2592 tsleep(cm_abort, 0, "mfiabort",
2596 if (*cm_abort != NULL) {
2597 /* Force a complete if command didn't abort */
2598 mtx_lock(&sc->mfi_io_lock);
2599 (*cm_abort)->cm_complete(*cm_abort);
2600 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_dump_blocks: write 'len' bytes at 'virt' to logical disk 'id'
 * starting at 'lba', using a polled MFI_CMD_LD_WRITE frame.  Used by the
 * kernel crash-dump path.  (Excerpt listing; some lines elided.)
 */
2607 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2610 struct mfi_command *cm;
2611 struct mfi_io_frame *io;
2613 uint32_t context = 0;
2615 if ((cm = mfi_dequeue_free(sc)) == NULL)
2618 /* Zero out the MFI frame */
/* Preserve per-command context across the frame bzero. */
2619 context = cm->cm_frame->header.context;
2620 bzero(cm->cm_frame, sizeof(union mfi_frame));
2621 cm->cm_frame->header.context = context;
2623 io = &cm->cm_frame->io;
2624 io->header.cmd = MFI_CMD_LD_WRITE;
2625 io->header.target_id = id;
2626 io->header.timeout = 0;
2627 io->header.flags = 0;
2628 io->header.scsi_status = 0;
2629 io->header.sense_len = MFI_SENSE_LEN;
/* data_len for IO frames is a sector count, not bytes. */
2630 io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2631 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2632 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2633 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2634 io->lba_lo = lba & 0xffffffff;
2637 cm->cm_sg = &io->sgl;
2638 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2639 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2641 if ((error = mfi_mapcmd(sc, cm)) != 0)
2642 device_printf(sc->mfi_dev, "failed dump blocks\n");
/* Polled command is done here; sync, unload and release unconditionally. */
2643 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2644 BUS_DMASYNC_POSTWRITE);
2645 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2646 mfi_release_command(cm);
/*
 * mfi_dump_syspd_blocks: like mfi_dump_blocks, but targets a system
 * physical disk via a polled SCSI pass-through (MFI_CMD_PD_SCSI_IO)
 * frame with a generated CDB.  (Excerpt listing; some lines elided.)
 */
2652 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2655 struct mfi_command *cm;
2656 struct mfi_pass_frame *pass;
2657 int error, readop, cdb_len;
2660 if ((cm = mfi_dequeue_free(sc)) == NULL)
2663 pass = &cm->cm_frame->pass;
2664 bzero(pass->cdb, 16);
2665 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Build a READ/WRITE CDB sized for the lba/blkcount (6/10/12/16 byte). */
2668 blkcount = howmany(len, MFI_SECTOR_LEN);
2669 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2670 pass->header.target_id = id;
2671 pass->header.timeout = 0;
2672 pass->header.flags = 0;
2673 pass->header.scsi_status = 0;
2674 pass->header.sense_len = MFI_SENSE_LEN;
/* Pass-through frames carry a byte count, unlike IO frames. */
2675 pass->header.data_len = len;
2676 pass->header.cdb_len = cdb_len;
2677 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2678 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2681 cm->cm_sg = &pass->sgl;
2682 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2683 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2685 if ((error = mfi_mapcmd(sc, cm)) != 0)
2686 device_printf(sc->mfi_dev, "failed dump blocks\n");
2687 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2688 BUS_DMASYNC_POSTWRITE);
2689 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2690 mfi_release_command(cm);
/*
 * mfi_open: cdev open entry point.  Refuses new opens while the driver
 * is detaching; otherwise marks the softc open.
 * (Excerpt listing; some lines elided.)
 */
2696 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2698 struct mfi_softc *sc;
2703 mtx_lock(&sc->mfi_io_lock);
2704 if (sc->mfi_detaching)
2707 sc->mfi_flags |= MFI_FLAGS_OPEN;
2710 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_close: cdev close entry point.  Clears the open flag and removes
 * any AEN registration belonging to the closing process.
 * (Excerpt listing; some lines elided.)
 */
2716 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2718 struct mfi_softc *sc;
2719 struct mfi_aen *mfi_aen_entry, *tmp;
2723 mtx_lock(&sc->mfi_io_lock);
2724 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* Drop this process's async-event-notification registrations, if any. */
2726 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2727 if (mfi_aen_entry->p == curproc) {
2728 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2730 free(mfi_aen_entry, M_MFIBUF);
2733 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_config_lock: take the exclusive configuration sx lock for DCMD
 * opcodes that change array configuration; returns a token indicating
 * whether the lock was taken, for mfi_config_unlock().
 * (Excerpt listing; some lines elided.)
 */
2738 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2742 case MFI_DCMD_LD_DELETE:
2743 case MFI_DCMD_CFG_ADD:
2744 case MFI_DCMD_CFG_CLEAR:
2745 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2746 sx_xlock(&sc->mfi_config_lock);
/*
 * mfi_config_unlock: release the configuration sx lock if the paired
 * mfi_config_lock() call actually took it ('locked' token non-zero).
 * (Excerpt listing; some lines elided.)
 */
2754 mfi_config_unlock(struct mfi_softc *sc, int locked)
2758 sx_xunlock(&sc->mfi_config_lock);
2762 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * Disables the affected mfid/syspd disks before a destructive DCMD
 * (LD delete, config clear, PD state change) is sent to the firmware,
 * vetoing the command if a disk cannot be disabled.
 * Requires the io lock.  (Excerpt listing; some lines elided.)
 */
2766 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2768 struct mfi_disk *ld, *ld2;
2770 struct mfi_system_pd *syspd = NULL;
2774 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2776 switch (cm->cm_frame->dcmd.opcode) {
2777 case MFI_DCMD_LD_DELETE:
/* Locate the logical disk named in mbox[0] and disable it. */
2778 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2779 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2785 error = mfi_disk_disable(ld);
2787 case MFI_DCMD_CFG_CLEAR:
/* Disable every logical disk; on failure, re-enable those already done. */
2788 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2789 error = mfi_disk_disable(ld);
2794 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2797 mfi_disk_enable(ld2);
2801 case MFI_DCMD_PD_STATE_SET:
/* mbox layout: mbox[0..1] = pd id, mbox[2] = target state. */
2802 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2804 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2805 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2806 if (syspd->pd_id == syspd_id)
2813 error = mfi_syspd_disable(syspd);
2821 /* Perform post-issue checks on commands from userland. */
/*
 * Counterpart of mfi_check_command_pre: after the destructive DCMD has
 * run, either tears down the child device(s) on success or re-enables
 * the disk(s) on failure.  (Excerpt listing; some lines elided.)
 */
2823 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2825 struct mfi_disk *ld, *ldn;
2826 struct mfi_system_pd *syspd = NULL;
2830 switch (cm->cm_frame->dcmd.opcode) {
2831 case MFI_DCMD_LD_DELETE:
2832 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2833 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2836 KASSERT(ld != NULL, ("volume dissappeared"));
2837 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* Drop the io lock around device_delete_child (it may sleep). */
2838 mtx_unlock(&sc->mfi_io_lock);
2840 device_delete_child(sc->mfi_dev, ld->ld_dev);
2842 mtx_lock(&sc->mfi_io_lock);
/* Delete failed in firmware: bring the disk back online. */
2844 mfi_disk_enable(ld);
2846 case MFI_DCMD_CFG_CLEAR:
2847 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2848 mtx_unlock(&sc->mfi_io_lock);
2850 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2851 device_delete_child(sc->mfi_dev, ld->ld_dev);
2854 mtx_lock(&sc->mfi_io_lock);
2856 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2857 mfi_disk_enable(ld);
2860 case MFI_DCMD_CFG_ADD:
2863 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2866 case MFI_DCMD_PD_STATE_SET:
/* mbox layout matches the pre-check: id in mbox[0..1], state in mbox[2]. */
2867 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2869 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2870 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2871 if (syspd->pd_id == syspd_id)
2877 /* If the transition fails then enable the syspd again */
2878 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2879 mfi_syspd_enable(syspd);
/*
 * mfi_check_for_sscd: detect whether a CFG_ADD/LD_DELETE DCMD targets a
 * CacheCade (SSCD) volume, in which case the normal pre/post disk
 * enable/disable checks are skipped by the caller.  For LD_DELETE this
 * requires issuing an LD_GET_INFO query to the firmware.
 * (Excerpt listing; some lines elided.)
 */
2885 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2887 struct mfi_config_data *conf_data;
2888 struct mfi_command *ld_cm = NULL;
2889 struct mfi_ld_info *ld_info = NULL;
2890 struct mfi_ld_config *ld;
2894 conf_data = (struct mfi_config_data *)cm->cm_data;
2896 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
/* The LD config record follows the array records in the config blob. */
2897 p = (char *)conf_data->array;
2898 p += conf_data->array_size * conf_data->array_count;
2899 ld = (struct mfi_ld_config *)p;
2900 if (ld->params.isSSCD == 1)
2902 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2903 error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2904 (void **)&ld_info, sizeof(*ld_info));
2906 device_printf(sc->mfi_dev, "Failed to allocate"
2907 "MFI_DCMD_LD_GET_INFO %d", error);
2909 free(ld_info, M_MFIBUF);
/* Query the target LD (id taken from the delete command's mbox[0]). */
2912 ld_cm->cm_flags = MFI_CMD_DATAIN;
2913 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2914 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2915 if (mfi_wait_command(sc, ld_cm) != 0) {
2916 device_printf(sc->mfi_dev, "failed to get log drv\n");
2917 mfi_release_command(ld_cm);
2918 free(ld_info, M_MFIBUF);
2922 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2923 free(ld_info, M_MFIBUF);
2924 mfi_release_command(ld_cm);
2928 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2930 if (ld_info->ld_config.params.isSSCD == 1)
2933 mfi_release_command(ld_cm);
2934 free(ld_info, M_MFIBUF);
/*
 * mfi_stp_cmd: prepare an STP (SATA pass-through) ioctl command.
 * Allocates a kernel bounce buffer (DMA tag + map + memory) per user
 * SGE, copies the user data in, and points both the megasas-style SGE
 * array and the frame's own sg32/sg64 list at the bounce buffers.
 * (Excerpt listing; some lines elided.)
 */
2941 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2944 struct mfi_ioc_packet *ioc;
2945 ioc = (struct mfi_ioc_packet *)arg;
2946 int sge_size, error;
2947 struct megasas_sge *kern_sge;
2949 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* The SGE array lives inside the frame at the user-supplied offset. */
2950 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2951 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2953 if (sizeof(bus_addr_t) == 8) {
2954 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2955 cm->cm_extra_frames = 2;
2956 sge_size = sizeof(struct mfi_sg64);
2958 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2959 sge_size = sizeof(struct mfi_sg32);
2962 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
/* One DMA tag/map/buffer per user iovec; addresses limited to 32 bits. */
2963 for (i = 0; i < ioc->mfi_sge_count; i++) {
2964 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2965 1, 0, /* algnmnt, boundary */
2966 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2967 BUS_SPACE_MAXADDR, /* highaddr */
2968 NULL, NULL, /* filter, filterarg */
2969 ioc->mfi_sgl[i].iov_len,/* maxsize */
2971 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2972 BUS_DMA_ALLOCNOW, /* flags */
2973 NULL, NULL, /* lockfunc, lockarg */
2974 &sc->mfi_kbuff_arr_dmat[i])) {
2975 device_printf(sc->mfi_dev,
2976 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2980 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2981 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2982 &sc->mfi_kbuff_arr_dmamap[i])) {
2983 device_printf(sc->mfi_dev,
2984 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
/* mfi_addr_cb stashes the bus address into mfi_kbuff_arr_busaddr[i]. */
2988 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2989 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2990 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2991 &sc->mfi_kbuff_arr_busaddr[i], 0);
2993 if (!sc->kbuff_arr[i]) {
2994 device_printf(sc->mfi_dev,
2995 "Could not allocate memory for kbuff_arr info\n");
2998 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2999 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
/* Mirror the bounce-buffer addresses into the frame's native SG list. */
3001 if (sizeof(bus_addr_t) == 8) {
3002 cm->cm_frame->stp.sgl.sg64[i].addr =
3003 kern_sge[i].phys_addr;
3004 cm->cm_frame->stp.sgl.sg64[i].len =
3005 ioc->mfi_sgl[i].iov_len;
3007 cm->cm_frame->stp.sgl.sg32[i].addr =
3008 kern_sge[i].phys_addr;
3009 cm->cm_frame->stp.sgl.sg32[i].len =
3010 ioc->mfi_sgl[i].iov_len;
3013 error = copyin(ioc->mfi_sgl[i].iov_base,
3015 ioc->mfi_sgl[i].iov_len);
3017 device_printf(sc->mfi_dev, "Copy in failed\n");
3022 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * mfi_user_command: execute a userland MFIIO_PASSTHRU DCMD.  Copies in
 * the optional data buffer (capped at 1MB), runs the DCMD with the
 * pre/post configuration checks, then copies the frame and data back
 * out.  (Excerpt listing; some lines elided.)
 */
3027 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3029 struct mfi_command *cm;
3030 struct mfi_dcmd_frame *dcmd;
3031 void *ioc_buf = NULL;
3033 int error = 0, locked;
3036 if (ioc->buf_size > 0) {
/* Sanity cap on the user buffer: 1MB. */
3037 if (ioc->buf_size > 1024 * 1024)
3039 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3040 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3042 device_printf(sc->mfi_dev, "failed to copyin\n");
3043 free(ioc_buf, M_MFIBUF);
/* Take the config sx lock first if this opcode modifies configuration. */
3048 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3050 mtx_lock(&sc->mfi_io_lock);
/* Block until a free command is available rather than failing. */
3051 while ((cm = mfi_dequeue_free(sc)) == NULL)
3052 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3054 /* Save context for later */
3055 context = cm->cm_frame->header.context;
3057 dcmd = &cm->cm_frame->dcmd;
3058 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3060 cm->cm_sg = &dcmd->sgl;
3061 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3062 cm->cm_data = ioc_buf;
3063 cm->cm_len = ioc->buf_size;
3065 /* restore context */
3066 cm->cm_frame->header.context = context;
3068 /* Cheat since we don't know if we're writing or reading */
3069 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3071 error = mfi_check_command_pre(sc, cm);
3075 error = mfi_wait_command(sc, cm);
3077 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the (possibly firmware-updated) frame to the caller. */
3080 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3081 mfi_check_command_post(sc, cm);
3083 mfi_release_command(cm);
3084 mtx_unlock(&sc->mfi_io_lock);
3085 mfi_config_unlock(sc, locked);
3086 if (ioc->buf_size > 0)
3087 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3089 free(ioc_buf, M_MFIBUF);
/* Convert a (possibly 32-bit) user pointer value back into a kernel void *. */
3093 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * mfi_ioctl: main character-device ioctl dispatcher.  Handles queue
 * statistics, disk queries, the native MFI_CMD packet interface (with
 * 32-bit compat), AEN registration, Linux ioctl shims, and PASSTHRU.
 * NOTE(review): this is an excerpt listing with many lines elided
 * (several case labels, error paths and closing braces are not shown);
 * comments describe only the visible code.
 */
3096 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3098 struct mfi_softc *sc;
3099 union mfi_statrequest *ms;
3100 struct mfi_ioc_packet *ioc;
3101 #ifdef COMPAT_FREEBSD32
3102 struct mfi_ioc_packet32 *ioc32;
3104 struct mfi_ioc_aen *aen;
3105 struct mfi_command *cm = NULL;
3106 uint32_t context = 0;
3107 union mfi_sense_ptr sense_ptr;
3108 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3111 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3112 #ifdef COMPAT_FREEBSD32
3113 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3114 struct mfi_ioc_passthru iop_swab;
/* Refuse ioctls while the controller is in a critical/pending-reset state. */
3124 if (sc->hw_crit_error)
3127 if (sc->issuepend_done == 0)
/* Queue-statistics request: copy out the requested mfi_qstat entry. */
3132 ms = (union mfi_statrequest *)arg;
3133 switch (ms->ms_item) {
3138 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3139 sizeof(struct mfi_qstat));
3146 case MFIIO_QUERY_DISK:
3148 struct mfi_query_disk *qd;
3149 struct mfi_disk *ld;
3151 qd = (struct mfi_query_disk *)arg;
3152 mtx_lock(&sc->mfi_io_lock);
3153 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3154 if (ld->ld_id == qd->array_id)
3159 mtx_unlock(&sc->mfi_io_lock);
3163 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3165 bzero(qd->devname, SPECNAMELEN + 1);
3166 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3167 mtx_unlock(&sc->mfi_io_lock);
3171 #ifdef COMPAT_FREEBSD32
/* Native MFI command packet path (also reached by the 32-bit variant). */
3175 devclass_t devclass;
3176 ioc = (struct mfi_ioc_packet *)arg;
/* Requests may name a different adapter; re-resolve the softc if so. */
3179 adapter = ioc->mfi_adapter_no;
3180 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3181 devclass = devclass_find("mfi");
3182 sc = devclass_get_softc(devclass, adapter);
3184 mtx_lock(&sc->mfi_io_lock);
3185 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3186 mtx_unlock(&sc->mfi_io_lock);
3189 mtx_unlock(&sc->mfi_io_lock);
3193 * save off original context since copying from user
3194 * will clobber some data
3196 context = cm->cm_frame->header.context;
3197 cm->cm_frame->header.context = cm->cm_index;
3199 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3200 2 * MEGAMFI_FRAME_SIZE);
3201 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3202 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3203 cm->cm_frame->header.scsi_status = 0;
3204 cm->cm_frame->header.pad0 = 0;
3205 if (ioc->mfi_sge_count) {
3207 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Derive DATAIN/DATAOUT from the frame flags supplied by userland. */
3211 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3212 cm->cm_flags |= MFI_CMD_DATAIN;
3213 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3214 cm->cm_flags |= MFI_CMD_DATAOUT;
3215 /* Legacy app shim */
3216 if (cm->cm_flags == 0)
3217 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3218 cm->cm_len = cm->cm_frame->header.data_len;
/* STP commands carry an extra first-SGE payload accounted in cm_stp_len. */
3219 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3220 #ifdef COMPAT_FREEBSD32
3221 if (cmd == MFI_CMD) {
3224 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3225 #ifdef COMPAT_FREEBSD32
3227 /* 32bit on 64bit */
3228 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3229 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3232 cm->cm_len += cm->cm_stp_len;
3235 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3236 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3242 /* restore header context */
3243 cm->cm_frame->header.context = context;
/* STP path: hand off to the dedicated bounce-buffer setup. */
3245 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3246 res = mfi_stp_cmd(sc, cm, arg);
/* Copy user SG data into the contiguous kernel buffer for DATAOUT. */
3251 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3252 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3253 for (i = 0; i < ioc->mfi_sge_count; i++) {
3254 #ifdef COMPAT_FREEBSD32
3255 if (cmd == MFI_CMD) {
3258 addr = ioc->mfi_sgl[i].iov_base;
3259 len = ioc->mfi_sgl[i].iov_len;
3260 #ifdef COMPAT_FREEBSD32
3262 /* 32bit on 64bit */
3263 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3264 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3265 len = ioc32->mfi_sgl[i].iov_len;
3268 error = copyin(addr, temp, len);
3270 device_printf(sc->mfi_dev,
3271 "Copy in failed\n");
3279 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3280 locked = mfi_config_lock(sc,
3281 cm->cm_frame->dcmd.opcode);
/* Point pass-through sense at the command's preallocated sense buffer. */
3283 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3284 cm->cm_frame->pass.sense_addr_lo =
3285 (uint32_t)cm->cm_sense_busaddr;
3286 cm->cm_frame->pass.sense_addr_hi =
3287 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3289 mtx_lock(&sc->mfi_io_lock);
/* CacheCade commands bypass the pre/post disk enable/disable checks. */
3290 skip_pre_post = mfi_check_for_sscd (sc, cm);
3291 if (!skip_pre_post) {
3292 error = mfi_check_command_pre(sc, cm);
3294 mtx_unlock(&sc->mfi_io_lock);
3298 if ((error = mfi_wait_command(sc, cm)) != 0) {
3299 device_printf(sc->mfi_dev,
3300 "Controller polled failed\n");
3301 mtx_unlock(&sc->mfi_io_lock);
3304 if (!skip_pre_post) {
3305 mfi_check_command_post(sc, cm);
3307 mtx_unlock(&sc->mfi_io_lock);
/* Copy result data back out to the user's SG list (non-STP). */
3309 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3311 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3312 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3313 for (i = 0; i < ioc->mfi_sge_count; i++) {
3314 #ifdef COMPAT_FREEBSD32
3315 if (cmd == MFI_CMD) {
3318 addr = ioc->mfi_sgl[i].iov_base;
3319 len = ioc->mfi_sgl[i].iov_len;
3320 #ifdef COMPAT_FREEBSD32
3322 /* 32bit on 64bit */
3323 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3324 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3325 len = ioc32->mfi_sgl[i].iov_len;
3328 error = copyout(temp, addr, len);
3330 device_printf(sc->mfi_dev,
3331 "Copy out failed\n");
3339 if (ioc->mfi_sense_len) {
3340 /* get user-space sense ptr then copy out sense */
3341 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3342 &sense_ptr.sense_ptr_data[0],
3343 sizeof(sense_ptr.sense_ptr_data));
3344 #ifdef COMPAT_FREEBSD32
3345 if (cmd != MFI_CMD) {
3347 * not 64bit native so zero out any address
3349 sense_ptr.addr.high = 0;
3352 error = copyout(cm->cm_sense, sense_ptr.user_space,
3353 ioc->mfi_sense_len);
3355 device_printf(sc->mfi_dev,
3356 "Copy out failed\n");
3361 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
/* Cleanup: unlock config, free buffers, tear down STP bounce buffers. */
3363 mfi_config_unlock(sc, locked);
3365 free(data, M_MFIBUF);
3366 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3367 for (i = 0; i < 2; i++) {
3368 if (sc->kbuff_arr[i]) {
3369 if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3371 sc->mfi_kbuff_arr_dmat[i],
3372 sc->mfi_kbuff_arr_dmamap[i]
3374 if (sc->kbuff_arr[i] != NULL)
3376 sc->mfi_kbuff_arr_dmat[i],
3378 sc->mfi_kbuff_arr_dmamap[i]
3380 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3381 bus_dma_tag_destroy(
3382 sc->mfi_kbuff_arr_dmat[i]);
3387 mtx_lock(&sc->mfi_io_lock);
3388 mfi_release_command(cm);
3389 mtx_unlock(&sc->mfi_io_lock);
/* AEN registration for the native interface. */
3395 aen = (struct mfi_ioc_aen *)arg;
3396 mtx_lock(&sc->mfi_io_lock);
3397 error = mfi_aen_register(sc, aen->aen_seq_num,
3398 aen->aen_class_locale);
3399 mtx_unlock(&sc->mfi_io_lock);
3402 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3404 devclass_t devclass;
3405 struct mfi_linux_ioc_packet l_ioc;
3408 devclass = devclass_find("mfi");
3409 if (devclass == NULL)
3412 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Route to the adapter the Linux request names, then re-dispatch. */
3415 adapter = l_ioc.lioc_adapter_no;
3416 sc = devclass_get_softc(devclass, adapter);
3419 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3420 cmd, arg, flag, td));
3423 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3425 devclass_t devclass;
3426 struct mfi_linux_ioc_aen l_aen;
3429 devclass = devclass_find("mfi");
3430 if (devclass == NULL)
3433 error = copyin(arg, &l_aen, sizeof(l_aen));
3436 adapter = l_aen.laen_adapter_no;
3437 sc = devclass_get_softc(devclass, adapter);
3440 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3441 cmd, arg, flag, td));
3444 #ifdef COMPAT_FREEBSD32
3445 case MFIIO_PASSTHRU32:
3446 if (!SV_CURPROC_FLAG(SV_ILP32)) {
/* Widen the 32-bit passthru request into the native layout. */
3450 iop_swab.ioc_frame = iop32->ioc_frame;
3451 iop_swab.buf_size = iop32->buf_size;
3452 iop_swab.buf = PTRIN(iop32->buf);
3456 case MFIIO_PASSTHRU:
3457 error = mfi_user_command(sc, iop);
3458 #ifdef COMPAT_FREEBSD32
3459 if (cmd == MFIIO_PASSTHRU32)
3460 iop32->ioc_frame = iop_swab.ioc_frame;
3464 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_linux_ioctl_int: back end for the Linux megaraid_sas ioctl shim.
 * Mirrors the native MFI_CMD packet path: copy in the Linux-layout
 * packet, run the frame, copy data/sense/status back to 32-bit user
 * addresses.  Also handles the Linux AEN registration ioctl.
 * NOTE(review): excerpt listing; some lines (error paths, closing
 * braces) are elided — comments cover only the visible code.
 */
3473 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3475 struct mfi_softc *sc;
3476 struct mfi_linux_ioc_packet l_ioc;
3477 struct mfi_linux_ioc_aen l_aen;
3478 struct mfi_command *cm = NULL;
3479 struct mfi_aen *mfi_aen_entry;
3480 union mfi_sense_ptr sense_ptr;
3481 uint32_t context = 0;
3482 uint8_t *data = NULL, *temp;
3489 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3490 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the user-supplied SGE count before using it. */
3494 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3498 mtx_lock(&sc->mfi_io_lock);
3499 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3500 mtx_unlock(&sc->mfi_io_lock);
3503 mtx_unlock(&sc->mfi_io_lock);
3507 * save off original context since copying from user
3508 * will clobber some data
3510 context = cm->cm_frame->header.context;
3512 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3513 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3514 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3515 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3516 cm->cm_frame->header.scsi_status = 0;
3517 cm->cm_frame->header.pad0 = 0;
3518 if (l_ioc.lioc_sge_count)
3520 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
/* Derive data direction from the frame flags supplied by userland. */
3522 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3523 cm->cm_flags |= MFI_CMD_DATAIN;
3524 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3525 cm->cm_flags |= MFI_CMD_DATAOUT;
3526 cm->cm_len = cm->cm_frame->header.data_len;
3528 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3529 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3535 /* restore header context */
3536 cm->cm_frame->header.context = context;
/* Gather the user's scattered output data into the kernel buffer. */
3539 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3540 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3541 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3543 l_ioc.lioc_sgl[i].iov_len);
3545 device_printf(sc->mfi_dev,
3546 "Copy in failed\n");
3549 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3553 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3554 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3556 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3557 cm->cm_frame->pass.sense_addr_lo =
3558 (uint32_t)cm->cm_sense_busaddr;
3559 cm->cm_frame->pass.sense_addr_hi =
3560 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3563 mtx_lock(&sc->mfi_io_lock);
3564 error = mfi_check_command_pre(sc, cm);
3566 mtx_unlock(&sc->mfi_io_lock);
3570 if ((error = mfi_wait_command(sc, cm)) != 0) {
3571 device_printf(sc->mfi_dev,
3572 "Controller polled failed\n");
3573 mtx_unlock(&sc->mfi_io_lock);
3577 mfi_check_command_post(sc, cm);
3578 mtx_unlock(&sc->mfi_io_lock);
/* Scatter the result data back to the user's iovecs. */
3581 if (cm->cm_flags & MFI_CMD_DATAIN) {
3582 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3583 error = copyout(temp,
3584 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3585 l_ioc.lioc_sgl[i].iov_len);
3587 device_printf(sc->mfi_dev,
3588 "Copy out failed\n");
3591 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3595 if (l_ioc.lioc_sense_len) {
3596 /* get user-space sense ptr then copy out sense */
3597 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3598 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3599 &sense_ptr.sense_ptr_data[0],
3600 sizeof(sense_ptr.sense_ptr_data));
3603 * only 32bit Linux support so zero out any
3604 * address over 32bit
3606 sense_ptr.addr.high = 0;
3608 error = copyout(cm->cm_sense, sense_ptr.user_space,
3609 l_ioc.lioc_sense_len);
3611 device_printf(sc->mfi_dev,
3612 "Copy out failed\n");
/* Write the command status directly into the user's packet header. */
3617 error = copyout(&cm->cm_frame->header.cmd_status,
3618 &((struct mfi_linux_ioc_packet*)arg)
3619 ->lioc_frame.hdr.cmd_status,
3622 device_printf(sc->mfi_dev,
3623 "Copy out failed\n");
3628 mfi_config_unlock(sc, locked);
3630 free(data, M_MFIBUF);
3632 mtx_lock(&sc->mfi_io_lock);
3633 mfi_release_command(cm);
3634 mtx_unlock(&sc->mfi_io_lock);
3638 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3639 error = copyin(arg, &l_aen, sizeof(l_aen));
3642 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
/* Record the registering process so mfi_close can unregister it. */
3643 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3645 mtx_lock(&sc->mfi_io_lock);
3646 if (mfi_aen_entry != NULL) {
3647 mfi_aen_entry->p = curproc;
3648 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3651 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3652 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
3655 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3657 free(mfi_aen_entry, M_MFIBUF);
3659 mtx_unlock(&sc->mfi_io_lock);
3663 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_poll: cdev poll entry point.  Reports readability when an async
 * event (AEN) has triggered; otherwise records the thread for selwakeup
 * when one arrives.  (Excerpt listing; some lines elided.)
 */
3672 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3674 struct mfi_softc *sc;
3679 if (poll_events & (POLLIN | POLLRDNORM)) {
3680 if (sc->mfi_aen_triggered != 0) {
/* Consume the trigger: report ready and clear it. */
3681 revents |= poll_events & (POLLIN | POLLRDNORM);
3682 sc->mfi_aen_triggered = 0;
3684 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
/* Nothing ready: register for notification on the select queue. */
3690 if (poll_events & (POLLIN | POLLRDNORM)) {
3691 sc->mfi_poll_waiting = 1;
3692 selrecord(td, &sc->mfi_select);
/*
 * Body of a debug dump routine: walks every mfi softc via the devclass
 * and prints busy commands older than mfi_cmd_timeout.
 * NOTE(review): the function's definition line falls in an elided part
 * of this excerpt — presumably mfi_dump_all(); confirm in the full file.
 */
3702 struct mfi_softc *sc;
3703 struct mfi_command *cm;
3709 dc = devclass_find("mfi");
3711 printf("No mfi dev class\n");
/* Iterate adapters by unit number until devclass_get_softc runs out. */
3715 for (i = 0; ; i++) {
3716 sc = devclass_get_softc(dc, i);
3719 device_printf(sc->mfi_dev, "Dumping\n\n");
3721 deadline = time_uptime - mfi_cmd_timeout;
3722 mtx_lock(&sc->mfi_io_lock);
3723 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3724 if (cm->cm_timestamp <= deadline) {
3725 device_printf(sc->mfi_dev,
3726 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3727 cm, (int)(time_uptime - cm->cm_timestamp));
3738 mtx_unlock(&sc->mfi_io_lock);
3745 mfi_timeout(void *data)
3747 struct mfi_softc *sc = (struct mfi_softc *)data;
3748 struct mfi_command *cm, *tmp;
3752 deadline = time_uptime - mfi_cmd_timeout;
3753 if (sc->adpreset == 0) {
3754 if (!mfi_tbolt_reset(sc)) {
3755 callout_reset(&sc->mfi_watchdog_callout,
3756 mfi_cmd_timeout * hz, mfi_timeout, sc);
3760 mtx_lock(&sc->mfi_io_lock);
3761 TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3762 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3764 if (cm->cm_timestamp <= deadline) {
3765 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3766 cm->cm_timestamp = time_uptime;
3768 device_printf(sc->mfi_dev,
3769 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3770 cm, (int)(time_uptime - cm->cm_timestamp)
3773 MFI_VALIDATE_CMD(sc, cm);
3775 * While commands can get stuck forever we do
3776 * not fail them as there is no way to tell if
3777 * the controller has actually processed them
3780 * In addition its very likely that force
3781 * failing a command here would cause a panic
3794 mtx_unlock(&sc->mfi_io_lock);
3796 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,