2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
4 * Copyright (c) 2006 IronPort Systems
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2007 LSI Corp.
30 * Copyright (c) 2007 Rajesh Prabhakaran.
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
58 #include "opt_compat.h"
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/sysctl.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
67 #include <sys/selinfo.h>
70 #include <sys/eventhandler.h>
73 #include <sys/ioccom.h>
76 #include <sys/signalvar.h>
77 #include <sys/sysent.h>
78 #include <sys/taskqueue.h>
80 #include <machine/bus.h>
81 #include <machine/resource.h>
83 #include <dev/mfi/mfireg.h>
84 #include <dev/mfi/mfi_ioctl.h>
85 #include <dev/mfi/mfivar.h>
86 #include <sys/interrupt.h>
87 #include <sys/priority.h>
89 static int mfi_alloc_commands(struct mfi_softc *);
90 static int mfi_comms_init(struct mfi_softc *);
91 static int mfi_get_controller_info(struct mfi_softc *);
92 static int mfi_get_log_state(struct mfi_softc *,
93 struct mfi_evt_log_state **);
94 static int mfi_parse_entries(struct mfi_softc *, int, int);
95 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
96 static void mfi_startup(void *arg);
97 static void mfi_intr(void *arg);
98 static void mfi_ldprobe(struct mfi_softc *sc);
99 static void mfi_syspdprobe(struct mfi_softc *sc);
100 static void mfi_handle_evt(void *context, int pending);
101 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
102 static void mfi_aen_complete(struct mfi_command *);
103 static int mfi_add_ld(struct mfi_softc *sc, int);
104 static void mfi_add_ld_complete(struct mfi_command *);
105 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
106 static void mfi_add_sys_pd_complete(struct mfi_command *);
107 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
108 static void mfi_bio_complete(struct mfi_command *);
109 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
110 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
111 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
113 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
114 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
115 static void mfi_timeout(void *);
116 static int mfi_user_command(struct mfi_softc *,
117 struct mfi_ioc_passthru *);
118 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
119 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
120 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
121 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
122 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
123 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
124 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
126 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
128 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
129 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
130 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
131 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
132 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
134 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
135 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
137 0, "event message locale");
139 static int mfi_event_class = MFI_EVT_CLASS_INFO;
140 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
141 0, "event message class");
143 static int mfi_max_cmds = 128;
144 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
145 0, "Max commands limit (-1 = controller limit)");
147 static int mfi_detect_jbod_change = 1;
148 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
149 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
151 int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
152 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
153 &mfi_polled_cmd_timeout, 0,
154 "Polled command timeout - used for firmware flash etc (in seconds)");
156 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
157 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
158 0, "Command timeout (in seconds)");
160 /* Management interface */
161 static d_open_t mfi_open;
162 static d_close_t mfi_close;
163 static d_ioctl_t mfi_ioctl;
164 static d_poll_t mfi_poll;
166 static struct cdevsw mfi_cdevsw = {
167 .d_version = D_VERSION,
170 .d_close = mfi_close,
171 .d_ioctl = mfi_ioctl,
176 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
178 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
179 struct mfi_skinny_dma_info mfi_skinny;
182 mfi_enable_intr_xscale(struct mfi_softc *sc)
184 MFI_WRITE4(sc, MFI_OMSK, 0x01);
188 mfi_enable_intr_ppc(struct mfi_softc *sc)
190 if (sc->mfi_flags & MFI_FLAGS_1078) {
191 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
192 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
194 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
195 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
196 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
204 mfi_read_fw_status_xscale(struct mfi_softc *sc)
206 return MFI_READ4(sc, MFI_OMSG0);
210 mfi_read_fw_status_ppc(struct mfi_softc *sc)
212 return MFI_READ4(sc, MFI_OSP0);
216 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
220 status = MFI_READ4(sc, MFI_OSTS);
221 if ((status & MFI_OSTS_INTR_VALID) == 0)
224 MFI_WRITE4(sc, MFI_OSTS, status);
229 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
233 status = MFI_READ4(sc, MFI_OSTS);
234 if (sc->mfi_flags & MFI_FLAGS_1078) {
235 if (!(status & MFI_1078_RM)) {
239 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
240 if (!(status & MFI_GEN2_RM)) {
244 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
245 if (!(status & MFI_SKINNY_RM)) {
249 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
250 MFI_WRITE4(sc, MFI_OSTS, status);
252 MFI_WRITE4(sc, MFI_ODCR0, status);
257 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
259 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
263 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
265 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
266 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
267 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
269 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
274 mfi_transition_firmware(struct mfi_softc *sc)
276 uint32_t fw_state, cur_state;
278 uint32_t cur_abs_reg_val = 0;
279 uint32_t prev_abs_reg_val = 0;
281 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
282 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
283 while (fw_state != MFI_FWSTATE_READY) {
285 device_printf(sc->mfi_dev, "Waiting for firmware to "
287 cur_state = fw_state;
289 case MFI_FWSTATE_FAULT:
290 device_printf(sc->mfi_dev, "Firmware fault\n");
292 case MFI_FWSTATE_WAIT_HANDSHAKE:
293 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
294 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
296 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
297 max_wait = MFI_RESET_WAIT_TIME;
299 case MFI_FWSTATE_OPERATIONAL:
300 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
301 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
303 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
304 max_wait = MFI_RESET_WAIT_TIME;
306 case MFI_FWSTATE_UNDEFINED:
307 case MFI_FWSTATE_BB_INIT:
308 max_wait = MFI_RESET_WAIT_TIME;
310 case MFI_FWSTATE_FW_INIT_2:
311 max_wait = MFI_RESET_WAIT_TIME;
313 case MFI_FWSTATE_FW_INIT:
314 case MFI_FWSTATE_FLUSH_CACHE:
315 max_wait = MFI_RESET_WAIT_TIME;
317 case MFI_FWSTATE_DEVICE_SCAN:
318 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
319 prev_abs_reg_val = cur_abs_reg_val;
321 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
322 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
323 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
325 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
326 max_wait = MFI_RESET_WAIT_TIME;
329 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
333 for (i = 0; i < (max_wait * 10); i++) {
334 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
335 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
336 if (fw_state == cur_state)
341 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
342 /* Check the device scanning progress */
343 if (prev_abs_reg_val != cur_abs_reg_val) {
347 if (fw_state == cur_state) {
348 device_printf(sc->mfi_dev, "Firmware stuck in state "
357 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
362 *addr = segs[0].ds_addr;
367 mfi_attach(struct mfi_softc *sc)
370 int error, commsz, framessz, sensesz;
371 int frames, unit, max_fw_sge, max_fw_cmds;
372 uint32_t tb_mem_size = 0;
378 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
381 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
382 sx_init(&sc->mfi_config_lock, "MFI config");
383 TAILQ_INIT(&sc->mfi_ld_tqh);
384 TAILQ_INIT(&sc->mfi_syspd_tqh);
385 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
386 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
387 TAILQ_INIT(&sc->mfi_evt_queue);
388 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
389 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
390 TAILQ_INIT(&sc->mfi_aen_pids);
391 TAILQ_INIT(&sc->mfi_cam_ccbq);
399 sc->last_seq_num = 0;
400 sc->disableOnlineCtrlReset = 1;
401 sc->issuepend_done = 1;
402 sc->hw_crit_error = 0;
404 if (sc->mfi_flags & MFI_FLAGS_1064R) {
405 sc->mfi_enable_intr = mfi_enable_intr_xscale;
406 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
407 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
408 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
409 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
410 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
411 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
412 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
413 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
414 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
415 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
417 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
419 sc->mfi_enable_intr = mfi_enable_intr_ppc;
420 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
421 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
422 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
426 /* Before we get too far, see if the firmware is working */
427 if ((error = mfi_transition_firmware(sc)) != 0) {
428 device_printf(sc->mfi_dev, "Firmware not in READY state, "
429 "error %d\n", error);
433 /* Start: LSIP200113393 */
434 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
435 1, 0, /* algnmnt, boundary */
436 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
437 BUS_SPACE_MAXADDR, /* highaddr */
438 NULL, NULL, /* filter, filterarg */
439 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
441 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
443 NULL, NULL, /* lockfunc, lockarg */
444 &sc->verbuf_h_dmat)) {
445 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
448 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
449 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
450 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
453 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
454 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
455 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
456 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
457 /* End: LSIP200113393 */
460 * Get information needed for sizing the contiguous memory for the
461 * frame pool. Size down the sgl parameter since we know that
462 * we will never need more than what's required for MAXPHYS.
463 * It would be nice if these constants were available at runtime
464 * instead of compile time.
466 status = sc->mfi_read_fw_status(sc);
467 max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
468 if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
469 device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
470 max_fw_cmds, mfi_max_cmds);
471 sc->mfi_max_fw_cmds = mfi_max_cmds;
473 sc->mfi_max_fw_cmds = max_fw_cmds;
475 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
476 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
478 /* ThunderBolt Support get the contiguous memory */
480 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
481 mfi_tbolt_init_globals(sc);
482 device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
483 "MaxSgl = %d, state = %#x\n", max_fw_cmds,
484 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
485 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
487 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
488 1, 0, /* algnmnt, boundary */
489 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 BUS_SPACE_MAXADDR, /* highaddr */
491 NULL, NULL, /* filter, filterarg */
492 tb_mem_size, /* maxsize */
494 tb_mem_size, /* maxsegsize */
496 NULL, NULL, /* lockfunc, lockarg */
498 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
501 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
502 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
503 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
506 bzero(sc->request_message_pool, tb_mem_size);
507 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
508 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
510 /* For ThunderBolt memory init */
511 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
512 0x100, 0, /* alignmnt, boundary */
513 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
514 BUS_SPACE_MAXADDR, /* highaddr */
515 NULL, NULL, /* filter, filterarg */
516 MFI_FRAME_SIZE, /* maxsize */
518 MFI_FRAME_SIZE, /* maxsegsize */
520 NULL, NULL, /* lockfunc, lockarg */
521 &sc->mfi_tb_init_dmat)) {
522 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
525 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
526 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
527 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
530 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
531 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
532 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
533 &sc->mfi_tb_init_busaddr, 0);
534 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
536 device_printf(sc->mfi_dev,
537 "Thunderbolt pool preparation error\n");
542 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
543 we are taking it different from what we have allocated for Request
544 and reply descriptors to avoid confusion later
546 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
547 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
548 1, 0, /* algnmnt, boundary */
549 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
550 BUS_SPACE_MAXADDR, /* highaddr */
551 NULL, NULL, /* filter, filterarg */
552 tb_mem_size, /* maxsize */
554 tb_mem_size, /* maxsegsize */
556 NULL, NULL, /* lockfunc, lockarg */
557 &sc->mfi_tb_ioc_init_dmat)) {
558 device_printf(sc->mfi_dev,
559 "Cannot allocate comms DMA tag\n");
562 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
563 (void **)&sc->mfi_tb_ioc_init_desc,
564 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
565 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
568 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
569 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
570 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
571 &sc->mfi_tb_ioc_init_busaddr, 0);
574 * Create the dma tag for data buffers. Used both for block I/O
575 * and for various internal data queries.
577 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
578 1, 0, /* algnmnt, boundary */
579 BUS_SPACE_MAXADDR, /* lowaddr */
580 BUS_SPACE_MAXADDR, /* highaddr */
581 NULL, NULL, /* filter, filterarg */
582 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
583 sc->mfi_max_sge, /* nsegments */
584 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
585 BUS_DMA_ALLOCNOW, /* flags */
586 busdma_lock_mutex, /* lockfunc */
587 &sc->mfi_io_lock, /* lockfuncarg */
588 &sc->mfi_buffer_dmat)) {
589 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
594 * Allocate DMA memory for the comms queues. Keep it under 4GB for
595 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
596 * entry, so the calculated size here will be will be 1 more than
597 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
599 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
600 sizeof(struct mfi_hwcomms);
601 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
602 1, 0, /* algnmnt, boundary */
603 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
604 BUS_SPACE_MAXADDR, /* highaddr */
605 NULL, NULL, /* filter, filterarg */
606 commsz, /* maxsize */
608 commsz, /* maxsegsize */
610 NULL, NULL, /* lockfunc, lockarg */
611 &sc->mfi_comms_dmat)) {
612 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
615 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
616 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
617 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
620 bzero(sc->mfi_comms, commsz);
621 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
622 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
624 * Allocate DMA memory for the command frames. Keep them in the
625 * lower 4GB for efficiency. Calculate the size of the commands at
626 * the same time; each command is one 64 byte frame plus a set of
627 * additional frames for holding sg lists or other data.
628 * The assumption here is that the SG list will start at the second
629 * frame and not use the unused bytes in the first frame. While this
630 * isn't technically correct, it simplifies the calculation and allows
631 * for command frames that might be larger than an mfi_io_frame.
633 if (sizeof(bus_addr_t) == 8) {
634 sc->mfi_sge_size = sizeof(struct mfi_sg64);
635 sc->mfi_flags |= MFI_FLAGS_SG64;
637 sc->mfi_sge_size = sizeof(struct mfi_sg32);
639 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
640 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
641 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
642 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
643 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
644 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
645 64, 0, /* algnmnt, boundary */
646 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
647 BUS_SPACE_MAXADDR, /* highaddr */
648 NULL, NULL, /* filter, filterarg */
649 framessz, /* maxsize */
651 framessz, /* maxsegsize */
653 NULL, NULL, /* lockfunc, lockarg */
654 &sc->mfi_frames_dmat)) {
655 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
658 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
659 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
660 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
663 bzero(sc->mfi_frames, framessz);
664 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
665 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
667 * Allocate DMA memory for the frame sense data. Keep them in the
668 * lower 4GB for efficiency
670 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
671 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
672 4, 0, /* algnmnt, boundary */
673 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
674 BUS_SPACE_MAXADDR, /* highaddr */
675 NULL, NULL, /* filter, filterarg */
676 sensesz, /* maxsize */
678 sensesz, /* maxsegsize */
680 NULL, NULL, /* lockfunc, lockarg */
681 &sc->mfi_sense_dmat)) {
682 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
685 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
686 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
687 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
690 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
691 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
692 if ((error = mfi_alloc_commands(sc)) != 0)
695 /* Before moving the FW to operational state, check whether
696 * hostmemory is required by the FW or not
699 /* ThunderBolt MFI_IOC2 INIT */
700 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
701 sc->mfi_disable_intr(sc);
702 mtx_lock(&sc->mfi_io_lock);
703 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
704 device_printf(sc->mfi_dev,
705 "TB Init has failed with error %d\n",error);
706 mtx_unlock(&sc->mfi_io_lock);
709 mtx_unlock(&sc->mfi_io_lock);
711 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
713 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
714 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
716 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
719 sc->mfi_intr_ptr = mfi_intr_tbolt;
720 sc->mfi_enable_intr(sc);
722 if ((error = mfi_comms_init(sc)) != 0)
725 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
726 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
727 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
730 sc->mfi_intr_ptr = mfi_intr;
731 sc->mfi_enable_intr(sc);
733 if ((error = mfi_get_controller_info(sc)) != 0)
735 sc->disableOnlineCtrlReset = 0;
737 /* Register a config hook to probe the bus for arrays */
738 sc->mfi_ich.ich_func = mfi_startup;
739 sc->mfi_ich.ich_arg = sc;
740 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
741 device_printf(sc->mfi_dev, "Cannot establish configuration "
745 mtx_lock(&sc->mfi_io_lock);
746 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
747 mtx_unlock(&sc->mfi_io_lock);
750 mtx_unlock(&sc->mfi_io_lock);
753 * Register a shutdown handler.
755 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
756 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
757 device_printf(sc->mfi_dev, "Warning: shutdown event "
758 "registration failed\n");
762 * Create the control device for doing management
764 unit = device_get_unit(sc->mfi_dev);
765 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
766 0640, "mfi%d", unit);
768 make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
769 sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
770 if (sc->mfi_cdev != NULL)
771 sc->mfi_cdev->si_drv1 = sc;
772 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
773 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
774 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
775 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
776 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
777 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
778 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
779 &sc->mfi_keep_deleted_volumes, 0,
780 "Don't detach the mfid device for a busy volume that is deleted");
782 device_add_child(sc->mfi_dev, "mfip", -1);
783 bus_generic_attach(sc->mfi_dev);
785 /* Start the timeout watchdog */
786 callout_init(&sc->mfi_watchdog_callout, 1);
787 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
790 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
791 mtx_lock(&sc->mfi_io_lock);
792 mfi_tbolt_sync_map_info(sc);
793 mtx_unlock(&sc->mfi_io_lock);
800 mfi_alloc_commands(struct mfi_softc *sc)
802 struct mfi_command *cm;
806 * XXX Should we allocate all the commands up front, or allocate on
807 * demand later like 'aac' does?
809 sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
810 sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
812 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
813 cm = &sc->mfi_commands[i];
814 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
815 sc->mfi_cmd_size * i);
816 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
817 sc->mfi_cmd_size * i;
818 cm->cm_frame->header.context = i;
819 cm->cm_sense = &sc->mfi_sense[i];
820 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
823 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
824 &cm->cm_dmamap) == 0) {
825 mtx_lock(&sc->mfi_io_lock);
826 mfi_release_command(cm);
827 mtx_unlock(&sc->mfi_io_lock);
829 device_printf(sc->mfi_dev, "Failed to allocate %d "
830 "command blocks, only allocated %d\n",
831 sc->mfi_max_fw_cmds, i - 1);
832 for (j = 0; j < i; j++) {
833 cm = &sc->mfi_commands[i];
834 bus_dmamap_destroy(sc->mfi_buffer_dmat,
837 free(sc->mfi_commands, M_MFIBUF);
838 sc->mfi_commands = NULL;
848 mfi_release_command(struct mfi_command *cm)
850 struct mfi_frame_header *hdr;
853 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
856 * Zero out the important fields of the frame, but make sure the
857 * context field is preserved. For efficiency, handle the fields
858 * as 32 bit words. Clear out the first S/G entry too for safety.
860 hdr = &cm->cm_frame->header;
861 if (cm->cm_data != NULL && hdr->sg_count) {
862 cm->cm_sg->sg32[0].len = 0;
863 cm->cm_sg->sg32[0].addr = 0;
867 * Command may be on other queues e.g. busy queue depending on the
868 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
871 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
873 if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
874 mfi_remove_ready(cm);
876 /* We're not expecting it to be on any other queue but check */
877 if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
878 panic("Command %p is still on another queue, flags = %#x",
883 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
884 mfi_tbolt_return_cmd(cm->cm_sc,
885 cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
889 hdr_data = (uint32_t *)cm->cm_frame;
890 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
891 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
892 hdr_data[4] = 0; /* flags, timeout */
893 hdr_data[5] = 0; /* data_len */
895 cm->cm_extra_frames = 0;
897 cm->cm_complete = NULL;
898 cm->cm_private = NULL;
901 cm->cm_total_frame_size = 0;
902 cm->retry_for_fw_reset = 0;
904 mfi_enqueue_free(cm);
908 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
909 uint32_t opcode, void **bufp, size_t bufsize)
911 struct mfi_command *cm;
912 struct mfi_dcmd_frame *dcmd;
914 uint32_t context = 0;
916 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
918 cm = mfi_dequeue_free(sc);
922 /* Zero out the MFI frame */
923 context = cm->cm_frame->header.context;
924 bzero(cm->cm_frame, sizeof(union mfi_frame));
925 cm->cm_frame->header.context = context;
927 if ((bufsize > 0) && (bufp != NULL)) {
929 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
931 mfi_release_command(cm);
940 dcmd = &cm->cm_frame->dcmd;
941 bzero(dcmd->mbox, MFI_MBOX_SIZE);
942 dcmd->header.cmd = MFI_CMD_DCMD;
943 dcmd->header.timeout = 0;
944 dcmd->header.flags = 0;
945 dcmd->header.data_len = bufsize;
946 dcmd->header.scsi_status = 0;
947 dcmd->opcode = opcode;
948 cm->cm_sg = &dcmd->sgl;
949 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
952 cm->cm_private = buf;
953 cm->cm_len = bufsize;
956 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
962 mfi_comms_init(struct mfi_softc *sc)
964 struct mfi_command *cm;
965 struct mfi_init_frame *init;
966 struct mfi_init_qinfo *qinfo;
968 uint32_t context = 0;
970 mtx_lock(&sc->mfi_io_lock);
971 if ((cm = mfi_dequeue_free(sc)) == NULL) {
972 mtx_unlock(&sc->mfi_io_lock);
976 /* Zero out the MFI frame */
977 context = cm->cm_frame->header.context;
978 bzero(cm->cm_frame, sizeof(union mfi_frame));
979 cm->cm_frame->header.context = context;
982 * Abuse the SG list area of the frame to hold the init_qinfo
985 init = &cm->cm_frame->init;
986 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
988 bzero(qinfo, sizeof(struct mfi_init_qinfo));
989 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
990 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
991 offsetof(struct mfi_hwcomms, hw_reply_q);
992 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
993 offsetof(struct mfi_hwcomms, hw_pi);
994 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
995 offsetof(struct mfi_hwcomms, hw_ci);
997 init->header.cmd = MFI_CMD_INIT;
998 init->header.data_len = sizeof(struct mfi_init_qinfo);
999 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
1001 cm->cm_flags = MFI_CMD_POLLED;
1003 if ((error = mfi_mapcmd(sc, cm)) != 0)
1004 device_printf(sc->mfi_dev, "failed to send init command\n");
1005 mfi_release_command(cm);
1006 mtx_unlock(&sc->mfi_io_lock);
1012 mfi_get_controller_info(struct mfi_softc *sc)
1014 struct mfi_command *cm = NULL;
1015 struct mfi_ctrl_info *ci = NULL;
1016 uint32_t max_sectors_1, max_sectors_2;
1019 mtx_lock(&sc->mfi_io_lock);
1020 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1021 (void **)&ci, sizeof(*ci));
1024 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1026 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1027 device_printf(sc->mfi_dev, "Failed to get controller info\n");
1028 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1034 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1035 BUS_DMASYNC_POSTREAD);
1036 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1038 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1039 max_sectors_2 = ci->max_request_size;
1040 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1041 sc->disableOnlineCtrlReset =
1042 ci->properties.OnOffProperties.disableOnlineCtrlReset;
1048 mfi_release_command(cm);
1049 mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the controller's event-log state (boot/shutdown/newest sequence
 * numbers) via a polled MFI_DCMD_CTRL_EVENT_GETINFO.  The DCMD helper
 * allocates *log_state; ownership passes to the caller, who must free it
 * with M_MFIBUF.  Caller must hold mfi_io_lock.
 */
1054 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1056 struct mfi_command *cm = NULL;
1059 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1060 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1061 (void **)log_state, sizeof(**log_state));
1064 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1066 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1067 device_printf(sc->mfi_dev, "Failed to get log state\n");
1071 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1072 BUS_DMASYNC_POSTREAD);
1073 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1077 mfi_release_command(cm);
/*
 * Prime asynchronous event notification (AEN).  When seq_start is 0 the
 * controller's log state is consulted: events logged between the last
 * shutdown and "newest" are replayed via mfi_parse_entries(), and AEN
 * registration continues from the newest sequence number.  Caller must
 * hold mfi_io_lock.
 */
1083 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1085 struct mfi_evt_log_state *log_state = NULL;
1086 union mfi_evt class_locale;
1090 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Build the class/locale filter from the driver-wide tunables. */
1092 class_locale.members.reserved = 0;
1093 class_locale.members.locale = mfi_event_locale;
1094 class_locale.members.evt_class = mfi_event_class;
1096 if (seq_start == 0) {
1097 if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1099 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1102 * Walk through any events that fired since the last
1105 if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1106 log_state->newest_seq_num)) != 0)
1108 seq = log_state->newest_seq_num;
1111 error = mfi_aen_register(sc, seq, class_locale.word);
/* log_state was allocated by mfi_get_log_state(); free it here. */
1113 free(log_state, M_MFIBUF);
/*
 * Queue a command and sleep (on mfi_io_lock) until it completes.
 * Returns the command's cm_error.  Caller must hold mfi_io_lock.
 */
1119 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1122 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1123 cm->cm_complete = NULL;
1126 * MegaCli can issue a DCMD of 0. In this case do nothing
1127 * and return 0 to it as status
1129 if (cm->cm_frame->dcmd.opcode == 0) {
1130 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1132 return (cm->cm_error);
1134 mfi_enqueue_ready(cm);
/* Sleep until the completion path sets MFI_CMD_COMPLETED and wakes us. */
1136 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1137 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1138 return (cm->cm_error);
/*
 * Tear down all driver resources: watchdog, character device, command
 * array, interrupt, DMA maps/memory/tags (sense, frames, comms), the
 * ThunderBolt-specific pools, and finally the locks.  Each resource is
 * guarded by a NULL/0 check so this is safe to call from a partially
 * failed attach.
 */
1142 mfi_free(struct mfi_softc *sc)
1144 struct mfi_command *cm;
1147 callout_drain(&sc->mfi_watchdog_callout);
1149 if (sc->mfi_cdev != NULL)
1150 destroy_dev(sc->mfi_cdev);
/* Destroy per-command DMA maps before freeing the command array. */
1152 if (sc->mfi_commands != NULL) {
1153 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1154 cm = &sc->mfi_commands[i];
1155 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1157 free(sc->mfi_commands, M_MFIBUF);
1158 sc->mfi_commands = NULL;
1162 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1163 if (sc->mfi_irq != NULL)
1164 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer DMA: unload map, free memory, destroy tag — in order. */
1167 if (sc->mfi_sense_busaddr != 0)
1168 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1169 if (sc->mfi_sense != NULL)
1170 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1171 sc->mfi_sense_dmamap);
1172 if (sc->mfi_sense_dmat != NULL)
1173 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Command frame DMA resources. */
1175 if (sc->mfi_frames_busaddr != 0)
1176 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1177 if (sc->mfi_frames != NULL)
1178 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1179 sc->mfi_frames_dmamap);
1180 if (sc->mfi_frames_dmat != NULL)
1181 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Host/firmware communication area (reply queue, producer/consumer). */
1183 if (sc->mfi_comms_busaddr != 0)
1184 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1185 if (sc->mfi_comms != NULL)
1186 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1187 sc->mfi_comms_dmamap);
1188 if (sc->mfi_comms_dmat != NULL)
1189 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1191 /* ThunderBolt contiguous memory free here */
1192 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1193 if (sc->mfi_tb_busaddr != 0)
1194 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1195 if (sc->request_message_pool != NULL)
1196 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1198 if (sc->mfi_tb_dmat != NULL)
1199 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1201 /* Version buffer memory free */
1202 /* Start LSIP200113393 */
1203 if (sc->verbuf_h_busaddr != 0)
1204 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1205 if (sc->verbuf != NULL)
1206 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1207 sc->verbuf_h_dmamap);
1208 if (sc->verbuf_h_dmat != NULL)
1209 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1211 /* End LSIP200113393 */
1212 /* ThunderBolt INIT packet memory Free */
1213 if (sc->mfi_tb_init_busaddr != 0)
1214 bus_dmamap_unload(sc->mfi_tb_init_dmat,
1215 sc->mfi_tb_init_dmamap);
1216 if (sc->mfi_tb_init != NULL)
1217 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1218 sc->mfi_tb_init_dmamap);
1219 if (sc->mfi_tb_init_dmat != NULL)
1220 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1222 /* ThunderBolt IOC Init Desc memory free here */
1223 if (sc->mfi_tb_ioc_init_busaddr != 0)
1224 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1225 sc->mfi_tb_ioc_init_dmamap);
1226 if (sc->mfi_tb_ioc_init_desc != NULL)
1227 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1228 sc->mfi_tb_ioc_init_desc,
1229 sc->mfi_tb_ioc_init_dmamap);
1230 if (sc->mfi_tb_ioc_init_dmat != NULL)
1231 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Per-command ThunderBolt structures, then the pointer array itself. */
1232 if (sc->mfi_cmd_pool_tbolt != NULL) {
1233 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1234 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1235 free(sc->mfi_cmd_pool_tbolt[i],
1237 sc->mfi_cmd_pool_tbolt[i] = NULL;
1240 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1241 sc->mfi_cmd_pool_tbolt = NULL;
1243 if (sc->request_desc_pool != NULL) {
1244 free(sc->request_desc_pool, M_MFIBUF);
1245 sc->request_desc_pool = NULL;
/* Generic data-buffer and parent DMA tags last. */
1248 if (sc->mfi_buffer_dmat != NULL)
1249 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1250 if (sc->mfi_parent_dmat != NULL)
1251 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1253 if (mtx_initialized(&sc->mfi_io_lock)) {
1254 mtx_destroy(&sc->mfi_io_lock);
1255 sx_destroy(&sc->mfi_config_lock);
/*
 * config_intrhook callback run once interrupts are available: enable
 * controller interrupts, probe logical/system drives under the config
 * and I/O locks, then detach the intrhook.
 */
1262 mfi_startup(void *arg)
1264 struct mfi_softc *sc;
1266 sc = (struct mfi_softc *)arg;
1268 sc->mfi_enable_intr(sc);
1269 sx_xlock(&sc->mfi_config_lock);
1270 mtx_lock(&sc->mfi_io_lock);
/* SKINNY controllers take a different probe path (elided here). */
1272 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1274 mtx_unlock(&sc->mfi_io_lock);
1275 sx_xunlock(&sc->mfi_config_lock);
1277 config_intrhook_disestablish(&sc->mfi_ich);
/*
 * Interrupt handler body (the declaration line is elided in this view;
 * presumably mfi_intr).  Drains the hardware reply queue: for each
 * context index posted by the firmware, completes the matching command,
 * advances the consumer index (wrapping at mfi_max_fw_cmds + 1), and
 * restarts deferred I/O.
 */
1283 struct mfi_softc *sc;
1284 struct mfi_command *cm;
1285 uint32_t pi, ci, context;
1287 sc = (struct mfi_softc *)arg;
/* Not our interrupt (or spurious): bail without taking the lock. */
1289 if (sc->mfi_check_clear_intr(sc))
1293 pi = sc->mfi_comms->hw_pi;
1294 ci = sc->mfi_comms->hw_ci;
1295 mtx_lock(&sc->mfi_io_lock);
1297 context = sc->mfi_comms->hw_reply_q[ci];
1298 if (context < sc->mfi_max_fw_cmds) {
1299 cm = &sc->mfi_commands[context];
1300 mfi_remove_busy(cm);
1302 mfi_complete(sc, cm);
/* Reply queue has mfi_max_fw_cmds + 1 slots; wrap the consumer index. */
1304 if (++ci == (sc->mfi_max_fw_cmds + 1))
1308 sc->mfi_comms->hw_ci = ci;
1310 /* Give deferred I/O a chance to run */
1311 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1313 mtx_unlock(&sc->mfi_io_lock);
1316 * Dummy read to flush the bus; this ensures that the indexes are up
1317 * to date. Restart processing if more commands have come in.
1319 (void)sc->mfi_read_fw_status(sc);
1320 if (pi != sc->mfi_comms->hw_pi)
/*
 * Shut the controller down cleanly: abort any outstanding AEN and LD-map
 * sync commands, then issue a polled MFI_DCMD_CTRL_SHUTDOWN with no data
 * transfer (MFI_FRAME_DIR_NONE).
 */
1327 mfi_shutdown(struct mfi_softc *sc)
1329 struct mfi_dcmd_frame *dcmd;
1330 struct mfi_command *cm;
/* Flag the abort first so the completion handlers know it is deliberate. */
1334 if (sc->mfi_aen_cm != NULL) {
1335 sc->cm_aen_abort = 1;
1336 mfi_abort(sc, &sc->mfi_aen_cm);
1339 if (sc->mfi_map_sync_cm != NULL) {
1340 sc->cm_map_abort = 1;
1341 mfi_abort(sc, &sc->mfi_map_sync_cm);
1344 mtx_lock(&sc->mfi_io_lock);
1345 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1347 mtx_unlock(&sc->mfi_io_lock);
1351 dcmd = &cm->cm_frame->dcmd;
1352 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1353 cm->cm_flags = MFI_CMD_POLLED;
1356 if ((error = mfi_mapcmd(sc, cm)) != 0)
1357 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1359 mfi_release_command(cm);
1360 mtx_unlock(&sc->mfi_io_lock);
/*
 * Re-scan system (JBOD) physical drives.  Queries the firmware's
 * exposed-PD list with a polled MFI_DCMD_PD_LIST_QUERY, attaches any PD
 * not already present (or pending), and detaches PDs that are no longer
 * in the list.  Requires the config sx lock and mfi_io_lock; the I/O
 * lock is dropped around device_delete_child().
 */
1365 mfi_syspdprobe(struct mfi_softc *sc)
1367 struct mfi_frame_header *hdr;
1368 struct mfi_command *cm = NULL;
1369 struct mfi_pd_list *pdlist = NULL;
1370 struct mfi_system_pd *syspd, *tmp;
1371 struct mfi_system_pending *syspd_pend;
1372 int error, i, found;
1374 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1375 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1376 /* Add SYSTEM PD's */
1377 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1378 (void **)&pdlist, sizeof(*pdlist));
1380 device_printf(sc->mfi_dev,
1381 "Error while forming SYSTEM PD list\n");
1385 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
/* mbox[0] selects the query type: only PDs exposed to the host. */
1386 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1387 cm->cm_frame->dcmd.mbox[1] = 0;
1388 if (mfi_mapcmd(sc, cm) != 0) {
1389 device_printf(sc->mfi_dev,
1390 "Failed to get syspd device listing\n");
1393 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1394 BUS_DMASYNC_POSTREAD);
1395 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1396 hdr = &cm->cm_frame->header;
1397 if (hdr->cmd_status != MFI_STAT_OK) {
1398 device_printf(sc->mfi_dev,
1399 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1402 /* Get each PD and add it to the system */
1403 for (i = 0; i < pdlist->count; i++) {
/* Skip enclosure entries (device id equals its enclosure id). */
1404 if (pdlist->addr[i].device_id ==
1405 pdlist->addr[i].encl_device_id)
/* Already attached, or attach already pending?  Then skip it. */
1408 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1409 if (syspd->pd_id == pdlist->addr[i].device_id)
1412 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1413 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1417 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1419 /* Delete SYSPD's whose state has been changed */
1420 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1422 for (i = 0; i < pdlist->count; i++) {
1423 if (syspd->pd_id == pdlist->addr[i].device_id) {
/* Drop the I/O mutex around newbus teardown; it may sleep. */
1430 mtx_unlock(&sc->mfi_io_lock);
1432 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1434 mtx_lock(&sc->mfi_io_lock);
1439 free(pdlist, M_MFIBUF);
1441 mfi_release_command(cm);
/*
 * Probe logical drives: fetch the LD list with MFI_DCMD_LD_GET_LIST
 * (waited, not polled) and attach any target not already present or
 * pending.  Requires the config sx lock and mfi_io_lock.
 */
1447 mfi_ldprobe(struct mfi_softc *sc)
1449 struct mfi_frame_header *hdr;
1450 struct mfi_command *cm = NULL;
1451 struct mfi_ld_list *list = NULL;
1452 struct mfi_disk *ld;
1453 struct mfi_disk_pending *ld_pend;
1456 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1457 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1459 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1460 (void **)&list, sizeof(*list));
1464 cm->cm_flags = MFI_CMD_DATAIN;
1465 if (mfi_wait_command(sc, cm) != 0) {
1466 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1470 hdr = &cm->cm_frame->header;
1471 if (hdr->cmd_status != MFI_STAT_OK) {
1472 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Attach each listed target unless it is already known or pending. */
1477 for (i = 0; i < list->ld_count; i++) {
1478 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1479 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1482 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1483 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1486 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1491 free(list, M_MFIBUF);
1493 mfi_release_command(cm);
1499 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1500 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp for logging.  Returns a pointer to a static
 * buffer, so the result is not reentrant and must be consumed before the
 * next call (fine for the single printf use in mfi_decode_evt()).
 */
1504 format_timestamp(uint32_t timestamp)
1506 static char buffer[32];
/* High byte all-ones means "seconds since boot" rather than since 2000. */
1508 if ((timestamp & 0xff000000) == 0xff000000)
1509 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1512 snprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an event class code to a short human-readable string; unknown
 * codes are formatted numerically into a static buffer (not reentrant,
 * like format_timestamp()).
 */
1517 format_class(int8_t class)
1519 static char buffer[6];
1522 case MFI_EVT_CLASS_DEBUG:
1524 case MFI_EVT_CLASS_PROGRESS:
1525 return ("progress");
1526 case MFI_EVT_CLASS_INFO:
1528 case MFI_EVT_CLASS_WARNING:
1530 case MFI_EVT_CLASS_CRITICAL:
1532 case MFI_EVT_CLASS_FATAL:
1534 case MFI_EVT_CLASS_DEAD:
/* Unknown class: print the raw numeric value. */
1537 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log an AEN event and react to the ones the driver cares about:
 * host-bus rescan requests, logical drives going offline, and physical
 * drive insertion/removal (JBOD attach/detach plus an optional CAM
 * rescan callback).  Events older than the boot sequence number, or any
 * event while detaching, are logged but otherwise ignored.
 */
1543 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1545 struct mfi_system_pd *syspd = NULL;
1547 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1548 format_timestamp(detail->time), detail->evt_class.members.locale,
1549 format_class(detail->evt_class.members.evt_class),
1550 detail->description);
1552 /* Don't act on old AEN's or while shutting down */
1553 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1556 switch (detail->arg_type) {
1557 case MR_EVT_ARGS_NONE:
1558 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1559 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1560 if (mfi_detect_jbod_change) {
1562 * Probe for new SYSPD's and Delete
1565 sx_xlock(&sc->mfi_config_lock);
1566 mtx_lock(&sc->mfi_io_lock);
1568 mtx_unlock(&sc->mfi_io_lock);
1569 sx_xunlock(&sc->mfi_config_lock);
1573 case MR_EVT_ARGS_LD_STATE:
1574 /* During load time driver reads all the events starting
1575 * from the one that has been logged after shutdown. Avoid
1578 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1580 struct mfi_disk *ld;
/* Find the mfid child that matches the now-offline target. */
1581 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1583 detail->args.ld_state.ld.target_id)
1587 Fix: for kernel panics when SSCD is removed
1588 KASSERT(ld != NULL, ("volume dissappeared"));
1592 device_delete_child(sc->mfi_dev, ld->ld_dev);
1597 case MR_EVT_ARGS_PD:
1598 if (detail->code == MR_EVT_PD_REMOVED) {
1599 if (mfi_detect_jbod_change) {
1601 * If the removed device is a SYSPD then
1604 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1607 detail->args.pd.device_id) {
1609 device_delete_child(
1618 if (detail->code == MR_EVT_PD_INSERTED) {
1619 if (mfi_detect_jbod_change) {
1620 /* Probe for new SYSPD's */
1621 sx_xlock(&sc->mfi_config_lock);
1622 mtx_lock(&sc->mfi_io_lock);
1624 mtx_unlock(&sc->mfi_io_lock);
1625 sx_xunlock(&sc->mfi_config_lock);
/* Let the CAM pass-through layer rescan on PD hot-plug as well. */
1628 if (sc->mfi_cam_rescan_cb != NULL &&
1629 (detail->code == MR_EVT_PD_INSERTED ||
1630 detail->code == MR_EVT_PD_REMOVED)) {
1631 sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
/*
 * Copy an event detail onto the softc's event queue and kick the SWI
 * taskqueue so mfi_handle_evt() processes it outside interrupt context.
 * Allocation is M_NOWAIT since this runs under mfi_io_lock.
 */
1638 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1640 struct mfi_evt_queue_elm *elm;
1642 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1643 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1646 memcpy(&elm->detail, detail, sizeof(*detail));
1647 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1648 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: move the pending events to a local list under the
 * I/O lock, then decode and free each one with the lock dropped (decode
 * may sleep, e.g. when deleting device children).
 */
1652 mfi_handle_evt(void *context, int pending)
1654 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1655 struct mfi_softc *sc;
1656 struct mfi_evt_queue_elm *elm;
1660 mtx_lock(&sc->mfi_io_lock);
/* Splice the whole queue out so the lock is held only briefly. */
1661 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1662 mtx_unlock(&sc->mfi_io_lock);
1663 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1664 TAILQ_REMOVE(&queue, elm, link);
1665 mfi_decode_evt(sc, &elm->detail);
1666 free(elm, M_MFIBUF);
/*
 * Register for asynchronous event notification starting at sequence
 * 'seq' with the given class/locale filter.  If an AEN command is
 * already outstanding and it covers the requested filter, nothing is
 * done; otherwise the filters are merged and the old command aborted
 * before the new MFI_DCMD_CTRL_EVENT_WAIT is queued (it completes via
 * mfi_aen_complete()).  Caller must hold mfi_io_lock.
 */
1671 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1673 struct mfi_command *cm;
1674 struct mfi_dcmd_frame *dcmd;
1675 union mfi_evt current_aen, prior_aen;
1676 struct mfi_evt_detail *ed = NULL;
1679 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1681 current_aen.word = locale;
1682 if (sc->mfi_aen_cm != NULL) {
/* Recover the filter the outstanding AEN was registered with. */
1684 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Existing registration already covers the new class and locale bits. */
1685 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1686 !((prior_aen.members.locale & current_aen.members.locale)
1687 ^current_aen.members.locale)) {
/* Otherwise widen the filter (union of locales, lowest class) and
 * abort the old command so it can be re-issued. */
1690 prior_aen.members.locale |= current_aen.members.locale;
1691 if (prior_aen.members.evt_class
1692 < current_aen.members.evt_class)
1693 current_aen.members.evt_class =
1694 prior_aen.members.evt_class;
1695 mfi_abort(sc, &sc->mfi_aen_cm);
1699 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1700 (void **)&ed, sizeof(*ed));
1704 dcmd = &cm->cm_frame->dcmd;
/* mbox[0] = starting sequence number, mbox[1] = class/locale word. */
1705 ((uint32_t *)&dcmd->mbox)[0] = seq;
1706 ((uint32_t *)&dcmd->mbox)[1] = locale;
1707 cm->cm_flags = MFI_CMD_DATAIN;
1708 cm->cm_complete = mfi_aen_complete;
1710 sc->last_seq_num = seq;
1711 sc->mfi_aen_cm = cm;
1713 mfi_enqueue_ready(cm);
/*
 * Completion handler for the outstanding AEN command.  On a real event:
 * wake any poll()ers, queue the event detail for the taskqueue, signal
 * registered listener processes with SIGIO, and re-arm AEN at the next
 * sequence number.  On abort/invalid status the re-arm is skipped.
 */
1721 mfi_aen_complete(struct mfi_command *cm)
1723 struct mfi_frame_header *hdr;
1724 struct mfi_softc *sc;
1725 struct mfi_evt_detail *detail;
1726 struct mfi_aen *mfi_aen_entry, *tmp;
1727 int seq = 0, aborted = 0;
1730 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1732 if (sc->mfi_aen_cm == NULL)
1735 hdr = &cm->cm_frame->header;
/* Deliberate abort (shutdown/re-register) — don't treat as an event. */
1737 if (sc->cm_aen_abort ||
1738 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1739 sc->cm_aen_abort = 0;
1742 sc->mfi_aen_triggered = 1;
1743 if (sc->mfi_poll_waiting) {
1744 sc->mfi_poll_waiting = 0;
1745 selwakeup(&sc->mfi_select);
1747 detail = cm->cm_data;
1748 mfi_queue_evt(sc, detail);
/* Next registration resumes one past the event just delivered. */
1749 seq = detail->seq + 1;
/* Notify every process registered for AEN delivery via SIGIO. */
1750 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1752 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1754 PROC_LOCK(mfi_aen_entry->p);
1755 kern_psignal(mfi_aen_entry->p, SIGIO);
1756 PROC_UNLOCK(mfi_aen_entry->p);
1757 free(mfi_aen_entry, M_MFIBUF);
1761 free(cm->cm_data, M_MFIBUF);
1762 wakeup(&sc->mfi_aen_cm);
1763 sc->mfi_aen_cm = NULL;
1764 mfi_release_command(cm);
1766 /* set it up again so the driver can catch more events */
1768 mfi_aen_setup(sc, seq);
/* Number of event details fetched per MFI_DCMD_CTRL_EVENT_GET request. */
1771 #define MAX_EVENTS 15
/*
 * Replay logged events in the range [start_seq, stop_seq), fetching
 * MAX_EVENTS at a time and pushing each onto the event queue via
 * mfi_queue_evt().  The log is circular, so the stop condition handles
 * a stop point that is numerically below the start point.  Uses polled
 * DCMDs; caller must hold mfi_io_lock.
 */
1774 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1776 struct mfi_command *cm;
1777 struct mfi_dcmd_frame *dcmd;
1778 struct mfi_evt_list *el;
1779 union mfi_evt class_locale;
1780 int error, i, seq, size;
1782 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1784 class_locale.members.reserved = 0;
1785 class_locale.members.locale = mfi_event_locale;
1786 class_locale.members.evt_class = mfi_event_class;
/* One header plus room for the extra event details beyond the first. */
1788 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1790 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1794 for (seq = start_seq;;) {
1795 if ((cm = mfi_dequeue_free(sc)) == NULL) {
/* Build the EVENT_GET DCMD frame by hand (no data buffer helper). */
1800 dcmd = &cm->cm_frame->dcmd;
1801 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1802 dcmd->header.cmd = MFI_CMD_DCMD;
1803 dcmd->header.timeout = 0;
1804 dcmd->header.data_len = size;
1805 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1806 ((uint32_t *)&dcmd->mbox)[0] = seq;
1807 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1808 cm->cm_sg = &dcmd->sgl;
1809 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1810 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1814 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1815 device_printf(sc->mfi_dev,
1816 "Failed to get controller entries\n");
1817 mfi_release_command(cm);
1821 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1822 BUS_DMASYNC_POSTREAD);
1823 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we've read past the end of the log: done. */
1825 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1826 mfi_release_command(cm);
1829 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1830 device_printf(sc->mfi_dev,
1831 "Error %d fetching controller entries\n",
1832 dcmd->header.cmd_status);
1833 mfi_release_command(cm);
1837 mfi_release_command(cm);
1839 for (i = 0; i < el->count; i++) {
1841 * If this event is newer than 'stop_seq' then
1842 * break out of the loop. Note that the log
1843 * is a circular buffer so we have to handle
1844 * the case that our stop point is earlier in
1845 * the buffer than our start point.
1847 if (el->event[i].seq >= stop_seq) {
1848 if (start_seq <= stop_seq)
1850 else if (el->event[i].seq < start_seq)
1853 mfi_queue_evt(sc, &el->event[i]);
/* Continue from just past the last event returned in this batch. */
1855 seq = el->event[el->count - 1].seq + 1;
/*
 * Attach logical drive 'id': record it on the pending list, fetch its
 * info with MFI_DCMD_LD_GET_INFO (waited), and hand non-SSCD drives to
 * mfi_add_ld_complete() to create the mfid child.  ld_info ownership is
 * transferred to the complete routine on success; on the visible error
 * paths it is freed here.  Caller must hold mfi_io_lock.
 */
1863 mfi_add_ld(struct mfi_softc *sc, int id)
1865 struct mfi_command *cm;
1866 struct mfi_dcmd_frame *dcmd = NULL;
1867 struct mfi_ld_info *ld_info = NULL;
1868 struct mfi_disk_pending *ld_pend;
1871 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Pending entry keeps re-probes from racing a second attach of 'id'. */
1873 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1874 if (ld_pend != NULL) {
1875 ld_pend->ld_id = id;
1876 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1879 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1880 (void **)&ld_info, sizeof(*ld_info));
1882 device_printf(sc->mfi_dev,
1883 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1885 free(ld_info, M_MFIBUF);
1888 cm->cm_flags = MFI_CMD_DATAIN;
1889 dcmd = &cm->cm_frame->dcmd;
1891 if (mfi_wait_command(sc, cm) != 0) {
1892 device_printf(sc->mfi_dev,
1893 "Failed to get logical drive: %d\n", id);
1894 free(ld_info, M_MFIBUF);
/* SSCD (CacheCade) volumes are not exposed as mfid disks. */
1897 if (ld_info->ld_config.params.isSSCD != 1)
1898 mfi_add_ld_complete(cm);
1900 mfi_release_command(cm);
1901 if (ld_info) /* SSCD drives ld_info free here */
1902 free(ld_info, M_MFIBUF);
/*
 * Second half of logical-drive attach: on success create an "mfid"
 * newbus child carrying ld_info as ivars and attach it with the I/O
 * lock dropped.  On abort or bad status, ld_info is freed and waiters
 * on mfi_map_sync_cm are woken.
 */
1908 mfi_add_ld_complete(struct mfi_command *cm)
1910 struct mfi_frame_header *hdr;
1911 struct mfi_ld_info *ld_info;
1912 struct mfi_softc *sc;
1916 hdr = &cm->cm_frame->header;
1917 ld_info = cm->cm_private;
1919 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1920 free(ld_info, M_MFIBUF);
1921 wakeup(&sc->mfi_map_sync_cm);
1922 mfi_release_command(cm);
1925 wakeup(&sc->mfi_map_sync_cm);
1926 mfi_release_command(cm);
/* device_add_child/bus_generic_attach may sleep: drop the mutex. */
1928 mtx_unlock(&sc->mfi_io_lock);
1930 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1931 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1932 free(ld_info, M_MFIBUF);
1934 mtx_lock(&sc->mfi_io_lock);
/* ld_info ownership passes to the child device via its ivars. */
1938 device_set_ivars(child, ld_info);
1939 device_set_desc(child, "MFI Logical Disk");
1940 bus_generic_attach(sc->mfi_dev);
1942 mtx_lock(&sc->mfi_io_lock);
/*
 * Attach system physical drive 'id': record it on the pending list,
 * fetch its info with a polled MFI_DCMD_PD_GET_INFO, then hand off to
 * mfi_add_sys_pd_complete() to create the mfisyspd child.  Caller must
 * hold mfi_io_lock.
 */
1945 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1947 struct mfi_command *cm;
1948 struct mfi_dcmd_frame *dcmd = NULL;
1949 struct mfi_pd_info *pd_info = NULL;
1950 struct mfi_system_pending *syspd_pend;
1953 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Pending entry keeps mfi_syspdprobe() from double-attaching 'id'. */
1955 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1956 if (syspd_pend != NULL) {
1957 syspd_pend->pd_id = id;
1958 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1961 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1962 (void **)&pd_info, sizeof(*pd_info));
1964 device_printf(sc->mfi_dev,
1965 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1968 free(pd_info, M_MFIBUF);
1971 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1972 dcmd = &cm->cm_frame->dcmd;
1974 dcmd->header.scsi_status = 0;
1975 dcmd->header.pad0 = 0;
1976 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1977 device_printf(sc->mfi_dev,
1978 "Failed to get physical drive info %d\n", id);
1979 free(pd_info, M_MFIBUF);
1980 mfi_release_command(cm);
1983 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1984 BUS_DMASYNC_POSTREAD);
1985 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* pd_info (and cm) ownership passes to the complete routine. */
1986 mfi_add_sys_pd_complete(cm);
/*
 * Second half of system-PD attach: verify the drive is actually in
 * SYSTEM (JBOD) state, then create an "mfisyspd" newbus child carrying
 * pd_info as ivars, with the I/O lock dropped across newbus calls.
 */
1991 mfi_add_sys_pd_complete(struct mfi_command *cm)
1993 struct mfi_frame_header *hdr;
1994 struct mfi_pd_info *pd_info;
1995 struct mfi_softc *sc;
1999 hdr = &cm->cm_frame->header;
2000 pd_info = cm->cm_private;
2002 if (hdr->cmd_status != MFI_STAT_OK) {
2003 free(pd_info, M_MFIBUF);
2004 mfi_release_command(cm);
/* Only drives in SYSTEM state are exposed as mfisyspd devices. */
2007 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2008 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2009 pd_info->ref.v.device_id);
2010 free(pd_info, M_MFIBUF);
2011 mfi_release_command(cm);
2014 mfi_release_command(cm);
/* device_add_child/bus_generic_attach may sleep: drop the mutex. */
2016 mtx_unlock(&sc->mfi_io_lock);
2018 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2019 device_printf(sc->mfi_dev, "Failed to add system pd\n");
2020 free(pd_info, M_MFIBUF);
2022 mtx_lock(&sc->mfi_io_lock);
/* pd_info ownership passes to the child device via its ivars. */
2026 device_set_ivars(child, pd_info);
2027 device_set_desc(child, "MFI System PD");
2028 bus_generic_attach(sc->mfi_dev);
2030 mtx_lock(&sc->mfi_io_lock);
/*
 * Pull the next bio off the queue and build the corresponding MFI
 * command (LD I/O or system-PD pass-through, selected by bio_driver2).
 * If the build fails, the bio is requeued.  Returns NULL when no work
 * or too few free commands are available.
 */
2033 static struct mfi_command *
2034 mfi_bio_command(struct mfi_softc *sc)
2037 struct mfi_command *cm = NULL;
2039 /*reserving two commands to avoid starvation for IOCTL*/
2040 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2043 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2046 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2047 cm = mfi_build_ldio(sc, bio);
2048 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2049 cm = mfi_build_syspdio(sc, bio);
/* Build failed (e.g. no free command): put the bio back for later. */
2052 mfi_enqueue_bio(sc, bio);
2057 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
/*
 * Build a READ/WRITE CDB for the given LBA and block count, choosing
 * the smallest CDB (6/10/12/16 bytes) that can encode both values.
 * Returns the CDB length written into 'cdb' (caller provides >= 16
 * bytes).  NOTE(review): part of the 6-byte eligibility condition is
 * elided in this view.
 */
2061 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
/* 6-byte CDB: 21-bit LBA, 8-bit count. */
2065 if (((lba & 0x1fffff) == lba)
2066 && ((block_count & 0xff) == block_count)
2068 /* We can fit in a 6 byte cdb */
2069 struct scsi_rw_6 *scsi_cmd;
2071 scsi_cmd = (struct scsi_rw_6 *)cdb;
2072 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2073 scsi_ulto3b(lba, scsi_cmd->addr);
2074 scsi_cmd->length = block_count & 0xff;
2075 scsi_cmd->control = 0;
2076 cdb_len = sizeof(*scsi_cmd);
/* 10-byte CDB: 32-bit LBA, 16-bit count. */
2077 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2078 /* Need a 10 byte CDB */
2079 struct scsi_rw_10 *scsi_cmd;
2081 scsi_cmd = (struct scsi_rw_10 *)cdb;
2082 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2083 scsi_cmd->byte2 = byte2;
2084 scsi_ulto4b(lba, scsi_cmd->addr);
2085 scsi_cmd->reserved = 0;
2086 scsi_ulto2b(block_count, scsi_cmd->length);
2087 scsi_cmd->control = 0;
2088 cdb_len = sizeof(*scsi_cmd);
/* 12-byte CDB: 32-bit LBA, 32-bit count. */
2089 } else if (((block_count & 0xffffffff) == block_count) &&
2090 ((lba & 0xffffffff) == lba)) {
2091 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2092 struct scsi_rw_12 *scsi_cmd;
2094 scsi_cmd = (struct scsi_rw_12 *)cdb;
2095 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2096 scsi_cmd->byte2 = byte2;
2097 scsi_ulto4b(lba, scsi_cmd->addr);
2098 scsi_cmd->reserved = 0;
2099 scsi_ulto4b(block_count, scsi_cmd->length);
2100 scsi_cmd->control = 0;
2101 cdb_len = sizeof(*scsi_cmd);
2104 * 16 byte CDB. We'll only get here if the LBA is larger
2107 struct scsi_rw_16 *scsi_cmd;
2109 scsi_cmd = (struct scsi_rw_16 *)cdb;
2110 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2111 scsi_cmd->byte2 = byte2;
2112 scsi_u64to8b(lba, scsi_cmd->addr);
2113 scsi_cmd->reserved = 0;
2114 scsi_ulto4b(block_count, scsi_cmd->length);
2115 scsi_cmd->control = 0;
2116 cdb_len = sizeof(*scsi_cmd);
2122 extern char *unmapped_buf;
/*
 * Translate a bio aimed at a system PD into an MFI pass-through
 * (MFI_CMD_PD_SCSI_IO) command with a freshly built READ/WRITE CDB.
 * Returns NULL if no free command is available; the data buffer is
 * mapped later from the bio itself (MFI_CMD_BIO / unmapped_buf).
 */
2124 static struct mfi_command *
2125 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2127 struct mfi_command *cm;
2128 struct mfi_pass_frame *pass;
2129 uint32_t context = 0;
2130 int flags = 0, blkcount = 0, readop;
2133 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2135 if ((cm = mfi_dequeue_free(sc)) == NULL)
2138 /* Zero out the MFI frame */
/* Preserve the frame's context tag across the bzero. */
2139 context = cm->cm_frame->header.context;
2140 bzero(cm->cm_frame, sizeof(union mfi_frame));
2141 cm->cm_frame->header.context = context;
2142 pass = &cm->cm_frame->pass;
2143 bzero(pass->cdb, 16);
2144 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2145 switch (bio->bio_cmd) {
2147 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2151 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2155 /* TODO: what about BIO_DELETE??? */
2156 panic("Unsupported bio command %x\n", bio->bio_cmd);
2159 /* Cheat with the sector length to avoid a non-constant division */
2160 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2161 /* Fill the LBA and Transfer length in CDB */
2162 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
/* Target PD id was stashed in bio_driver1 by the disk layer. */
2164 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2165 pass->header.lun_id = 0;
2166 pass->header.timeout = 0;
2167 pass->header.flags = 0;
2168 pass->header.scsi_status = 0;
2169 pass->header.sense_len = MFI_SENSE_LEN;
2170 pass->header.data_len = bio->bio_bcount;
2171 pass->header.cdb_len = cdb_len;
2172 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2173 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2174 cm->cm_complete = mfi_bio_complete;
2175 cm->cm_private = bio;
2176 cm->cm_data = unmapped_buf;
2177 cm->cm_len = bio->bio_bcount;
2178 cm->cm_sg = &pass->sgl;
2179 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2180 cm->cm_flags = flags;
/*
 * Translate a bio aimed at a logical drive into a native MFI LD
 * read/write frame (MFI_CMD_LD_READ/WRITE with a 64-bit split LBA).
 * Returns NULL if no free command is available; the data buffer is
 * mapped later from the bio itself (MFI_CMD_BIO / unmapped_buf).
 */
2185 static struct mfi_command *
2186 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2188 struct mfi_io_frame *io;
2189 struct mfi_command *cm;
2192 uint32_t context = 0;
2194 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2196 if ((cm = mfi_dequeue_free(sc)) == NULL)
2199 /* Zero out the MFI frame */
/* Preserve the frame's context tag across the bzero. */
2200 context = cm->cm_frame->header.context;
2201 bzero(cm->cm_frame, sizeof(union mfi_frame));
2202 cm->cm_frame->header.context = context;
2203 io = &cm->cm_frame->io;
2204 switch (bio->bio_cmd) {
2206 io->header.cmd = MFI_CMD_LD_READ;
2207 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2210 io->header.cmd = MFI_CMD_LD_WRITE;
2211 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2214 /* TODO: what about BIO_DELETE??? */
2215 panic("Unsupported bio command %x\n", bio->bio_cmd);
2218 /* Cheat with the sector length to avoid a non-constant division */
2219 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
/* Target LD id was stashed in bio_driver1 by the disk layer. */
2220 io->header.target_id = (uintptr_t)bio->bio_driver1;
2221 io->header.timeout = 0;
2222 io->header.flags = 0;
2223 io->header.scsi_status = 0;
2224 io->header.sense_len = MFI_SENSE_LEN;
/* For LD I/O frames data_len is in blocks, not bytes. */
2225 io->header.data_len = blkcount;
2226 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2227 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2228 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2229 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2230 cm->cm_complete = mfi_bio_complete;
2231 cm->cm_private = bio;
2232 cm->cm_data = unmapped_buf;
2233 cm->cm_len = bio->bio_bcount;
2234 cm->cm_sg = &io->sgl;
2235 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2236 cm->cm_flags = flags;
/*
 * Completion handler for bio-backed commands: propagate firmware or
 * driver errors into the bio (BIO_ERROR/EIO), log sense data on SCSI
 * failures, then release the command and finish the bio.
 */
2242 mfi_bio_complete(struct mfi_command *cm)
2245 struct mfi_frame_header *hdr;
2246 struct mfi_softc *sc;
2248 bio = cm->cm_private;
2249 hdr = &cm->cm_frame->header;
/* Firmware-reported failure: either MFI status or SCSI status is bad. */
2252 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2253 bio->bio_flags |= BIO_ERROR;
2254 bio->bio_error = EIO;
2255 device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2256 "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2257 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2258 } else if (cm->cm_error != 0) {
2259 bio->bio_flags |= BIO_ERROR;
2260 bio->bio_error = cm->cm_error;
2261 device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2265 mfi_release_command(cm);
2266 mfi_disk_complete(bio);
/*
 * Issue queued work to the controller while resources last.  Work is
 * taken in priority order: already-prepared ready commands, then CAM
 * pass-through CCBs, then bios.  Stops when the queue is frozen
 * (MFI_FLAGS_QFRZN) or no work remains; on a map failure the command
 * is requeued as ready.
 */
2270 mfi_startio(struct mfi_softc *sc)
2272 struct mfi_command *cm;
2273 struct ccb_hdr *ccbh;
2276 /* Don't bother if we're short on resources */
2277 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2280 /* Try a command that has already been prepared */
2281 cm = mfi_dequeue_ready(sc);
/* Next, CAM pass-through work if that layer is attached. */
2284 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2285 cm = sc->mfi_cam_start(ccbh);
2288 /* Nope, so look for work on the bioq */
2290 cm = mfi_bio_command(sc);
2292 /* No work available, so exit */
2296 /* Send the command to the controller */
2297 if (mfi_mapcmd(sc, cm) != 0) {
2298 device_printf(sc->mfi_dev, "Failed to startio\n");
2299 mfi_requeue_ready(cm);
/*
 * Map a command's data buffer for DMA (via CCB, bio, or plain buffer
 * load, chosen by cm_flags) and send the frame.  The DMA callback
 * (mfi_data_cb) fills the S/G list and performs the actual send; a
 * deferred load (EINPROGRESS) freezes the queue until resources free
 * up.  Commands with no data, and STP frames, are sent directly.
 * Caller must hold mfi_io_lock.
 */
2306 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2310 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2312 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
/* Polled commands may not block waiting for DMA resources. */
2313 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2314 if (cm->cm_flags & MFI_CMD_CCB)
2315 error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2316 cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2318 else if (cm->cm_flags & MFI_CMD_BIO)
2319 error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2320 cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2323 error = bus_dmamap_load(sc->mfi_buffer_dmat,
2324 cm->cm_dmamap, cm->cm_data, cm->cm_len,
2325 mfi_data_cb, cm, polled);
2326 if (error == EINPROGRESS) {
/* Load deferred: freeze the queue until the callback runs. */
2327 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2331 error = mfi_send_frame(sc, cm);
/*
 * bus_dma load callback: build the frame's scatter/gather list from the
 * DMA segments (IEEE "skinny" SGLs for I/O on SKINNY controllers,
 * otherwise 32- or 64-bit SGLs; STP frames carry their first cm_stp_len
 * bytes inline), set transfer-direction flags, sync the buffer, account
 * for extra frames, and send the command.  May be invoked with or
 * without mfi_io_lock held, so it re-acquires it if needed.
 */
2338 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2340 struct mfi_frame_header *hdr;
2341 struct mfi_command *cm;
2343 struct mfi_softc *sc;
2344 int i, j, first, dir;
2345 int sge_size, locked;
2347 cm = (struct mfi_command *)arg;
2349 hdr = &cm->cm_frame->header;
2353 * We need to check if we have the lock as this is async
2354 * callback so even though our caller mfi_mapcmd asserts
2355 * it has the lock, there is no guarantee that hasn't been
2356 * dropped if bus_dmamap_load returned prior to our
2359 if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2360 mtx_lock(&sc->mfi_io_lock);
/* DMA load failed: record the error and complete the command. */
2363 printf("error %d in callback\n", error);
2364 cm->cm_error = error;
2365 mfi_complete(sc, cm);
2368 /* Use IEEE sgl only for IO's on a SKINNY controller
2369 * For other commands on a SKINNY controller use either
2370 * sg32 or sg64 based on the sizeof(bus_addr_t).
2371 * Also calculate the total frame size based on the type
2374 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2375 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2376 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2377 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2378 for (i = 0; i < nsegs; i++) {
2379 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2380 sgl->sg_skinny[i].len = segs[i].ds_len;
2381 sgl->sg_skinny[i].flag = 0;
2383 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2384 sge_size = sizeof(struct mfi_sg_skinny);
2385 hdr->sg_count = nsegs;
/* STP: the first cm_stp_len bytes get their own leading S/G entry. */
2388 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2389 first = cm->cm_stp_len;
2390 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2391 sgl->sg32[j].addr = segs[0].ds_addr;
2392 sgl->sg32[j++].len = first;
2394 sgl->sg64[j].addr = segs[0].ds_addr;
2395 sgl->sg64[j++].len = first;
/* Remaining segments, adjusted past the inline STP prefix. */
2399 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2400 for (i = 0; i < nsegs; i++) {
2401 sgl->sg32[j].addr = segs[i].ds_addr + first;
2402 sgl->sg32[j++].len = segs[i].ds_len - first;
2406 for (i = 0; i < nsegs; i++) {
2407 sgl->sg64[j].addr = segs[i].ds_addr + first;
2408 sgl->sg64[j++].len = segs[i].ds_len - first;
2411 hdr->flags |= MFI_FRAME_SGL64;
2414 sge_size = sc->mfi_sge_size;
/* Set direction flags and pre-sync the buffer for the transfer. */
2418 if (cm->cm_flags & MFI_CMD_DATAIN) {
2419 dir |= BUS_DMASYNC_PREREAD;
2420 hdr->flags |= MFI_FRAME_DIR_READ;
2422 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2423 dir |= BUS_DMASYNC_PREWRITE;
2424 hdr->flags |= MFI_FRAME_DIR_WRITE;
2426 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2427 cm->cm_flags |= MFI_CMD_MAPPED;
2430 * Instead of calculating the total number of frames in the
2431 * compound frame, it's already assumed that there will be at
2432 * least 1 frame, so don't compensate for the modulo of the
2433 * following division.
2435 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2436 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2438 if ((error = mfi_send_frame(sc, cm)) != 0) {
2439 printf("error %d in callback from mfi_send_frame\n", error);
2440 cm->cm_error = error;
2441 mfi_complete(sc, cm);
2446 /* leave the lock in the state we found it */
2448 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_send_frame -- dispatch a command to the hardware.
 *
 * Chooses the Thunderbolt (MFA) or standard submission path based on
 * sc->MFA_enabled.  If the send fails and the command was placed on the
 * busy queue, it is removed again so callers can safely release it.
 * Requires the io lock to be held.
 */
2454 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2458 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2460 if (sc->MFA_enabled)
2461 error = mfi_tbolt_send_frame(sc, cm);
2463 error = mfi_std_send_frame(sc, cm);
2465 if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2466 mfi_remove_busy(cm);
/*
 * mfi_std_send_frame -- submit a command via the legacy (non-Thunderbolt)
 * doorbell interface.
 *
 * Non-polled commands are timestamped and put on the busy queue so the
 * watchdog can track them.  Polled commands are marked with an invalid
 * status sentinel and busy-waited on until the firmware overwrites it or
 * the timeout ('tm', derived from mfi_polled_cmd_timeout) expires.
 */
2472 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2474 struct mfi_frame_header *hdr;
/* Polled-command budget; the wait loop body is elided in this excerpt. */
2475 int tm = mfi_polled_cmd_timeout * 1000;
2477 hdr = &cm->cm_frame->header;
2479 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2480 cm->cm_timestamp = time_uptime;
2481 mfi_enqueue_busy(cm);
/* Polled path: sentinel status, and keep the reply out of the queue. */
2483 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2484 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2488 * The bus address of the command is aligned on a 64 byte boundary,
2489 * leaving the least 6 bits as zero. For whatever reason, the
2490 * hardware wants the address shifted right by three, leaving just
2491 * 3 zero bits. These three bits are then used as a prefetching
2492 * hint for the hardware to predict how many frames need to be
2493 * fetched across the bus. If a command has more than 8 frames
2494 * then the 3 bits are set to 0x7 and the firmware uses other
2495 * information in the command to determine the total amount to fetch.
2496 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2497 * is enough for both 32bit and 64bit systems.
2499 if (cm->cm_extra_frames > 7)
2500 cm->cm_extra_frames = 7;
2502 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2504 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2507 /* This is a polled command, so busy-wait for it to complete. */
2508 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
/* Still the sentinel after the wait loop => the command timed out. */
2515 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2516 device_printf(sc->mfi_dev, "Frame %p timed out "
2517 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * mfi_complete -- finish a command: post-sync and unload its DMA map
 * (if it was mapped), mark it completed, and invoke its completion
 * callback, if any.  Requires the io lock to be held.
 */
2526 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2529 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2531 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP commands are always post-read synced, like DATAIN. */
2533 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2534 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2535 dir |= BUS_DMASYNC_POSTREAD;
2536 if (cm->cm_flags & MFI_CMD_DATAOUT)
2537 dir |= BUS_DMASYNC_POSTWRITE;
2539 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2540 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2541 cm->cm_flags &= ~MFI_CMD_MAPPED;
2544 cm->cm_flags |= MFI_CMD_COMPLETED;
2546 if (cm->cm_complete != NULL)
2547 cm->cm_complete(cm);
/*
 * mfi_abort -- ask the firmware to abort an outstanding command.
 *
 * Builds a polled MFI_CMD_ABORT frame referencing (*cm_abort)'s context
 * and bus address, issues it, then sleeps up to 5 intervals waiting for
 * the target command pointer to be cleared.  If the command is still
 * pending afterwards, its completion handler is invoked directly to
 * force it to finish.
 */
2553 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2555 struct mfi_command *cm;
2556 struct mfi_abort_frame *abort;
2558 uint32_t context = 0;
2560 mtx_lock(&sc->mfi_io_lock);
2561 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2562 mtx_unlock(&sc->mfi_io_lock);
2566 /* Zero out the MFI frame */
2567 context = cm->cm_frame->header.context;
2568 bzero(cm->cm_frame, sizeof(union mfi_frame));
2569 cm->cm_frame->header.context = context;
2571 abort = &cm->cm_frame->abort;
2572 abort->header.cmd = MFI_CMD_ABORT;
2573 abort->header.flags = 0;
2574 abort->header.scsi_status = 0;
/* Identify the victim command by context and 64-bit frame bus address. */
2575 abort->abort_context = (*cm_abort)->cm_frame->header.context;
2576 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2577 abort->abort_mfi_addr_hi =
2578 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2580 cm->cm_flags = MFI_CMD_POLLED;
2582 if ((error = mfi_mapcmd(sc, cm)) != 0)
2583 device_printf(sc->mfi_dev, "failed to abort command\n");
2584 mfi_release_command(cm);
2586 mtx_unlock(&sc->mfi_io_lock);
/* Wait (bounded) for the aborted command to be retired by the ISR. */
2587 while (i < 5 && *cm_abort != NULL) {
2588 tsleep(cm_abort, 0, "mfiabort",
2592 if (*cm_abort != NULL) {
2593 /* Force a complete if command didn't abort */
2594 mtx_lock(&sc->mfi_io_lock);
2595 (*cm_abort)->cm_complete(*cm_abort);
2596 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_dump_blocks -- write 'len' bytes at LBA 'lba' to logical disk 'id'
 * using a polled MFI_CMD_LD_WRITE.  Used on the kernel crash-dump path,
 * so the command is fully polled (no interrupts assumed working).
 */
2603 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2606 struct mfi_command *cm;
2607 struct mfi_io_frame *io;
2609 uint32_t context = 0;
2611 if ((cm = mfi_dequeue_free(sc)) == NULL)
2614 /* Zero out the MFI frame */
2615 context = cm->cm_frame->header.context;
2616 bzero(cm->cm_frame, sizeof(union mfi_frame));
2617 cm->cm_frame->header.context = context;
2619 io = &cm->cm_frame->io;
2620 io->header.cmd = MFI_CMD_LD_WRITE;
2621 io->header.target_id = id;
2622 io->header.timeout = 0;
2623 io->header.flags = 0;
2624 io->header.scsi_status = 0;
2625 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is in sectors for IO frames, hence the round-up division. */
2626 io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2627 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2628 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2629 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2630 io->lba_lo = lba & 0xffffffff;
2633 cm->cm_sg = &io->sgl;
2634 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2635 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2637 if ((error = mfi_mapcmd(sc, cm)) != 0)
2638 device_printf(sc->mfi_dev, "failed dump blocks\n");
/* Tear down the mapping explicitly; no completion callback runs here. */
2639 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2640 BUS_DMASYNC_POSTWRITE);
2641 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2642 mfi_release_command(cm);
/*
 * mfi_dump_syspd_blocks -- like mfi_dump_blocks(), but writes to a
 * system physical disk via a polled SCSI pass-through (PD_SCSI_IO)
 * frame with a CDB built by mfi_build_cdb().
 */
2648 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2651 struct mfi_command *cm;
2652 struct mfi_pass_frame *pass;
2653 int error, readop, cdb_len;
2656 if ((cm = mfi_dequeue_free(sc)) == NULL)
2659 pass = &cm->cm_frame->pass;
2660 bzero(pass->cdb, 16);
2661 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Build a (write) CDB covering blkcount sectors starting at lba. */
2664 blkcount = howmany(len, MFI_SECTOR_LEN);
2665 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2666 pass->header.target_id = id;
2667 pass->header.timeout = 0;
2668 pass->header.flags = 0;
2669 pass->header.scsi_status = 0;
2670 pass->header.sense_len = MFI_SENSE_LEN;
2671 pass->header.data_len = len;
2672 pass->header.cdb_len = cdb_len;
2673 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2674 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2677 cm->cm_sg = &pass->sgl;
2678 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2679 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2681 if ((error = mfi_mapcmd(sc, cm)) != 0)
2682 device_printf(sc->mfi_dev, "failed dump blocks\n");
/* Explicit unmap/release; dump path has no completion callback. */
2683 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2684 BUS_DMASYNC_POSTWRITE);
2685 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2686 mfi_release_command(cm);
/*
 * mfi_open -- cdev open entry point.  Refuses to open while the driver
 * is detaching; otherwise marks the control device as open.
 */
2692 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2694 struct mfi_softc *sc;
2699 mtx_lock(&sc->mfi_io_lock);
2700 if (sc->mfi_detaching)
2703 sc->mfi_flags |= MFI_FLAGS_OPEN;
2706 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_close -- cdev close entry point.  Clears the open flag and drops
 * any AEN registrations made by the closing process.
 */
2712 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2714 struct mfi_softc *sc;
2715 struct mfi_aen *mfi_aen_entry, *tmp;
2719 mtx_lock(&sc->mfi_io_lock);
2720 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* Remove this process's AEN registrations (safe variant: we free). */
2722 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2723 if (mfi_aen_entry->p == curproc) {
2724 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2726 free(mfi_aen_entry, M_MFIBUF);
2729 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_config_lock -- take the exclusive configuration lock for DCMD
 * opcodes that modify the array configuration.  Returns whether the
 * lock was taken so mfi_config_unlock() can balance it.
 */
2734 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2738 case MFI_DCMD_LD_DELETE:
2739 case MFI_DCMD_CFG_ADD:
2740 case MFI_DCMD_CFG_CLEAR:
2741 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2742 sx_xlock(&sc->mfi_config_lock);
/* mfi_config_unlock -- release the config lock iff mfi_config_lock took it. */
2750 mfi_config_unlock(struct mfi_softc *sc, int locked)
2754 sx_xunlock(&sc->mfi_config_lock);
2758 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * Disables affected mfid/syspd disks before destructive configuration
 * DCMDs (LD_DELETE, CFG_CLEAR, PD_STATE_SET) are sent to the firmware;
 * a non-zero return vetoes the command.  Io lock must be held.
 */
2762 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2764 struct mfi_disk *ld, *ld2;
2766 struct mfi_system_pd *syspd = NULL;
2770 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2772 switch (cm->cm_frame->dcmd.opcode) {
2773 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE. */
2774 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2775 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2781 error = mfi_disk_disable(ld);
2783 case MFI_DCMD_CFG_CLEAR:
/* Disable all LDs; on failure, re-enable those already disabled. */
2784 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2785 error = mfi_disk_disable(ld);
2790 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2793 mfi_disk_enable(ld2);
2797 case MFI_DCMD_PD_STATE_SET:
/* mbox layout: [0..1] device id, [2] requested new PD state. */
2798 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2800 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2801 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2802 if (syspd->pd_id == syspd_id)
2809 error = mfi_syspd_disable(syspd);
2817 /* Perform post-issue checks on commands from userland. */
/*
 * Counterpart of mfi_check_command_pre(): after the firmware has acted,
 * delete the child device(s) on success or re-enable the disk(s) the
 * pre-check disabled on failure.  Drops/retakes the io lock around
 * device_delete_child() since newbus operations cannot run under it.
 */
2819 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2821 struct mfi_disk *ld, *ldn;
2822 struct mfi_system_pd *syspd = NULL;
2826 switch (cm->cm_frame->dcmd.opcode) {
2827 case MFI_DCMD_LD_DELETE:
2828 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2829 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2832 KASSERT(ld != NULL, ("volume dissappeared"));
2833 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2834 mtx_unlock(&sc->mfi_io_lock);
2836 device_delete_child(sc->mfi_dev, ld->ld_dev);
2838 mtx_lock(&sc->mfi_io_lock);
2840 mfi_disk_enable(ld);
2842 case MFI_DCMD_CFG_CLEAR:
2843 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2844 mtx_unlock(&sc->mfi_io_lock);
2846 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2847 device_delete_child(sc->mfi_dev, ld->ld_dev);
2850 mtx_lock(&sc->mfi_io_lock);
2852 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2853 mfi_disk_enable(ld);
2856 case MFI_DCMD_CFG_ADD:
2859 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2862 case MFI_DCMD_PD_STATE_SET:
/* Same mbox layout as the pre-check: [0..1] id, [2] new state. */
2863 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2865 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2866 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2867 if (syspd->pd_id == syspd_id)
2873 /* If the transition fails then enable the syspd again */
2874 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2875 mfi_syspd_enable(syspd);
/*
 * mfi_check_for_sscd -- decide whether a userland CFG_ADD/LD_DELETE
 * targets an SSCD (CacheCade) volume, in which case the normal pre/post
 * disk enable/disable checks are skipped by the caller.
 *
 * For LD_DELETE it issues an internal MFI_DCMD_LD_GET_INFO to inspect
 * the target LD's isSSCD parameter.
 */
2881 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2883 struct mfi_config_data *conf_data;
2884 struct mfi_command *ld_cm = NULL;
2885 struct mfi_ld_info *ld_info = NULL;
2886 struct mfi_ld_config *ld;
2890 conf_data = (struct mfi_config_data *)cm->cm_data;
2892 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
/* The LD config record follows the array records in the payload. */
2893 p = (char *)conf_data->array;
2894 p += conf_data->array_size * conf_data->array_count;
2895 ld = (struct mfi_ld_config *)p;
2896 if (ld->params.isSSCD == 1)
2898 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2899 error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2900 (void **)&ld_info, sizeof(*ld_info));
2902 device_printf(sc->mfi_dev, "Failed to allocate"
2903 "MFI_DCMD_LD_GET_INFO %d", error);
2905 free(ld_info, M_MFIBUF);
/* Query the same LD id the delete targets (dcmd.mbox[0]). */
2908 ld_cm->cm_flags = MFI_CMD_DATAIN;
2909 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2910 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2911 if (mfi_wait_command(sc, ld_cm) != 0) {
2912 device_printf(sc->mfi_dev, "failed to get log drv\n");
2913 mfi_release_command(ld_cm);
2914 free(ld_info, M_MFIBUF);
2918 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2919 free(ld_info, M_MFIBUF);
2920 mfi_release_command(ld_cm);
2924 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2926 if (ld_info->ld_config.params.isSSCD == 1)
2929 mfi_release_command(ld_cm);
2930 free(ld_info, M_MFIBUF);
/*
 * mfi_stp_cmd -- set up kernel bounce buffers and S/G entries for a
 * userland MFI_CMD_STP ioctl request.
 *
 * For each user iovec a dedicated DMA tag/map/buffer is created in
 * sc->mfi_kbuff_arr_*, its bus address is written into both the
 * kernel-side SGE array and the frame's sg32/sg64 list, and the user
 * data is copied in.  The buffers are torn down later by mfi_ioctl()'s
 * STP cleanup path.
 */
2937 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2940 struct mfi_ioc_packet *ioc;
2941 ioc = (struct mfi_ioc_packet *)arg;
2942 int sge_size, error;
2943 struct megasas_sge *kern_sge;
2945 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* Kernel SGE array lives inside the frame at the caller's sgl offset. */
2946 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2947 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2949 if (sizeof(bus_addr_t) == 8) {
2950 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2951 cm->cm_extra_frames = 2;
2952 sge_size = sizeof(struct mfi_sg64);
2954 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2955 sge_size = sizeof(struct mfi_sg32);
2958 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2959 for (i = 0; i < ioc->mfi_sge_count; i++) {
/* One tag+map+buffer per user iovec, sized exactly to the iovec. */
2960 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2961 1, 0, /* algnmnt, boundary */
2962 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2963 BUS_SPACE_MAXADDR, /* highaddr */
2964 NULL, NULL, /* filter, filterarg */
2965 ioc->mfi_sgl[i].iov_len,/* maxsize */
2967 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2968 BUS_DMA_ALLOCNOW, /* flags */
2969 NULL, NULL, /* lockfunc, lockarg */
2970 &sc->mfi_kbuff_arr_dmat[i])) {
2971 device_printf(sc->mfi_dev,
2972 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2976 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2977 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2978 &sc->mfi_kbuff_arr_dmamap[i])) {
2979 device_printf(sc->mfi_dev,
2980 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
/* mfi_addr_cb stores the mapped bus address into the busaddr slot. */
2984 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2985 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2986 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2987 &sc->mfi_kbuff_arr_busaddr[i], 0);
2989 if (!sc->kbuff_arr[i]) {
2990 device_printf(sc->mfi_dev,
2991 "Could not allocate memory for kbuff_arr info\n");
2994 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2995 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
/* Mirror the entry into the frame's native sg64/sg32 list. */
2997 if (sizeof(bus_addr_t) == 8) {
2998 cm->cm_frame->stp.sgl.sg64[i].addr =
2999 kern_sge[i].phys_addr;
3000 cm->cm_frame->stp.sgl.sg64[i].len =
3001 ioc->mfi_sgl[i].iov_len;
3003 cm->cm_frame->stp.sgl.sg32[i].addr =
3004 kern_sge[i].phys_addr;
3005 cm->cm_frame->stp.sgl.sg32[i].len =
3006 ioc->mfi_sgl[i].iov_len;
3009 error = copyin(ioc->mfi_sgl[i].iov_base,
3011 ioc->mfi_sgl[i].iov_len);
3013 device_printf(sc->mfi_dev, "Copy in failed\n");
3018 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * mfi_user_command -- handle an MFIIO_PASSTHRU DCMD from userland.
 *
 * Copies in the optional data buffer (capped at 1MB), builds a DCMD
 * frame from the user-supplied ioc_frame, runs the pre/post userland
 * command checks around mfi_wait_command(), then copies the frame and
 * data buffer back out.
 */
3023 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3025 struct mfi_command *cm;
3026 struct mfi_dcmd_frame *dcmd;
3027 void *ioc_buf = NULL;
3029 int error = 0, locked;
3032 if (ioc->buf_size > 0) {
/* Sanity cap on the user buffer: reject anything over 1MB. */
3033 if (ioc->buf_size > 1024 * 1024)
3035 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3036 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3038 device_printf(sc->mfi_dev, "failed to copyin\n");
3039 free(ioc_buf, M_MFIBUF);
3044 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3046 mtx_lock(&sc->mfi_io_lock);
/* Block until a free command is available rather than failing. */
3047 while ((cm = mfi_dequeue_free(sc)) == NULL)
3048 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3050 /* Save context for later */
3051 context = cm->cm_frame->header.context;
3053 dcmd = &cm->cm_frame->dcmd;
3054 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3056 cm->cm_sg = &dcmd->sgl;
3057 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3058 cm->cm_data = ioc_buf;
3059 cm->cm_len = ioc->buf_size;
3061 /* restore context */
3062 cm->cm_frame->header.context = context;
3064 /* Cheat since we don't know if we're writing or reading */
3065 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3067 error = mfi_check_command_pre(sc, cm);
3071 error = mfi_wait_command(sc, cm);
3073 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the (possibly firmware-updated) frame to the caller. */
3076 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3077 mfi_check_command_post(sc, cm);
3079 mfi_release_command(cm);
3080 mtx_unlock(&sc->mfi_io_lock);
3081 mfi_config_unlock(sc, locked);
3082 if (ioc->buf_size > 0)
3083 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3085 free(ioc_buf, M_MFIBUF);
/* Convert a user-supplied integer-encoded pointer into a kernel void *. */
3089 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * mfi_ioctl -- main control-device ioctl dispatcher.
 *
 * Handles queue statistics, disk queries, the native MFI_CMD packet
 * path (with COMPAT_FREEBSD32 shims), AEN registration, the Linux
 * ioctl shims, and MFIIO_PASSTHRU(32).
 *
 * NOTE(review): this excerpt elides many original source lines (case
 * labels, braces, returns), so the switch arms below are not contiguous.
 */
3092 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3094 struct mfi_softc *sc;
3095 union mfi_statrequest *ms;
3096 struct mfi_ioc_packet *ioc;
3097 #ifdef COMPAT_FREEBSD32
3098 struct mfi_ioc_packet32 *ioc32;
3100 struct mfi_ioc_aen *aen;
3101 struct mfi_command *cm = NULL;
3102 uint32_t context = 0;
3103 union mfi_sense_ptr sense_ptr;
3104 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3107 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3108 #ifdef COMPAT_FREEBSD32
3109 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3110 struct mfi_ioc_passthru iop_swab;
/* Reject all ioctls while the controller is in a critical error state. */
3120 if (sc->hw_crit_error)
3123 if (sc->issuepend_done == 0)
/* Queue statistics request. */
3128 ms = (union mfi_statrequest *)arg;
3129 switch (ms->ms_item) {
3134 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3135 sizeof(struct mfi_qstat));
3142 case MFIIO_QUERY_DISK:
3144 struct mfi_query_disk *qd;
3145 struct mfi_disk *ld;
3147 qd = (struct mfi_query_disk *)arg;
3148 mtx_lock(&sc->mfi_io_lock);
3149 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3150 if (ld->ld_id == qd->array_id)
3155 mtx_unlock(&sc->mfi_io_lock);
3159 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3161 bzero(qd->devname, SPECNAMELEN + 1);
3162 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3163 mtx_unlock(&sc->mfi_io_lock);
/* Native MFI command packet path (and its 32-bit compat variant). */
3167 #ifdef COMPAT_FREEBSD32
3171 devclass_t devclass;
3172 ioc = (struct mfi_ioc_packet *)arg;
/* Redirect to the addressed adapter if this isn't unit 0's request. */
3175 adapter = ioc->mfi_adapter_no;
3176 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3177 devclass = devclass_find("mfi");
3178 sc = devclass_get_softc(devclass, adapter);
3180 mtx_lock(&sc->mfi_io_lock);
3181 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3182 mtx_unlock(&sc->mfi_io_lock);
3185 mtx_unlock(&sc->mfi_io_lock);
3189 * save off original context since copying from user
3190 * will clobber some data
3192 context = cm->cm_frame->header.context;
3193 cm->cm_frame->header.context = cm->cm_index;
3195 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3196 2 * MEGAMFI_FRAME_SIZE);
3197 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3198 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3199 cm->cm_frame->header.scsi_status = 0;
3200 cm->cm_frame->header.pad0 = 0;
3201 if (ioc->mfi_sge_count) {
3203 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3207 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3208 cm->cm_flags |= MFI_CMD_DATAIN;
3209 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3210 cm->cm_flags |= MFI_CMD_DATAOUT;
3211 /* Legacy app shim */
3212 if (cm->cm_flags == 0)
3213 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3214 cm->cm_len = cm->cm_frame->header.data_len;
/* STP commands carry an extra prefix length from the first iovec. */
3215 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3216 #ifdef COMPAT_FREEBSD32
3217 if (cmd == MFI_CMD) {
3220 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3221 #ifdef COMPAT_FREEBSD32
3223 /* 32bit on 64bit */
3224 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3225 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3228 cm->cm_len += cm->cm_stp_len;
3231 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3232 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3238 /* restore header context */
3239 cm->cm_frame->header.context = context;
3241 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3242 res = mfi_stp_cmd(sc, cm, arg);
/* Copy user data in for DATAOUT (and always for STP). */
3247 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3248 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3249 for (i = 0; i < ioc->mfi_sge_count; i++) {
3250 #ifdef COMPAT_FREEBSD32
3251 if (cmd == MFI_CMD) {
3254 addr = ioc->mfi_sgl[i].iov_base;
3255 len = ioc->mfi_sgl[i].iov_len;
3256 #ifdef COMPAT_FREEBSD32
3258 /* 32bit on 64bit */
3259 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3260 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3261 len = ioc32->mfi_sgl[i].iov_len;
3264 error = copyin(addr, temp, len);
3266 device_printf(sc->mfi_dev,
3267 "Copy in failed\n");
3275 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3276 locked = mfi_config_lock(sc,
3277 cm->cm_frame->dcmd.opcode);
3279 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3280 cm->cm_frame->pass.sense_addr_lo =
3281 (uint32_t)cm->cm_sense_busaddr;
3282 cm->cm_frame->pass.sense_addr_hi =
3283 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
/* SSCD volumes skip the normal pre/post userland command checks. */
3285 mtx_lock(&sc->mfi_io_lock);
3286 skip_pre_post = mfi_check_for_sscd (sc, cm);
3287 if (!skip_pre_post) {
3288 error = mfi_check_command_pre(sc, cm);
3290 mtx_unlock(&sc->mfi_io_lock);
3294 if ((error = mfi_wait_command(sc, cm)) != 0) {
3295 device_printf(sc->mfi_dev,
3296 "Controller polled failed\n");
3297 mtx_unlock(&sc->mfi_io_lock);
3300 if (!skip_pre_post) {
3301 mfi_check_command_post(sc, cm);
3303 mtx_unlock(&sc->mfi_io_lock);
/* Copy results back out for DATAIN (and STP). */
3305 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3307 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3308 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3309 for (i = 0; i < ioc->mfi_sge_count; i++) {
3310 #ifdef COMPAT_FREEBSD32
3311 if (cmd == MFI_CMD) {
3314 addr = ioc->mfi_sgl[i].iov_base;
3315 len = ioc->mfi_sgl[i].iov_len;
3316 #ifdef COMPAT_FREEBSD32
3318 /* 32bit on 64bit */
3319 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3320 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3321 len = ioc32->mfi_sgl[i].iov_len;
3324 error = copyout(temp, addr, len);
3326 device_printf(sc->mfi_dev,
3327 "Copy out failed\n");
3335 if (ioc->mfi_sense_len) {
3336 /* get user-space sense ptr then copy out sense */
3337 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3338 &sense_ptr.sense_ptr_data[0],
3339 sizeof(sense_ptr.sense_ptr_data));
3340 #ifdef COMPAT_FREEBSD32
3341 if (cmd != MFI_CMD) {
3343 * not 64bit native so zero out any address
3345 sense_ptr.addr.high = 0;
3348 error = copyout(cm->cm_sense, sense_ptr.user_space,
3349 ioc->mfi_sense_len);
3351 device_printf(sc->mfi_dev,
3352 "Copy out failed\n");
3357 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3359 mfi_config_unlock(sc, locked);
3361 free(data, M_MFIBUF);
/* STP cleanup: unload, free, and destroy the kbuff DMA resources. */
3362 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3363 for (i = 0; i < 2; i++) {
3364 if (sc->kbuff_arr[i]) {
3365 if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3367 sc->mfi_kbuff_arr_dmat[i],
3368 sc->mfi_kbuff_arr_dmamap[i]
3370 if (sc->kbuff_arr[i] != NULL)
3372 sc->mfi_kbuff_arr_dmat[i],
3374 sc->mfi_kbuff_arr_dmamap[i]
3376 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3377 bus_dma_tag_destroy(
3378 sc->mfi_kbuff_arr_dmat[i]);
3383 mtx_lock(&sc->mfi_io_lock);
3384 mfi_release_command(cm);
3385 mtx_unlock(&sc->mfi_io_lock);
/* AEN registration request. */
3391 aen = (struct mfi_ioc_aen *)arg;
3392 mtx_lock(&sc->mfi_io_lock);
3393 error = mfi_aen_register(sc, aen->aen_seq_num,
3394 aen->aen_class_locale);
3395 mtx_unlock(&sc->mfi_io_lock);
3398 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3400 devclass_t devclass;
3401 struct mfi_linux_ioc_packet l_ioc;
3404 devclass = devclass_find("mfi");
3405 if (devclass == NULL)
3408 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3411 adapter = l_ioc.lioc_adapter_no;
3412 sc = devclass_get_softc(devclass, adapter);
3415 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3416 cmd, arg, flag, td));
3419 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3421 devclass_t devclass;
3422 struct mfi_linux_ioc_aen l_aen;
3425 devclass = devclass_find("mfi");
3426 if (devclass == NULL)
3429 error = copyin(arg, &l_aen, sizeof(l_aen));
3432 adapter = l_aen.laen_adapter_no;
3433 sc = devclass_get_softc(devclass, adapter);
3436 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3437 cmd, arg, flag, td));
3440 #ifdef COMPAT_FREEBSD32
3441 case MFIIO_PASSTHRU32:
/* 32-bit passthru: thunk into the native struct, then fall through. */
3442 if (!SV_CURPROC_FLAG(SV_ILP32)) {
3446 iop_swab.ioc_frame = iop32->ioc_frame;
3447 iop_swab.buf_size = iop32->buf_size;
3448 iop_swab.buf = PTRIN(iop32->buf);
3452 case MFIIO_PASSTHRU:
3453 error = mfi_user_command(sc, iop);
3454 #ifdef COMPAT_FREEBSD32
3455 if (cmd == MFIIO_PASSTHRU32)
3456 iop32->ioc_frame = iop_swab.ioc_frame;
3460 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_linux_ioctl_int -- worker for the Linux-emulation ioctl shims.
 *
 * Mirrors the native MFI_CMD path but uses the Linux packet/aen layouts
 * (32-bit only: user pointers go through PTRIN and sense addr.high is
 * forced to zero).
 *
 * NOTE(review): this excerpt elides some original source lines; bodies
 * below are not contiguous.
 */
3469 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3471 struct mfi_softc *sc;
3472 struct mfi_linux_ioc_packet l_ioc;
3473 struct mfi_linux_ioc_aen l_aen;
3474 struct mfi_command *cm = NULL;
3475 struct mfi_aen *mfi_aen_entry;
3476 union mfi_sense_ptr sense_ptr;
3477 uint32_t context = 0;
3478 uint8_t *data = NULL, *temp;
3485 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3486 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3490 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3494 mtx_lock(&sc->mfi_io_lock);
3495 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3496 mtx_unlock(&sc->mfi_io_lock);
3499 mtx_unlock(&sc->mfi_io_lock);
3503 * save off original context since copying from user
3504 * will clobber some data
3506 context = cm->cm_frame->header.context;
3508 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3509 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3510 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3511 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3512 cm->cm_frame->header.scsi_status = 0;
3513 cm->cm_frame->header.pad0 = 0;
3514 if (l_ioc.lioc_sge_count)
3516 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3518 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3519 cm->cm_flags |= MFI_CMD_DATAIN;
3520 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3521 cm->cm_flags |= MFI_CMD_DATAOUT;
3522 cm->cm_len = cm->cm_frame->header.data_len;
3524 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3525 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3531 /* restore header context */
3532 cm->cm_frame->header.context = context;
/* Gather user data into the contiguous kernel buffer for DATAOUT. */
3535 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3536 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3537 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3539 l_ioc.lioc_sgl[i].iov_len);
3541 device_printf(sc->mfi_dev,
3542 "Copy in failed\n");
3545 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3549 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3550 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3552 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3553 cm->cm_frame->pass.sense_addr_lo =
3554 (uint32_t)cm->cm_sense_busaddr;
3555 cm->cm_frame->pass.sense_addr_hi =
3556 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3559 mtx_lock(&sc->mfi_io_lock);
3560 error = mfi_check_command_pre(sc, cm);
3562 mtx_unlock(&sc->mfi_io_lock);
3566 if ((error = mfi_wait_command(sc, cm)) != 0) {
3567 device_printf(sc->mfi_dev,
3568 "Controller polled failed\n");
3569 mtx_unlock(&sc->mfi_io_lock);
3573 mfi_check_command_post(sc, cm);
3574 mtx_unlock(&sc->mfi_io_lock);
/* Scatter the kernel buffer back to the user iovecs for DATAIN. */
3577 if (cm->cm_flags & MFI_CMD_DATAIN) {
3578 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3579 error = copyout(temp,
3580 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3581 l_ioc.lioc_sgl[i].iov_len);
3583 device_printf(sc->mfi_dev,
3584 "Copy out failed\n");
3587 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3591 if (l_ioc.lioc_sense_len) {
3592 /* get user-space sense ptr then copy out sense */
3593 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3594 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3595 &sense_ptr.sense_ptr_data[0],
3596 sizeof(sense_ptr.sense_ptr_data));
3599 * only 32bit Linux support so zero out any
3600 * address over 32bit
3602 sense_ptr.addr.high = 0;
3604 error = copyout(cm->cm_sense, sense_ptr.user_space,
3605 l_ioc.lioc_sense_len);
3607 device_printf(sc->mfi_dev,
3608 "Copy out failed\n");
/* Write the final command status directly into the user's frame. */
3613 error = copyout(&cm->cm_frame->header.cmd_status,
3614 &((struct mfi_linux_ioc_packet*)arg)
3615 ->lioc_frame.hdr.cmd_status,
3618 device_printf(sc->mfi_dev,
3619 "Copy out failed\n");
3624 mfi_config_unlock(sc, locked);
3626 free(data, M_MFIBUF);
3628 mtx_lock(&sc->mfi_io_lock);
3629 mfi_release_command(cm);
3630 mtx_unlock(&sc->mfi_io_lock);
3634 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3635 error = copyin(arg, &l_aen, sizeof(l_aen));
3638 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3639 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3641 mtx_lock(&sc->mfi_io_lock);
3642 if (mfi_aen_entry != NULL) {
3643 mfi_aen_entry->p = curproc;
3644 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3647 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3648 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
3651 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3653 free(mfi_aen_entry, M_MFIBUF);
3655 mtx_unlock(&sc->mfi_io_lock);
3659 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_poll -- poll(2) handler for the control device.  Reports readable
 * when an AEN has fired (consuming the trigger flag); otherwise records
 * the thread for selwakeup when one arrives.
 */
3668 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3670 struct mfi_softc *sc;
3675 if (poll_events & (POLLIN | POLLRDNORM)) {
3676 if (sc->mfi_aen_triggered != 0) {
3677 revents |= poll_events & (POLLIN | POLLRDNORM);
3678 sc->mfi_aen_triggered = 0;
3680 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3686 if (poll_events & (POLLIN | POLLRDNORM)) {
3687 sc->mfi_poll_waiting = 1;
3688 selrecord(td, &sc->mfi_select);
/*
 * NOTE(review): the function signature for this body is elided in this
 * excerpt; from its shape (iterating every mfi softc and printing busy
 * commands past the timeout deadline) it is presumably the driver's
 * debug dump-all routine -- confirm against the full source.
 */
3698 struct mfi_softc *sc;
3699 struct mfi_command *cm;
3705 dc = devclass_find("mfi");
3707 printf("No mfi dev class\n");
/* Walk every attached mfi unit. */
3711 for (i = 0; ; i++) {
3712 sc = devclass_get_softc(dc, i);
3715 device_printf(sc->mfi_dev, "Dumping\n\n");
3717 deadline = time_uptime - mfi_cmd_timeout;
3718 mtx_lock(&sc->mfi_io_lock);
/* Report any busy command older than the command timeout. */
3719 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3720 if (cm->cm_timestamp <= deadline) {
3721 device_printf(sc->mfi_dev,
3722 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3723 cm, (int)(time_uptime - cm->cm_timestamp));
3734 mtx_unlock(&sc->mfi_io_lock);
3741 mfi_timeout(void *data)
3743 struct mfi_softc *sc = (struct mfi_softc *)data;
3744 struct mfi_command *cm, *tmp;
3748 deadline = time_uptime - mfi_cmd_timeout;
3749 if (sc->adpreset == 0) {
3750 if (!mfi_tbolt_reset(sc)) {
3751 callout_reset(&sc->mfi_watchdog_callout,
3752 mfi_cmd_timeout * hz, mfi_timeout, sc);
3756 mtx_lock(&sc->mfi_io_lock);
3757 TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3758 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3760 if (cm->cm_timestamp <= deadline) {
3761 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3762 cm->cm_timestamp = time_uptime;
3764 device_printf(sc->mfi_dev,
3765 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3766 cm, (int)(time_uptime - cm->cm_timestamp)
3769 MFI_VALIDATE_CMD(sc, cm);
3771 * While commands can get stuck forever we do
3772 * not fail them as there is no way to tell if
3773 * the controller has actually processed them
3776 * In addition its very likely that force
3777 * failing a command here would cause a panic
3790 mtx_unlock(&sc->mfi_io_lock);
3792 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,