/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>
static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
    0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
    &mfi_polled_cmd_timeout, 0,
    "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
    0, "Command timeout (in seconds)");
/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{

	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{

	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{

	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{

	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM))
			return 1;
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{

	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{

	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
	}
}
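/*
 * Note on the doorbell encodings above: the xscale inbound queue port
 * takes the frame's physical address shifted right by 3 with the extra
 * frame count OR'd into the freed-up low bits, while the ppc/skinny
 * flavors post the address with the frame count shifted left by one and
 * bit 0 set.  For example, on xscale a frame at 0x1000 with 2 extra
 * frames is posted as (0x1000 >> 3) | 2 == 0x202.
 */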
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
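/*
 * The wait loop above polls the firmware status register ten times a
 * second (DELAY(100000) is 100ms), so each state gets max_wait * 10
 * polls, i.e. max_wait seconds, before the driver declares the
 * firmware stuck and fails the attach with ENXIO.
 */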
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}
int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return EINVAL;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;
	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}
	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */
	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
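	/*
	 * Worked example for the SGL clamp above: with a 128KB MFI_MAXPHYS
	 * and 4KB pages, (MFI_MAXPHYS / PAGE_SIZE) + 1 is 33, so a
	 * controller advertising more than 33 scatter/gather elements is
	 * clamped to 33.  The actual MFI_MAXPHYS and PAGE_SIZE values are
	 * platform dependent; these numbers are only illustrative.
	 */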
	/* ThunderBolt Support get the contiguous memory */

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* alignmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return 0;
		}
		/*
		  Allocate DMA memory mapping for MPI2 IOC Init descriptor,
		  we are taking it different from what we have allocated for Request
		  and reply descriptors to avoid confusion later
		*/
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
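	/*
	 * Layout example for the comms area just allocated, assuming
	 * mfi_max_fw_cmds == 128: commsz is 128 * sizeof(uint32_t) plus
	 * sizeof(struct mfi_hwcomms), which holds the producer and
	 * consumer indexes and one hw_reply_q slot, giving a 129-entry
	 * reply queue overall (matching the comment above).
	 */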
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
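	/*
	 * Sizing sketch for the pool above, assuming 64-bit S/G entries
	 * (sizeof(struct mfi_sg64) == 12) and mfi_max_sge == 33:
	 * frames = (12 * 33 - 1) / 64 + 2 = 8, so each command occupies
	 * 8 * MFI_FRAME_SIZE = 512 bytes of the frame pool.  The exact
	 * numbers depend on the structure sizes on a given platform.
	 */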
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);
	/* Before moving the FW to operational state, check whether
	 * hostmemory is required by the FW or not
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return error;
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;
	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}

	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}
	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i - 1);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}
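/*
 * The loop above gives command i a fixed slice of each DMA region: its
 * frame lives at mfi_frames + mfi_cmd_size * i and its sense buffer at
 * mfi_sense_busaddr + MFI_SENSE_LEN * i.  The frame's context field is
 * set to i, so a context read back from the hardware reply queue can be
 * turned straight back into &sc->mfi_commands[context] (see mfi_intr()
 * below).
 */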
void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * Command may be on other queues e.g. busy queue depending on the
	 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
	 * properly
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}
static int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;

	return (0);
}
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object;
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}
int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class  = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}
int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}
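/*
 * A minimal sketch of the calling convention for the helpers above.
 * This is the pattern used by mfi_ldprobe() and friends later in this
 * file, not a new API:
 *
 *	mtx_lock(&sc->mfi_io_lock);
 *	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
 *	    (void **)&list, sizeof(*list));
 *	if (error == 0) {
 *		cm->cm_flags = MFI_CMD_DATAIN;
 *		error = mfi_wait_command(sc, cm);	(sleeps on cm)
 *		... inspect cm->cm_frame->header.cmd_status ...
 *		mfi_release_command(cm);
 *	}
 *	mtx_unlock(&sc->mfi_io_lock);
 */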
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}

	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}
static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give defered I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}
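/*
 * The reply ring walked above is a simple producer/consumer pair: the
 * firmware advances hw_pi as it posts completed command contexts, and
 * the driver advances hw_ci as it consumes them, wrapping at
 * mfi_max_fw_cmds + 1 entries.  With mfi_max_fw_cmds == 128, ci takes
 * the values 0..128 and then wraps back to 0.
 */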
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}
/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}
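/*
 * Example: 0xff00003c decodes as "boot + 60s" (top byte all ones, low
 * 24 bits 60), while 0x0000003c is simply "60s" after midnight on
 * Jan 1, 2000.
 */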
static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and Delete
				 * invalid SYSPD's
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/* During load time driver reads all the events starting
		 * from the one that has been logged after shutdown. Avoid
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			Fix: for kernel panics when SSCD is removed
			KASSERT(ld != NULL, ("volume disappeared"));
			*/
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}
static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}
static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mfi_abort(sc, &sc->mfi_aen_cm);
		}
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

out:
	return (error);
}
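/*
 * The re-registration logic above in short: if an AEN command is
 * already outstanding and it covers both the requested locale bits and
 * an equal-or-more-verbose event class, the new request is a no-op;
 * otherwise the old command is aborted and reissued with the union of
 * the two locales and the more verbose of the two classes.
 */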
static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_aen_cm == NULL)
		return;

	hdr = &cm->cm_frame->header;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_aen_cm);
	sc->mfi_aen_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_aen_setup(sc, seq);
}
#define MAX_EVENTS 15

static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class  = mfi_event_class;

	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
	    * (MAX_EVENTS - 1);
	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			free(el, M_MFIBUF);
			return (EBUSY);
		}

		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			break;
		}

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			mfi_release_command(cm);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			mfi_release_command(cm);
			break;
		}
		mfi_release_command(cm);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			mfi_queue_evt(sc, &el->event[i]);
		}
		seq = el->event[el->count - 1].seq + 1;
	}

	free(el, M_MFIBUF);
	return (0);
}
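/*
 * Example of the circular-log handling above: if the log wrapped so
 * that start_seq is 900 and stop_seq is 100, an entry with seq >= 100
 * only terminates the scan once it also falls below 900; in the simple
 * unwrapped case (start_seq <= stop_seq) any entry at or past stop_seq
 * ends the scan immediately.
 */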
static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_ld_info *ld_info = NULL;
	struct mfi_disk_pending *ld_pend;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (ld_pend != NULL) {
		ld_pend->ld_id = id;
		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
	    (void **)&ld_info, sizeof(*ld_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
		if (ld_info)
			free(ld_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get logical drive: %d\n", id);
		free(ld_info, M_MFIBUF);
		return (0);
	}
	if (ld_info->ld_config.params.isSSCD != 1)
		mfi_add_ld_complete(cm);
	else {
		mfi_release_command(cm);
		if (ld_info)		/* SSCD drives ld_info free here */
			free(ld_info, M_MFIBUF);
	}
	return (0);
}
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
		free(ld_info, M_MFIBUF);
		wakeup(&sc->mfi_map_sync_cm);
		mfi_release_command(cm);
		return;
	}
	wakeup(&sc->mfi_map_sync_cm);
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}
static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_pd_info *pd_info = NULL;
	struct mfi_system_pending *syspd_pend;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (syspd_pend != NULL) {
		syspd_pend->pd_id = id;
		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
	    (void **)&pd_info, sizeof(*pd_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
		    error);
		if (pd_info)
			free(pd_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	dcmd->header.scsi_status = 0;
	dcmd->header.pad0 = 0;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get physical drive info %d\n", id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return (error);
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_add_sys_pd_complete(cm);
	return (0);
}
static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		free(pd_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}
static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
	struct bio *bio;
	struct mfi_command *cm = NULL;

	/*reserving two commands to avoid starvation for IOCTL*/
	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
		return (NULL);
	if ((bio = mfi_dequeue_bio(sc)) == NULL)
		return (NULL);
	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
		cm = mfi_build_ldio(sc, bio);
	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
		cm = mfi_build_syspdio(sc, bio);
	}
	if (!cm)
		mfi_enqueue_bio(sc, bio);
	return cm;
}
2060 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2064 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2068 if (((lba & 0x1fffff) == lba)
2069 && ((block_count & 0xff) == block_count)
2071 /* We can fit in a 6 byte cdb */
2072 struct scsi_rw_6 *scsi_cmd;
2074 scsi_cmd = (struct scsi_rw_6 *)cdb;
2075 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2076 scsi_ulto3b(lba, scsi_cmd->addr);
2077 scsi_cmd->length = block_count & 0xff;
2078 scsi_cmd->control = 0;
2079 cdb_len = sizeof(*scsi_cmd);
2080 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2081 /* Need a 10 byte CDB */
2082 struct scsi_rw_10 *scsi_cmd;
2084 scsi_cmd = (struct scsi_rw_10 *)cdb;
2085 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2086 scsi_cmd->byte2 = byte2;
2087 scsi_ulto4b(lba, scsi_cmd->addr);
2088 scsi_cmd->reserved = 0;
2089 scsi_ulto2b(block_count, scsi_cmd->length);
2090 scsi_cmd->control = 0;
2091 cdb_len = sizeof(*scsi_cmd);
2092 } else if (((block_count & 0xffffffff) == block_count) &&
2093 ((lba & 0xffffffff) == lba)) {
2094 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2095 struct scsi_rw_12 *scsi_cmd;
2097 scsi_cmd = (struct scsi_rw_12 *)cdb;
2098 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2099 scsi_cmd->byte2 = byte2;
2100 scsi_ulto4b(lba, scsi_cmd->addr);
2101 scsi_cmd->reserved = 0;
2102 scsi_ulto4b(block_count, scsi_cmd->length);
2103 scsi_cmd->control = 0;
2104 cdb_len = sizeof(*scsi_cmd);
2107 * 16-byte CDB. We'll only get here if the LBA is larger
2110 struct scsi_rw_16 *scsi_cmd;
2112 scsi_cmd = (struct scsi_rw_16 *)cdb;
2113 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2114 scsi_cmd->byte2 = byte2;
2115 scsi_u64to8b(lba, scsi_cmd->addr);
2116 scsi_cmd->reserved = 0;
2117 scsi_ulto4b(block_count, scsi_cmd->length);
2118 scsi_cmd->control = 0;
2119 cdb_len = sizeof(*scsi_cmd);
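/*
 * Illustrative use of mfi_build_cdb() (a sketch only; "lba", "nblocks"
 * and "cdb" are assumed caller-side names, not part of the driver):
 *
 *	uint8_t cdb[16];
 *	int cdb_len = mfi_build_cdb(1, 0, lba, nblocks, cdb);
 *
 * The returned length is 6, 10, 12 or 16 depending on whether the LBA
 * and block count fit the smaller CDB formats handled above.
 */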
2125 extern char *unmapped_buf;
2127 static struct mfi_command *
2128 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2130 struct mfi_command *cm;
2131 struct mfi_pass_frame *pass;
2132 uint32_t context = 0;
2133 int flags = 0, blkcount = 0, readop;
2136 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2138 if ((cm = mfi_dequeue_free(sc)) == NULL)
2141 /* Zero out the MFI frame */
2142 context = cm->cm_frame->header.context;
2143 bzero(cm->cm_frame, sizeof(union mfi_frame));
2144 cm->cm_frame->header.context = context;
2145 pass = &cm->cm_frame->pass;
2146 bzero(pass->cdb, 16);
2147 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2148 switch (bio->bio_cmd & 0x03) {
2150 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2154 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2158 /* TODO: what about BIO_DELETE??? */
2159 panic("Unsupported bio command %x\n", bio->bio_cmd);
2162 /* Cheat with the sector length to avoid a non-constant division */
2163 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
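/*
 * Example of the rounding above (values assumed for illustration):
 * with MFI_SECTOR_LEN of 512, a 4096-byte bio gives blkcount == 8,
 * while a 100-byte bio still rounds up to a single sector.
 */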
2164 /* Fill the LBA and Transfer length in CDB */
2165 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2167 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2168 pass->header.lun_id = 0;
2169 pass->header.timeout = 0;
2170 pass->header.flags = 0;
2171 pass->header.scsi_status = 0;
2172 pass->header.sense_len = MFI_SENSE_LEN;
2173 pass->header.data_len = bio->bio_bcount;
2174 pass->header.cdb_len = cdb_len;
2175 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2176 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2177 cm->cm_complete = mfi_bio_complete;
2178 cm->cm_private = bio;
2179 cm->cm_data = unmapped_buf;
2180 cm->cm_len = bio->bio_bcount;
2181 cm->cm_sg = &pass->sgl;
2182 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2183 cm->cm_flags = flags;
2188 static struct mfi_command *
2189 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2191 struct mfi_io_frame *io;
2192 struct mfi_command *cm;
2195 uint32_t context = 0;
2197 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2199 if ((cm = mfi_dequeue_free(sc)) == NULL)
2202 /* Zero out the MFI frame */
2203 context = cm->cm_frame->header.context;
2204 bzero(cm->cm_frame, sizeof(union mfi_frame));
2205 cm->cm_frame->header.context = context;
2206 io = &cm->cm_frame->io;
2207 switch (bio->bio_cmd & 0x03) {
2209 io->header.cmd = MFI_CMD_LD_READ;
2210 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2213 io->header.cmd = MFI_CMD_LD_WRITE;
2214 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2217 /* TODO: what about BIO_DELETE??? */
2218 panic("Unsupported bio command %x\n", bio->bio_cmd);
2221 /* Cheat with the sector length to avoid a non-constant division */
2222 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2223 io->header.target_id = (uintptr_t)bio->bio_driver1;
2224 io->header.timeout = 0;
2225 io->header.flags = 0;
2226 io->header.scsi_status = 0;
2227 io->header.sense_len = MFI_SENSE_LEN;
2228 io->header.data_len = blkcount;
2229 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2230 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2231 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2232 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2233 cm->cm_complete = mfi_bio_complete;
2234 cm->cm_private = bio;
2235 cm->cm_data = unmapped_buf;
2236 cm->cm_len = bio->bio_bcount;
2237 cm->cm_sg = &io->sgl;
2238 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2239 cm->cm_flags = flags;
2245 mfi_bio_complete(struct mfi_command *cm)
2248 struct mfi_frame_header *hdr;
2249 struct mfi_softc *sc;
2251 bio = cm->cm_private;
2252 hdr = &cm->cm_frame->header;
2255 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2256 bio->bio_flags |= BIO_ERROR;
2257 bio->bio_error = EIO;
2258 device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2259 "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2260 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2261 } else if (cm->cm_error != 0) {
2262 bio->bio_flags |= BIO_ERROR;
2263 bio->bio_error = cm->cm_error;
2264 device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2268 mfi_release_command(cm);
2269 mfi_disk_complete(bio);
2273 mfi_startio(struct mfi_softc *sc)
2275 struct mfi_command *cm;
2276 struct ccb_hdr *ccbh;
2279 /* Don't bother if we're short on resources */
2280 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2283 /* Try a command that has already been prepared */
2284 cm = mfi_dequeue_ready(sc);
2287 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2288 cm = sc->mfi_cam_start(ccbh);
2291 /* Nope, so look for work on the bioq */
2293 cm = mfi_bio_command(sc);
2295 /* No work available, so exit */
2299 /* Send the command to the controller */
2300 if (mfi_mapcmd(sc, cm) != 0) {
2301 device_printf(sc->mfi_dev, "Failed to startio\n");
2302 mfi_requeue_ready(cm);
2309 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2313 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2315 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2316 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2317 if (cm->cm_flags & MFI_CMD_CCB)
2318 error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2319 cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2321 else if (cm->cm_flags & MFI_CMD_BIO)
2322 error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2323 cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2326 error = bus_dmamap_load(sc->mfi_buffer_dmat,
2327 cm->cm_dmamap, cm->cm_data, cm->cm_len,
2328 mfi_data_cb, cm, polled);
2329 if (error == EINPROGRESS) {
2330 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2334 error = mfi_send_frame(sc, cm);
2341 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2343 struct mfi_frame_header *hdr;
2344 struct mfi_command *cm;
2346 struct mfi_softc *sc;
2347 int i, j, first, dir;
2348 int sge_size, locked;
2350 cm = (struct mfi_command *)arg;
2352 hdr = &cm->cm_frame->header;
2356 * We need to check whether we hold the lock: this is an async
2357 * callback, so even though our caller mfi_mapcmd asserts that
2358 * it has the lock, there is no guarantee that it hasn't been
2359 * dropped if bus_dmamap_load returned prior to our
2362 if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2363 mtx_lock(&sc->mfi_io_lock);
2366 printf("error %d in callback\n", error);
2367 cm->cm_error = error;
2368 mfi_complete(sc, cm);
2371 /* Use the IEEE SGL only for I/Os on a SKINNY controller.
2372 * For other commands on a SKINNY controller use either
2373 * sg32 or sg64 based on sizeof(bus_addr_t).
2374 * Also calculate the total frame size based on the type
2377 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2378 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2379 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2380 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2381 for (i = 0; i < nsegs; i++) {
2382 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2383 sgl->sg_skinny[i].len = segs[i].ds_len;
2384 sgl->sg_skinny[i].flag = 0;
2386 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2387 sge_size = sizeof(struct mfi_sg_skinny);
2388 hdr->sg_count = nsegs;
2391 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2392 first = cm->cm_stp_len;
2393 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2394 sgl->sg32[j].addr = segs[0].ds_addr;
2395 sgl->sg32[j++].len = first;
2397 sgl->sg64[j].addr = segs[0].ds_addr;
2398 sgl->sg64[j++].len = first;
2402 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2403 for (i = 0; i < nsegs; i++) {
2404 sgl->sg32[j].addr = segs[i].ds_addr + first;
2405 sgl->sg32[j++].len = segs[i].ds_len - first;
2409 for (i = 0; i < nsegs; i++) {
2410 sgl->sg64[j].addr = segs[i].ds_addr + first;
2411 sgl->sg64[j++].len = segs[i].ds_len - first;
2414 hdr->flags |= MFI_FRAME_SGL64;
2417 sge_size = sc->mfi_sge_size;
2421 if (cm->cm_flags & MFI_CMD_DATAIN) {
2422 dir |= BUS_DMASYNC_PREREAD;
2423 hdr->flags |= MFI_FRAME_DIR_READ;
2425 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2426 dir |= BUS_DMASYNC_PREWRITE;
2427 hdr->flags |= MFI_FRAME_DIR_WRITE;
2429 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2430 cm->cm_flags |= MFI_CMD_MAPPED;
2433 * Instead of calculating the total number of frames in the
2434 * compound frame, it's already assumed that there will be at
2435 * least 1 frame, so don't compensate for the modulo of the
2436 * following division.
2438 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2439 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
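/*
 * Worked example (sizes assumed for illustration): with a 64-byte
 * MFI_FRAME_SIZE, a command whose header plus SG entries total 150
 * bytes yields cm_extra_frames == (150 - 1) / 64 == 2, i.e. two
 * additional frames beyond the first.
 */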
2441 if ((error = mfi_send_frame(sc, cm)) != 0) {
2442 printf("error %d in callback from mfi_send_frame\n", error);
2443 cm->cm_error = error;
2444 mfi_complete(sc, cm);
2449 /* leave the lock in the state we found it */
2451 mtx_unlock(&sc->mfi_io_lock);
2457 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2461 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2463 if (sc->MFA_enabled)
2464 error = mfi_tbolt_send_frame(sc, cm);
2466 error = mfi_std_send_frame(sc, cm);
2468 if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2469 mfi_remove_busy(cm);
2475 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2477 struct mfi_frame_header *hdr;
2478 int tm = mfi_polled_cmd_timeout * 1000;
2480 hdr = &cm->cm_frame->header;
2482 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2483 cm->cm_timestamp = time_uptime;
2484 mfi_enqueue_busy(cm);
2486 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2487 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2491 * The bus address of the command is aligned on a 64 byte boundary,
2492 * leaving the 6 least-significant bits zero. For whatever reason, the
2493 * hardware wants the address shifted right by three, leaving just
2494 * 3 zero bits. These three bits are then used as a prefetching
2495 * hint for the hardware to predict how many frames need to be
2496 * fetched across the bus. If a command has more than 8 frames
2497 * then the 3 bits are set to 0x7 and the firmware uses other
2498 * information in the command to determine the total amount to fetch.
2499 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2500 * is enough for both 32bit and 64bit systems.
2502 if (cm->cm_extra_frames > 7)
2503 cm->cm_extra_frames = 7;
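/*
 * Numeric sketch of the encoding described above (address chosen for
 * illustration): a frame at bus address 0x12340040 is 64-byte aligned,
 * so shifting it right by three still leaves the low 3 bits zero, and
 * a command with two extra frames can be encoded as
 * (0x12340040 >> 3) | 2.
 */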
2505 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2507 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2510 /* This is a polled command, so busy-wait for it to complete. */
2511 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2518 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2519 device_printf(sc->mfi_dev, "Frame %p timed out "
2520 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2529 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2532 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2534 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2536 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2537 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2538 dir |= BUS_DMASYNC_POSTREAD;
2539 if (cm->cm_flags & MFI_CMD_DATAOUT)
2540 dir |= BUS_DMASYNC_POSTWRITE;
2542 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2543 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2544 cm->cm_flags &= ~MFI_CMD_MAPPED;
2547 cm->cm_flags |= MFI_CMD_COMPLETED;
2549 if (cm->cm_complete != NULL)
2550 cm->cm_complete(cm);
2556 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2558 struct mfi_command *cm;
2559 struct mfi_abort_frame *abort;
2561 uint32_t context = 0;
2563 mtx_lock(&sc->mfi_io_lock);
2564 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2565 mtx_unlock(&sc->mfi_io_lock);
2569 /* Zero out the MFI frame */
2570 context = cm->cm_frame->header.context;
2571 bzero(cm->cm_frame, sizeof(union mfi_frame));
2572 cm->cm_frame->header.context = context;
2574 abort = &cm->cm_frame->abort;
2575 abort->header.cmd = MFI_CMD_ABORT;
2576 abort->header.flags = 0;
2577 abort->header.scsi_status = 0;
2578 abort->abort_context = (*cm_abort)->cm_frame->header.context;
2579 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2580 abort->abort_mfi_addr_hi =
2581 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2583 cm->cm_flags = MFI_CMD_POLLED;
2585 if ((error = mfi_mapcmd(sc, cm)) != 0)
2586 device_printf(sc->mfi_dev, "failed to abort command\n");
2587 mfi_release_command(cm);
2589 mtx_unlock(&sc->mfi_io_lock);
2590 while (i < 5 && *cm_abort != NULL) {
2591 tsleep(cm_abort, 0, "mfiabort",
2595 if (*cm_abort != NULL) {
2596 /* Force a complete if command didn't abort */
2597 mtx_lock(&sc->mfi_io_lock);
2598 (*cm_abort)->cm_complete(*cm_abort);
2599 mtx_unlock(&sc->mfi_io_lock);
2606 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2609 struct mfi_command *cm;
2610 struct mfi_io_frame *io;
2612 uint32_t context = 0;
2614 if ((cm = mfi_dequeue_free(sc)) == NULL)
2617 /* Zero out the MFI frame */
2618 context = cm->cm_frame->header.context;
2619 bzero(cm->cm_frame, sizeof(union mfi_frame));
2620 cm->cm_frame->header.context = context;
2622 io = &cm->cm_frame->io;
2623 io->header.cmd = MFI_CMD_LD_WRITE;
2624 io->header.target_id = id;
2625 io->header.timeout = 0;
2626 io->header.flags = 0;
2627 io->header.scsi_status = 0;
2628 io->header.sense_len = MFI_SENSE_LEN;
2629 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2630 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2631 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2632 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2633 io->lba_lo = lba & 0xffffffff;
2636 cm->cm_sg = &io->sgl;
2637 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2638 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2640 if ((error = mfi_mapcmd(sc, cm)) != 0)
2641 device_printf(sc->mfi_dev, "failed dump blocks\n");
2642 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2643 BUS_DMASYNC_POSTWRITE);
2644 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2645 mfi_release_command(cm);
2651 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2654 struct mfi_command *cm;
2655 struct mfi_pass_frame *pass;
2656 int error, readop, cdb_len;
2659 if ((cm = mfi_dequeue_free(sc)) == NULL)
2662 pass = &cm->cm_frame->pass;
2663 bzero(pass->cdb, 16);
2664 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2667 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2668 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2669 pass->header.target_id = id;
2670 pass->header.timeout = 0;
2671 pass->header.flags = 0;
2672 pass->header.scsi_status = 0;
2673 pass->header.sense_len = MFI_SENSE_LEN;
2674 pass->header.data_len = len;
2675 pass->header.cdb_len = cdb_len;
2676 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2677 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2680 cm->cm_sg = &pass->sgl;
2681 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2682 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2684 if ((error = mfi_mapcmd(sc, cm)) != 0)
2685 device_printf(sc->mfi_dev, "failed dump blocks\n");
2686 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2687 BUS_DMASYNC_POSTWRITE);
2688 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2689 mfi_release_command(cm);
2695 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2697 struct mfi_softc *sc;
2702 mtx_lock(&sc->mfi_io_lock);
2703 if (sc->mfi_detaching)
2706 sc->mfi_flags |= MFI_FLAGS_OPEN;
2709 mtx_unlock(&sc->mfi_io_lock);
2715 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2717 struct mfi_softc *sc;
2718 struct mfi_aen *mfi_aen_entry, *tmp;
2722 mtx_lock(&sc->mfi_io_lock);
2723 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2725 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2726 if (mfi_aen_entry->p == curproc) {
2727 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2729 free(mfi_aen_entry, M_MFIBUF);
2732 mtx_unlock(&sc->mfi_io_lock);
2737 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2741 case MFI_DCMD_LD_DELETE:
2742 case MFI_DCMD_CFG_ADD:
2743 case MFI_DCMD_CFG_CLEAR:
2744 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2745 sx_xlock(&sc->mfi_config_lock);
2753 mfi_config_unlock(struct mfi_softc *sc, int locked)
2757 sx_xunlock(&sc->mfi_config_lock);
2761 * Perform pre-issue checks on commands from userland and possibly veto
2765 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2767 struct mfi_disk *ld, *ld2;
2769 struct mfi_system_pd *syspd = NULL;
2773 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2775 switch (cm->cm_frame->dcmd.opcode) {
2776 case MFI_DCMD_LD_DELETE:
2777 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2778 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2784 error = mfi_disk_disable(ld);
2786 case MFI_DCMD_CFG_CLEAR:
2787 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2788 error = mfi_disk_disable(ld);
2793 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2796 mfi_disk_enable(ld2);
2800 case MFI_DCMD_PD_STATE_SET:
2801 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2803 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2804 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2805 if (syspd->pd_id == syspd_id)
2812 error = mfi_syspd_disable(syspd);
2820 /* Perform post-issue checks on commands from userland. */
2822 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2824 struct mfi_disk *ld, *ldn;
2825 struct mfi_system_pd *syspd = NULL;
2829 switch (cm->cm_frame->dcmd.opcode) {
2830 case MFI_DCMD_LD_DELETE:
2831 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2832 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2835 KASSERT(ld != NULL, ("volume dissappeared"));
2836 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2837 mtx_unlock(&sc->mfi_io_lock);
2839 device_delete_child(sc->mfi_dev, ld->ld_dev);
2841 mtx_lock(&sc->mfi_io_lock);
2843 mfi_disk_enable(ld);
2845 case MFI_DCMD_CFG_CLEAR:
2846 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2847 mtx_unlock(&sc->mfi_io_lock);
2849 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2850 device_delete_child(sc->mfi_dev, ld->ld_dev);
2853 mtx_lock(&sc->mfi_io_lock);
2855 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2856 mfi_disk_enable(ld);
2859 case MFI_DCMD_CFG_ADD:
2862 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2865 case MFI_DCMD_PD_STATE_SET:
2866 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2868 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2869 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2870 if (syspd->pd_id == syspd_id)
2876 /* If the transition fails then enable the syspd again */
2877 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2878 mfi_syspd_enable(syspd);
2884 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2886 struct mfi_config_data *conf_data;
2887 struct mfi_command *ld_cm = NULL;
2888 struct mfi_ld_info *ld_info = NULL;
2889 struct mfi_ld_config *ld;
2893 conf_data = (struct mfi_config_data *)cm->cm_data;
2895 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2896 p = (char *)conf_data->array;
2897 p += conf_data->array_size * conf_data->array_count;
2898 ld = (struct mfi_ld_config *)p;
2899 if (ld->params.isSSCD == 1)
2901 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2902 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2903 (void **)&ld_info, sizeof(*ld_info));
2905 device_printf(sc->mfi_dev, "Failed to allocate"
2906 "MFI_DCMD_LD_GET_INFO %d", error);
2908 free(ld_info, M_MFIBUF);
2911 ld_cm->cm_flags = MFI_CMD_DATAIN;
2912 ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2913 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2914 if (mfi_wait_command(sc, ld_cm) != 0) {
2915 device_printf(sc->mfi_dev, "failed to get log drv\n");
2916 mfi_release_command(ld_cm);
2917 free(ld_info, M_MFIBUF);
2921 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2922 free(ld_info, M_MFIBUF);
2923 mfi_release_command(ld_cm);
2927 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2929 if (ld_info->ld_config.params.isSSCD == 1)
2932 mfi_release_command(ld_cm);
2933 free(ld_info, M_MFIBUF);
2940 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2943 struct mfi_ioc_packet *ioc = (struct mfi_ioc_packet *)arg;
2945 int sge_size, error;
2946 struct megasas_sge *kern_sge;
2948 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2949 kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2950 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2952 if (sizeof(bus_addr_t) == 8) {
2953 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2954 cm->cm_extra_frames = 2;
2955 sge_size = sizeof(struct mfi_sg64);
2957 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2958 sge_size = sizeof(struct mfi_sg32);
2961 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2962 for (i = 0; i < ioc->mfi_sge_count; i++) {
2963 if (bus_dma_tag_create(sc->mfi_parent_dmat, /* parent */
2964 1, 0, /* algnmnt, boundary */
2965 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2966 BUS_SPACE_MAXADDR, /* highaddr */
2967 NULL, NULL, /* filter, filterarg */
2968 ioc->mfi_sgl[i].iov_len,/* maxsize */
2970 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2971 BUS_DMA_ALLOCNOW, /* flags */
2972 NULL, NULL, /* lockfunc, lockarg */
2973 &sc->mfi_kbuff_arr_dmat[i])) {
2974 device_printf(sc->mfi_dev,
2975 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2979 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2980 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2981 &sc->mfi_kbuff_arr_dmamap[i])) {
2982 device_printf(sc->mfi_dev,
2983 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2987 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2988 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2989 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2990 &sc->mfi_kbuff_arr_busaddr[i], 0);
2992 if (!sc->kbuff_arr[i]) {
2993 device_printf(sc->mfi_dev,
2994 "Could not allocate memory for kbuff_arr info\n");
2997 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2998 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
3000 if (sizeof(bus_addr_t) == 8) {
3001 cm->cm_frame->stp.sgl.sg64[i].addr =
3002 kern_sge[i].phys_addr;
3003 cm->cm_frame->stp.sgl.sg64[i].len =
3004 ioc->mfi_sgl[i].iov_len;
3006 cm->cm_frame->stp.sgl.sg32[i].addr =
3007 kern_sge[i].phys_addr;
3008 cm->cm_frame->stp.sgl.sg32[i].len =
3009 ioc->mfi_sgl[i].iov_len;
3012 error = copyin(ioc->mfi_sgl[i].iov_base,
3014 ioc->mfi_sgl[i].iov_len);
3016 device_printf(sc->mfi_dev, "Copy in failed\n");
3021 cm->cm_flags |= MFI_CMD_MAPPED;
3026 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3028 struct mfi_command *cm;
3029 struct mfi_dcmd_frame *dcmd;
3030 void *ioc_buf = NULL;
3032 int error = 0, locked;
3035 if (ioc->buf_size > 0) {
3036 if (ioc->buf_size > 1024 * 1024)
3038 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3039 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3041 device_printf(sc->mfi_dev, "failed to copyin\n");
3042 free(ioc_buf, M_MFIBUF);
3047 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3049 mtx_lock(&sc->mfi_io_lock);
3050 while ((cm = mfi_dequeue_free(sc)) == NULL)
3051 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3053 /* Save context for later */
3054 context = cm->cm_frame->header.context;
3056 dcmd = &cm->cm_frame->dcmd;
3057 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3059 cm->cm_sg = &dcmd->sgl;
3060 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3061 cm->cm_data = ioc_buf;
3062 cm->cm_len = ioc->buf_size;
3064 /* restore context */
3065 cm->cm_frame->header.context = context;
3067 /* Cheat since we don't know if we're writing or reading */
3068 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
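/*
 * With both DATAIN and DATAOUT set, mfi_data_cb() syncs the buffer for
 * both directions (PREREAD and PREWRITE), which is safe but slightly
 * wasteful when the DCMD only moves data one way.
 */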
3070 error = mfi_check_command_pre(sc, cm);
3074 error = mfi_wait_command(sc, cm);
3076 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3079 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3080 mfi_check_command_post(sc, cm);
3082 mfi_release_command(cm);
3083 mtx_unlock(&sc->mfi_io_lock);
3084 mfi_config_unlock(sc, locked);
3085 if (ioc->buf_size > 0)
3086 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3088 free(ioc_buf, M_MFIBUF);
3092 #define PTRIN(p) ((void *)(uintptr_t)(p))
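/*
 * PTRIN converts a pointer carried as an integer in a 32-bit compat
 * structure (e.g. an iov_base from struct mfi_ioc_packet32) back into
 * a kernel void pointer suitable for copyin()/copyout().
 */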
3095 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3097 struct mfi_softc *sc;
3098 union mfi_statrequest *ms;
3099 struct mfi_ioc_packet *ioc;
3100 #ifdef COMPAT_FREEBSD32
3101 struct mfi_ioc_packet32 *ioc32;
3103 struct mfi_ioc_aen *aen;
3104 struct mfi_command *cm = NULL;
3105 uint32_t context = 0;
3106 union mfi_sense_ptr sense_ptr;
3107 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3110 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3111 #ifdef COMPAT_FREEBSD32
3112 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3113 struct mfi_ioc_passthru iop_swab;
3123 if (sc->hw_crit_error)
3126 if (sc->issuepend_done == 0)
3131 ms = (union mfi_statrequest *)arg;
3132 switch (ms->ms_item) {
3137 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3138 sizeof(struct mfi_qstat));
3145 case MFIIO_QUERY_DISK:
3147 struct mfi_query_disk *qd;
3148 struct mfi_disk *ld;
3150 qd = (struct mfi_query_disk *)arg;
3151 mtx_lock(&sc->mfi_io_lock);
3152 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3153 if (ld->ld_id == qd->array_id)
3158 mtx_unlock(&sc->mfi_io_lock);
3162 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3164 bzero(qd->devname, SPECNAMELEN + 1);
3165 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3166 mtx_unlock(&sc->mfi_io_lock);
3170 #ifdef COMPAT_FREEBSD32
3174 devclass_t devclass;
3175 ioc = (struct mfi_ioc_packet *)arg;
3178 adapter = ioc->mfi_adapter_no;
3179 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3180 devclass = devclass_find("mfi");
3181 sc = devclass_get_softc(devclass, adapter);
3183 mtx_lock(&sc->mfi_io_lock);
3184 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3185 mtx_unlock(&sc->mfi_io_lock);
3188 mtx_unlock(&sc->mfi_io_lock);
3192 * save off original context since copying from user
3193 * will clobber some data
3195 context = cm->cm_frame->header.context;
3196 cm->cm_frame->header.context = cm->cm_index;
3198 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3199 2 * MEGAMFI_FRAME_SIZE);
3200 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3201 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3202 cm->cm_frame->header.scsi_status = 0;
3203 cm->cm_frame->header.pad0 = 0;
3204 if (ioc->mfi_sge_count) {
3206 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3210 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3211 cm->cm_flags |= MFI_CMD_DATAIN;
3212 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3213 cm->cm_flags |= MFI_CMD_DATAOUT;
3214 /* Legacy app shim */
3215 if (cm->cm_flags == 0)
3216 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3217 cm->cm_len = cm->cm_frame->header.data_len;
3218 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3219 #ifdef COMPAT_FREEBSD32
3220 if (cmd == MFI_CMD) {
3223 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3224 #ifdef COMPAT_FREEBSD32
3226 /* 32bit on 64bit */
3227 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3228 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3231 cm->cm_len += cm->cm_stp_len;
3234 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3235 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3237 if (cm->cm_data == NULL) {
3238 device_printf(sc->mfi_dev, "Malloc failed\n");
3245 /* restore header context */
3246 cm->cm_frame->header.context = context;
3248 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3249 res = mfi_stp_cmd(sc, cm, arg);
3254 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3255 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3256 for (i = 0; i < ioc->mfi_sge_count; i++) {
3257 #ifdef COMPAT_FREEBSD32
3258 if (cmd == MFI_CMD) {
3261 addr = ioc->mfi_sgl[i].iov_base;
3262 len = ioc->mfi_sgl[i].iov_len;
3263 #ifdef COMPAT_FREEBSD32
3265 /* 32bit on 64bit */
3266 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3267 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3268 len = ioc32->mfi_sgl[i].iov_len;
3271 error = copyin(addr, temp, len);
3273 device_printf(sc->mfi_dev,
3274 "Copy in failed\n");
3282 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3283 locked = mfi_config_lock(sc,
3284 cm->cm_frame->dcmd.opcode);
3286 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3287 cm->cm_frame->pass.sense_addr_lo =
3288 (uint32_t)cm->cm_sense_busaddr;
3289 cm->cm_frame->pass.sense_addr_hi =
3290 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3292 mtx_lock(&sc->mfi_io_lock);
3293 skip_pre_post = mfi_check_for_sscd(sc, cm);
3294 if (!skip_pre_post) {
3295 error = mfi_check_command_pre(sc, cm);
3297 mtx_unlock(&sc->mfi_io_lock);
3301 if ((error = mfi_wait_command(sc, cm)) != 0) {
3302 device_printf(sc->mfi_dev,
3303 "Controller polled failed\n");
3304 mtx_unlock(&sc->mfi_io_lock);
3307 if (!skip_pre_post) {
3308 mfi_check_command_post(sc, cm);
3310 mtx_unlock(&sc->mfi_io_lock);
3312 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3314 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3315 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3316 for (i = 0; i < ioc->mfi_sge_count; i++) {
3317 #ifdef COMPAT_FREEBSD32
3318 if (cmd == MFI_CMD) {
3321 addr = ioc->mfi_sgl[i].iov_base;
3322 len = ioc->mfi_sgl[i].iov_len;
3323 #ifdef COMPAT_FREEBSD32
3325 /* 32bit on 64bit */
3326 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3327 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3328 len = ioc32->mfi_sgl[i].iov_len;
3331 error = copyout(temp, addr, len);
3333 device_printf(sc->mfi_dev,
3334 "Copy out failed\n");
3342 if (ioc->mfi_sense_len) {
3343 /* get user-space sense ptr then copy out sense */
3344 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3345 &sense_ptr.sense_ptr_data[0],
3346 sizeof(sense_ptr.sense_ptr_data));
3347 #ifdef COMPAT_FREEBSD32
3348 if (cmd != MFI_CMD) {
3350 * not 64-bit native, so zero out any address
3352 sense_ptr.addr.high = 0;
3355 error = copyout(cm->cm_sense, sense_ptr.user_space,
3356 ioc->mfi_sense_len);
3358 device_printf(sc->mfi_dev,
3359 "Copy out failed\n");
3364 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3366 mfi_config_unlock(sc, locked);
3368 free(data, M_MFIBUF);
3369 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3370 for (i = 0; i < 2; i++) {
3371 if (sc->kbuff_arr[i]) {
3372 if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3374 sc->mfi_kbuff_arr_dmat[i],
3375 sc->mfi_kbuff_arr_dmamap[i]
3377 if (sc->kbuff_arr[i] != NULL)
3379 sc->mfi_kbuff_arr_dmat[i],
3381 sc->mfi_kbuff_arr_dmamap[i]
3383 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3384 bus_dma_tag_destroy(
3385 sc->mfi_kbuff_arr_dmat[i]);
3390 mtx_lock(&sc->mfi_io_lock);
3391 mfi_release_command(cm);
3392 mtx_unlock(&sc->mfi_io_lock);
3398 aen = (struct mfi_ioc_aen *)arg;
3399 mtx_lock(&sc->mfi_io_lock);
3400 error = mfi_aen_register(sc, aen->aen_seq_num,
3401 aen->aen_class_locale);
3402 mtx_unlock(&sc->mfi_io_lock);
3405 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3407 devclass_t devclass;
3408 struct mfi_linux_ioc_packet l_ioc;
3411 devclass = devclass_find("mfi");
3412 if (devclass == NULL)
3415 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3418 adapter = l_ioc.lioc_adapter_no;
3419 sc = devclass_get_softc(devclass, adapter);
3422 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3423 cmd, arg, flag, td));
3426 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3428 devclass_t devclass;
3429 struct mfi_linux_ioc_aen l_aen;
3432 devclass = devclass_find("mfi");
3433 if (devclass == NULL)
3436 error = copyin(arg, &l_aen, sizeof(l_aen));
3439 adapter = l_aen.laen_adapter_no;
3440 sc = devclass_get_softc(devclass, adapter);
3443 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3444 cmd, arg, flag, td));
3447 #ifdef COMPAT_FREEBSD32
3448 case MFIIO_PASSTHRU32:
3449 if (!SV_CURPROC_FLAG(SV_ILP32)) {
3453 iop_swab.ioc_frame = iop32->ioc_frame;
3454 iop_swab.buf_size = iop32->buf_size;
3455 iop_swab.buf = PTRIN(iop32->buf);
3459 case MFIIO_PASSTHRU:
3460 error = mfi_user_command(sc, iop);
3461 #ifdef COMPAT_FREEBSD32
3462 if (cmd == MFIIO_PASSTHRU32)
3463 iop32->ioc_frame = iop_swab.ioc_frame;
3467 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3476 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3478 struct mfi_softc *sc;
3479 struct mfi_linux_ioc_packet l_ioc;
3480 struct mfi_linux_ioc_aen l_aen;
3481 struct mfi_command *cm = NULL;
3482 struct mfi_aen *mfi_aen_entry;
3483 union mfi_sense_ptr sense_ptr;
3484 uint32_t context = 0;
3485 uint8_t *data = NULL, *temp;
3492 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3493 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3497 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3501 mtx_lock(&sc->mfi_io_lock);
3502 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3503 mtx_unlock(&sc->mfi_io_lock);
3506 mtx_unlock(&sc->mfi_io_lock);
3510 * save off original context since copying from user
3511 * will clobber some data
3513 context = cm->cm_frame->header.context;
3515 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3516 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3517 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3518 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3519 cm->cm_frame->header.scsi_status = 0;
3520 cm->cm_frame->header.pad0 = 0;
3521 if (l_ioc.lioc_sge_count)
3523 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3525 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3526 cm->cm_flags |= MFI_CMD_DATAIN;
3527 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3528 cm->cm_flags |= MFI_CMD_DATAOUT;
3529 cm->cm_len = cm->cm_frame->header.data_len;
3531 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3532 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3534 if (cm->cm_data == NULL) {
3535 device_printf(sc->mfi_dev, "Malloc failed\n");
3542 /* restore header context */
3543 cm->cm_frame->header.context = context;
3546 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3547 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3548 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3550 l_ioc.lioc_sgl[i].iov_len);
3552 device_printf(sc->mfi_dev,
3553 "Copy in failed\n");
3556 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3560 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3561 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3563 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3564 cm->cm_frame->pass.sense_addr_lo =
3565 (uint32_t)cm->cm_sense_busaddr;
3566 cm->cm_frame->pass.sense_addr_hi =
3567 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3570 mtx_lock(&sc->mfi_io_lock);
3571 error = mfi_check_command_pre(sc, cm);
3573 mtx_unlock(&sc->mfi_io_lock);
3577 if ((error = mfi_wait_command(sc, cm)) != 0) {
3578 device_printf(sc->mfi_dev,
3579 "Controller polled failed\n");
3580 mtx_unlock(&sc->mfi_io_lock);
3584 mfi_check_command_post(sc, cm);
3585 mtx_unlock(&sc->mfi_io_lock);
3588 if (cm->cm_flags & MFI_CMD_DATAIN) {
3589 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3590 error = copyout(temp,
3591 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3592 l_ioc.lioc_sgl[i].iov_len);
3594 device_printf(sc->mfi_dev,
3595 "Copy out failed\n");
3598 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3602 if (l_ioc.lioc_sense_len) {
3603 /* get user-space sense ptr then copy out sense */
3604 bcopy(&((struct mfi_linux_ioc_packet *)arg)
3605 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3606 &sense_ptr.sense_ptr_data[0],
3607 sizeof(sense_ptr.sense_ptr_data));
3610 * only 32-bit Linux is supported, so zero out any
3611 * address above 32 bits
3613 sense_ptr.addr.high = 0;
3615 error = copyout(cm->cm_sense, sense_ptr.user_space,
3616 l_ioc.lioc_sense_len);
3618 device_printf(sc->mfi_dev,
3619 "Copy out failed\n");
3624 error = copyout(&cm->cm_frame->header.cmd_status,
3625 &((struct mfi_linux_ioc_packet *)arg)
3626 ->lioc_frame.hdr.cmd_status,
3629 device_printf(sc->mfi_dev,
3630 "Copy out failed\n");
3635 mfi_config_unlock(sc, locked);
3637 free(data, M_MFIBUF);
3639 mtx_lock(&sc->mfi_io_lock);
3640 mfi_release_command(cm);
3641 mtx_unlock(&sc->mfi_io_lock);
3645 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3646 error = copyin(arg, &l_aen, sizeof(l_aen));
3649 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3650 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3652 mtx_lock(&sc->mfi_io_lock);
3653 if (mfi_aen_entry != NULL) {
3654 mfi_aen_entry->p = curproc;
3655 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3658 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3659 l_aen.laen_class_locale);
3662 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3664 free(mfi_aen_entry, M_MFIBUF);
3666 mtx_unlock(&sc->mfi_io_lock);
3670 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3679 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3681 struct mfi_softc *sc;
3686 if (poll_events & (POLLIN | POLLRDNORM)) {
3687 if (sc->mfi_aen_triggered != 0) {
3688 revents |= poll_events & (POLLIN | POLLRDNORM);
3689 sc->mfi_aen_triggered = 0;
3691 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3697 if (poll_events & (POLLIN | POLLRDNORM)) {
3698 sc->mfi_poll_waiting = 1;
3699 selrecord(td, &sc->mfi_select);
3709 struct mfi_softc *sc;
3710 struct mfi_command *cm;
3716 dc = devclass_find("mfi");
3718 printf("No mfi dev class\n");
3722 for (i = 0; ; i++) {
3723 sc = devclass_get_softc(dc, i);
3726 device_printf(sc->mfi_dev, "Dumping\n\n");
3728 deadline = time_uptime - mfi_cmd_timeout;
3729 mtx_lock(&sc->mfi_io_lock);
3730 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3731 if (cm->cm_timestamp <= deadline) {
3732 device_printf(sc->mfi_dev,
3733 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3734 cm, (int)(time_uptime - cm->cm_timestamp));
3745 mtx_unlock(&sc->mfi_io_lock);
3752 mfi_timeout(void *data)
3754 struct mfi_softc *sc = (struct mfi_softc *)data;
3755 struct mfi_command *cm, *tmp;
3759 deadline = time_uptime - mfi_cmd_timeout;
3760 if (sc->adpreset == 0) {
3761 if (!mfi_tbolt_reset(sc)) {
3762 callout_reset(&sc->mfi_watchdog_callout,
3763 mfi_cmd_timeout * hz, mfi_timeout, sc);
3767 mtx_lock(&sc->mfi_io_lock);
3768 TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3769 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3771 if (cm->cm_timestamp <= deadline) {
3772 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3773 cm->cm_timestamp = time_uptime;
3775 device_printf(sc->mfi_dev,
3776 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3777 cm, (int)(time_uptime - cm->cm_timestamp)
3780 MFI_VALIDATE_CMD(sc, cm);
3782 * While commands can get stuck forever, we do
3783 * not fail them, as there is no way to tell if
3784 * the controller has actually processed them
3787 * In addition, it's very likely that force-
3788 * failing a command here would cause a panic
3801 mtx_unlock(&sc->mfi_io_lock);
3803 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,