/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>
static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command * mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
    0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
    &mfi_polled_cmd_timeout, 0,
    "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
    0, "Command timeout (in seconds)");
/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version = 	D_VERSION,
	.d_flags =	0,
	.d_open = 	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}
static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
	}
}
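
/*
 * Illustrative note (an inference, not from the original source): command
 * frames are allocated 64-byte aligned, so on xscale the low three bits of
 * (bus_add >> 3) are free to carry the frame count, while the ppc/skinny
 * encoding packs the count into bits 1..3 and always sets bit 0.  E.g., for
 * a frame at bus address 0x1000 spanning 2 extra frames:
 *
 *	xscale:	MFI_WRITE4(sc, MFI_IQP, (0x1000 >> 3) | 2);
 *	ppc:	MFI_WRITE4(sc, MFI_IQP, (0x1000 | (2 << 1)) | 1);
 */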
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			"become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}
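
/*
 * Usage sketch (illustrative only; the pattern appears throughout this
 * file): mfi_addr_cb is the usual single-segment busdma callback, so a
 * caller loads a map with
 *
 *	bus_dmamap_load(tag, map, vaddr, size, mfi_addr_cb, &busaddr, 0);
 *
 * and 'busaddr' then holds the bus address of the mapped memory.
 */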
int
mfi_attach(struct mfi_softc *sc)
{
	struct cdev *dev_t;
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return EINVAL;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;
	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr =  mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}
	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */
	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
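
	/*
	 * Worked example (illustrative, not in the original): if MFI_MAXPHYS
	 * is 128KB and PAGE_SIZE is 4KB, (MFI_MAXPHYS / PAGE_SIZE) + 1 = 33,
	 * so even a firmware advertising, say, 80 SGEs per command is
	 * clamped to the 33 entries a maximal transfer could actually use.
	 */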
	/* ThunderBolt Support get the contiguous memory */

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* alignmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}
		/*
		 * Allocate DMA memory mapping for the MPI2 IOC Init
		 * descriptor; we keep it separate from what we have
		 * allocated for the request and reply descriptors to
		 * avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
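
	/*
	 * Worked example (illustrative, not in the original; assumes
	 * struct mfi_sg64 is a packed 64-bit address plus 32-bit length,
	 * i.e. 12 bytes): with mfi_max_sge = 33,
	 *
	 *	frames = (12 * 33 - 1) / 64 + 2 = 8
	 *	mfi_cmd_size = 8 * 64 = 512 bytes per command
	 *
	 * i.e. one header frame plus enough whole 64-byte frames to hold
	 * the SG list, rounded up.
	 */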
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);
	/*
	 * Before moving the FW to operational state, check whether
	 * host memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return error;
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;
	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i - 1);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}
void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * Command may be on other queues e.g. busy queue depending on the
	 * flow of a previous call to mfi_mapcmd, so ensure it's dequeued
	 * properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;

	return (0);
}
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}
int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}
int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}
}
static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;
}
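
/*
 * Illustrative note (not in the original): the reply queue that mfi_intr
 * drains is a single-producer/single-consumer ring with
 * mfi_max_fw_cmds + 1 slots (see mfi_comms_init above).  Firmware advances
 * hw_pi as it posts completion contexts; the driver consumes from hw_ci up
 * to hw_pi and writes hw_ci back.  With the default 128 commands the ring
 * has 129 slots, so the consumer index wraps via "if (++ci == 129) ci = 0;".
 */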
void
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return;
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
}
static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}
/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}
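
/*
 * For example (illustrative, not in the original): 0xff00003c decodes as
 * "boot + 60s", since the top byte is all ones and the low 24 bits are 60,
 * while 0x0000003c is "60s" after the Jan 1, 2000 epoch.
 */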
static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * During load time driver reads all the events starting
		 * from the one that has been logged after shutdown.  Avoid
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			Fix: for kernel panics when SSCD is removed
			KASSERT(ld != NULL, ("volume disappeared"));
			*/
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}
static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}
static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mfi_abort(sc, &sc->mfi_aen_cm);
		}
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

out:
	return (error);
}
static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_aen_cm == NULL)
		return;

	hdr = &cm->cm_frame->header;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_aen_cm);
	sc->mfi_aen_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_aen_setup(sc, seq);
}
#define MAX_EVENTS 15

static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
		* (MAX_EVENTS - 1);
	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			free(el, M_MFIBUF);
			return (EBUSY);
		}

		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			break;
		}

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			mfi_release_command(cm);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			error = EIO;
			mfi_release_command(cm);
			break;
		}
		mfi_release_command(cm);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			mfi_queue_evt(sc, &el->event[i]);
		}
		seq = el->event[el->count - 1].seq + 1;
	}

	free(el, M_MFIBUF);
	return (error);
}
static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_ld_info *ld_info = NULL;
	struct mfi_disk_pending *ld_pend;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (ld_pend != NULL) {
		ld_pend->ld_id = id;
		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
	    (void **)&ld_info, sizeof(*ld_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
		if (ld_info)
			free(ld_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get logical drive: %d\n", id);
		free(ld_info, M_MFIBUF);
		return (0);
	}
	if (ld_info->ld_config.params.isSSCD != 1)
		mfi_add_ld_complete(cm);
	else {
		mfi_release_command(cm);
		if (ld_info)	/* SSCD drives ld_info free here */
			free(ld_info, M_MFIBUF);
	}
	return (0);
}
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
		free(ld_info, M_MFIBUF);
		wakeup(&sc->mfi_map_sync_cm);
		mfi_release_command(cm);
		return;
	}
	wakeup(&sc->mfi_map_sync_cm);
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}
static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_pd_info *pd_info = NULL;
	struct mfi_system_pending *syspd_pend;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (syspd_pend != NULL) {
		syspd_pend->pd_id = id;
		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
	    (void **)&pd_info, sizeof(*pd_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
		    error);
		if (pd_info)
			free(pd_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	dcmd->header.scsi_status = 0;
	dcmd->header.pad0 = 0;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get physical drive info %d\n", id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return (error);
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_add_sys_pd_complete(cm);
	return (0);
}
static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		free(pd_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}
static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
	struct bio *bio;
	struct mfi_command *cm = NULL;

	/* reserving two commands to avoid starvation for IOCTL */
	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
		return (NULL);
	}
	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
		return (NULL);
	}
	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
		cm = mfi_build_ldio(sc, bio);
	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
		cm = mfi_build_syspdio(sc, bio);
	}
	if (!cm)
		mfi_enqueue_bio(sc, bio);
	return cm;
}
/*
 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
 */

int
mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count,
    uint8_t *cdb)
{
	int cdb_len;

	if (((lba & 0x1fffff) == lba)
	    && ((block_count & 0xff) == block_count)
	    && (byte2 == 0)) {
		/* We can fit in a 6 byte cdb */
		struct scsi_rw_6 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_6 *)cdb;
		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
		scsi_ulto3b(lba, scsi_cmd->addr);
		scsi_cmd->length = block_count & 0xff;
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
		/* Need a 10 byte CDB */
		struct scsi_rw_10 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_10 *)cdb;
		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto2b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffffffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
		/* Block count is too big for a 10 byte CDB, use a 12 byte CDB */
		struct scsi_rw_12 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_12 *)cdb;
		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto4b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else {
		/*
		 * 16 byte CDB.  We'll only get here if the LBA is larger
		 * than 2^32.
		 */
		struct scsi_rw_16 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_16 *)cdb;
		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
		scsi_cmd->byte2 = byte2;
		scsi_u64to8b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto4b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	}

	return (cdb_len);
}

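/*
 * Worked example of the CDB sizing above (illustrative only): an
 * 8-block read at LBA 0x1000 passes both the 21-bit LBA and the 8-bit
 * block-count checks, so the 6-byte form is chosen:
 *
 *	uint8_t cdb[16];
 *	int len = mfi_build_cdb(1, 0, 0x1000, 8, cdb);	// len == 6, READ_6
 *
 * A read at LBA 0x100000000 fails every 32-bit LBA test and falls
 * through to the 16-byte READ_16 form instead.
 */
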
extern char *unmapped_buf;

static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	uint32_t context = 0;
	int flags = 0, blkcount = 0, readop;
	uint8_t cdb_len;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	switch (bio->bio_cmd & 0x03) {
	case BIO_READ:
		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
		readop = 1;
		break;
	case BIO_WRITE:
		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
		readop = 0;
		break;
	default:
		/* TODO: what about BIO_DELETE??? */
		panic("Unsupported bio command %x\n", bio->bio_cmd);
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	/* Fill the LBA and Transfer length in CDB */
	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
	    pass->cdb);
	pass->header.target_id = (uintptr_t)bio->bio_driver1;
	pass->header.lun_id = 0;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = bio->bio_bcount;
	pass->header.cdb_len = cdb_len;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = unmapped_buf;
	cm->cm_len = bio->bio_bcount;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = flags;

	return (cm);
}

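/*
 * For example, a 4 KB bio (bio_bcount == 4096) rounds up to eight
 * 512-byte sectors: (4096 + 511) / 512 == 8, assuming the usual
 * MFI_SECTOR_LEN of 512.  Note that cm_data is only the unmapped_buf
 * placeholder here; because MFI_CMD_BIO is set, mfi_mapcmd() maps the
 * bio stashed in cm_private via bus_dmamap_load_bio() instead.
 */
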
static struct mfi_command *
mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_io_frame *io;
	struct mfi_command *cm;
	int flags;
	uint32_t blkcount;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	io = &cm->cm_frame->io;
	switch (bio->bio_cmd & 0x03) {
	case BIO_READ:
		io->header.cmd = MFI_CMD_LD_READ;
		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
		break;
	case BIO_WRITE:
		io->header.cmd = MFI_CMD_LD_WRITE;
		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
		break;
	default:
		/* TODO: what about BIO_DELETE??? */
		panic("Unsupported bio command %x\n", bio->bio_cmd);
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	io->header.target_id = (uintptr_t)bio->bio_driver1;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	io->header.data_len = blkcount;
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
	io->lba_lo = bio->bio_pblkno & 0xffffffff;
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = unmapped_buf;
	cm->cm_len = bio->bio_bcount;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = flags;

	return (cm);
}

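/*
 * The 64-bit starting block is split across the two 32-bit LBA fields
 * above; e.g. bio_pblkno 0x123456789 yields lba_hi 0x1 and lba_lo
 * 0x23456789.  Also note that data_len in an LD I/O frame is a sector
 * count, unlike the byte count carried by the pass-through frame built
 * in mfi_build_syspdio() above.
 */
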
static void
mfi_bio_complete(struct mfi_command *cm)
{
	struct bio *bio;
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;

	bio = cm->cm_private;
	hdr = &cm->cm_frame->header;
	sc = cm->cm_sc;

	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
		bio->bio_flags |= BIO_ERROR;
		bio->bio_error = EIO;
		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
		mfi_print_sense(cm->cm_sc, cm->cm_sense);
	} else if (cm->cm_error != 0) {
		bio->bio_flags |= BIO_ERROR;
		bio->bio_error = cm->cm_error;
		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
		    cm, cm->cm_error);
	}

	mfi_release_command(cm);
	mfi_disk_complete(bio);
}

void
mfi_startio(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct ccb_hdr *ccbh;

	for (;;) {
		/* Don't bother if we're short on resources */
		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
			break;

		/* Try a command that has already been prepared */
		cm = mfi_dequeue_ready(sc);

		if (cm == NULL) {
			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
				cm = sc->mfi_cam_start(ccbh);
		}

		/* Nope, so look for work on the bioq */
		if (cm == NULL)
			cm = mfi_bio_command(sc);

		/* No work available, so exit */
		if (cm == NULL)
			break;

		/* Send the command to the controller */
		if (mfi_mapcmd(sc, cm) != 0) {
			device_printf(sc->mfi_dev, "Failed to start I/O\n");
			mfi_requeue_ready(cm);
			break;
		}
	}
}

int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error, polled;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
		if (cm->cm_flags & MFI_CMD_CCB)
			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
			    polled);
		else if (cm->cm_flags & MFI_CMD_BIO)
			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
			    polled);
		else
			error = bus_dmamap_load(sc->mfi_buffer_dmat,
			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
			    mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		error = mfi_send_frame(sc, cm);
	}

	return (error);
}

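/*
 * Note that mfi_mapcmd() turns MFI_CMD_POLLED into BUS_DMA_NOWAIT, so
 * only deferrable (non-polled) loads can return EINPROGRESS; in that
 * case the queue is frozen with MFI_FLAGS_QFRZN and mfi_data_cb()
 * finishes the submission once bounce resources become available.
 */
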
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, j, first, dir;
	int sge_size, locked;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/*
	 * We need to check if we have the lock as this is an async
	 * callback, so even though our caller mfi_mapcmd asserts that
	 * it has the lock, there is no guarantee that it hasn't been
	 * dropped if bus_dmamap_load returned prior to our completion.
	 */
	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
		mtx_lock(&sc->mfi_io_lock);

	if (error) {
		printf("error %d in callback\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		goto out;
	}

	/*
	 * Use the IEEE sgl only for I/O's on a SKINNY controller.
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64 based on the sizeof(bus_addr_t).
	 * Also calculate the total frame size based on the type
	 * of SGL used.
	 */
	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg_skinny[i].addr = segs[i].ds_addr;
			sgl->sg_skinny[i].len = segs[i].ds_len;
			sgl->sg_skinny[i].flag = 0;
		}
		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		sge_size = sizeof(struct mfi_sg_skinny);
		hdr->sg_count = nsegs;
	} else {
		j = 0;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			first = cm->cm_stp_len;
			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
				sgl->sg32[j].addr = segs[0].ds_addr;
				sgl->sg32[j++].len = first;
			} else {
				sgl->sg64[j].addr = segs[0].ds_addr;
				sgl->sg64[j++].len = first;
			}
		} else
			first = 0;
		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
			for (i = 0; i < nsegs; i++) {
				sgl->sg32[j].addr = segs[i].ds_addr + first;
				sgl->sg32[j++].len = segs[i].ds_len - first;
				first = 0;
			}
		} else {
			for (i = 0; i < nsegs; i++) {
				sgl->sg64[j].addr = segs[i].ds_addr + first;
				sgl->sg64[j++].len = segs[i].ds_len - first;
				first = 0;
			}
			hdr->flags |= MFI_FRAME_SGL64;
		}
		hdr->sg_count = j;
		sge_size = sc->mfi_sge_size;
	}

	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

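	/*
	 * E.g. with 64-byte frames, cm_total_frame_size == 160 gives
	 * (160 - 1) / 64 == 2 extra frames beyond the first; the "- 1"
	 * keeps an exact multiple of MFI_FRAME_SIZE (say 128) from
	 * rounding up to one extra frame too many.
	 */
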
	if ((error = mfi_send_frame(sc, cm)) != 0) {
		printf("error %d in callback from mfi_send_frame\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
	}

out:
	/* leave the lock in the state we found it */
	if (locked == 0)
		mtx_unlock(&sc->mfi_io_lock);
}

static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->MFA_enabled)
		error = mfi_tbolt_send_frame(sc, cm);
	else
		error = mfi_std_send_frame(sc, cm);

	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);

	return (error);
}

static int
mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = mfi_polled_cmd_timeout * 1000;

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;

	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);

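	/*
	 * On the older xscale-style inbound queue this works out to
	 * posting roughly (cm_frame_busaddr >> 3) | cm_extra_frames, so
	 * the low three bits carry the prefetch hint described above
	 * (the exact register layout varies by controller generation).
	 */
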
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}

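/*
 * In the busy-wait above, tm counts 1 ms DELAY() iterations, so
 * mfi_polled_cmd_timeout (in seconds) * 1000 bounds the loop.  Polled
 * DCMDs such as the MFI_DCMD_PD_GET_INFO command built in
 * mfi_add_sys_pd() complete through this path.
 */
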
void
mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
{
	int dir;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
		dir = 0;
		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
			dir |= BUS_DMASYNC_POSTREAD;
		if (cm->cm_flags & MFI_CMD_DATAOUT)
			dir |= BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
		cm->cm_flags &= ~MFI_CMD_MAPPED;
	}

	cm->cm_flags |= MFI_CMD_COMPLETED;

	if (cm->cm_complete != NULL)
		cm->cm_complete(cm);
	else
		wakeup(cm);
}

static int
mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0, error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->header.scsi_status = 0;
	abort->abort_context = (*cm_abort)->cm_frame->header.context;
	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
	abort->abort_mfi_addr_hi =
	    (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);

	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to abort command\n");
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	while (i < 5 && *cm_abort != NULL) {
		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
		i++;
	}
	if (*cm_abort != NULL) {
		/* Force a complete if command didn't abort */
		mtx_lock(&sc->mfi_io_lock);
		(*cm_abort)->cm_complete(*cm_abort);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (error);
}

int
mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_io_frame *io;
	int error;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	io = &cm->cm_frame->io;
	io->header.cmd = MFI_CMD_LD_WRITE;
	io->header.target_id = id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
	io->lba_lo = lba & 0xffffffff;
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to dump blocks\n");
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}

int
mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	int error, readop, cdb_len;
	uint32_t blkcount;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;

	readop = 0;
	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
	pass->header.target_id = id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = len;
	pass->header.cdb_len = cdb_len;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);

	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to dump blocks\n");
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}

static int
mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct mfi_softc *sc;
	int error;

	sc = dev->si_drv1;

	mtx_lock(&sc->mfi_io_lock);
	if (sc->mfi_detaching)
		error = ENXIO;
	else {
		sc->mfi_flags |= MFI_FLAGS_OPEN;
		error = 0;
	}
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct mfi_softc *sc;
	struct mfi_aen *mfi_aen_entry, *tmp;

	sc = dev->si_drv1;

	mtx_lock(&sc->mfi_io_lock);
	sc->mfi_flags &= ~MFI_FLAGS_OPEN;

	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
		if (mfi_aen_entry->p == curproc) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}
	mtx_unlock(&sc->mfi_io_lock);
	return (0);
}

static int
mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
{

	switch (opcode) {
	case MFI_DCMD_LD_DELETE:
	case MFI_DCMD_CFG_ADD:
	case MFI_DCMD_CFG_CLEAR:
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		sx_xlock(&sc->mfi_config_lock);
		return (1);
	default:
		return (0);
	}
}

static void
mfi_config_unlock(struct mfi_softc *sc, int locked)
{

	if (locked)
		sx_xunlock(&sc->mfi_config_lock);
}

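/*
 * Callers pair the two as, for example:
 *
 *	locked = mfi_config_lock(sc, opcode);
 *	...issue the command and wait for it...
 *	mfi_config_unlock(sc, locked);
 *
 * so only the configuration-changing DCMDs listed above pay for the
 * exclusive sx lock; every other opcode just passes locked == 0 back
 * through.
 */
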
/*
 * Perform pre-issue checks on commands from userland and possibly veto
 * them.
 */
static int
mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ld2;
	int error;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = 0;
	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		if (ld == NULL)
			error = ENOENT;
		else
			error = mfi_disk_disable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			error = mfi_disk_disable(ld);
			if (error)
				break;
		}
		if (error) {
			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
				if (ld2 == ld)
					break;
				mfi_disk_enable(ld2);
			}
		}
		break;
	case MFI_DCMD_PD_STATE_SET:
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else
			break;
		if (syspd != NULL)
			error = mfi_syspd_disable(syspd);
		break;
	default:
		break;
	}
	return (error);
}

/* Perform post-issue checks on commands from userland. */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ldn;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		KASSERT(ld != NULL, ("volume disappeared"));
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			device_delete_child(sc->mfi_dev, ld->ld_dev);
			mtx_lock(&sc->mfi_io_lock);
		} else
			mfi_disk_enable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
				device_delete_child(sc->mfi_dev, ld->ld_dev);
			}
			mtx_lock(&sc->mfi_io_lock);
		} else {
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
				mfi_disk_enable(ld);
		}
		break;
	case MFI_DCMD_CFG_ADD:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_PD_STATE_SET:
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		}
		/* If the transition failed, re-enable the syspd */
		if (syspd != NULL &&
		    cm->cm_frame->header.cmd_status != MFI_STAT_OK)
			mfi_syspd_enable(syspd);
		break;
	}
}

static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_config_data *conf_data;
	struct mfi_command *ld_cm = NULL;
	struct mfi_ld_info *ld_info = NULL;
	struct mfi_ld_config *ld;
	char *p;
	int error = 0;

	conf_data = (struct mfi_config_data *)cm->cm_data;

	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
		p = (char *)conf_data->array;
		p += conf_data->array_size * conf_data->array_count;
		ld = (struct mfi_ld_config *)p;
		if (ld->params.isSSCD == 1)
			error = 1;
	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
		    (void **)&ld_info, sizeof(*ld_info));
		if (error) {
			device_printf(sc->mfi_dev, "Failed to allocate "
			    "MFI_DCMD_LD_GET_INFO %d\n", error);
			free(ld_info, M_MFIBUF);
			return (0);
		}
		ld_cm->cm_flags = MFI_CMD_DATAIN;
		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
		if (mfi_wait_command(sc, ld_cm) != 0) {
			device_printf(sc->mfi_dev, "failed to get log drv\n");
			mfi_release_command(ld_cm);
			free(ld_info, M_MFIBUF);
			return (0);
		}

		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
			free(ld_info, M_MFIBUF);
			mfi_release_command(ld_cm);
			return (0);
		}

		ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
		if (ld_info->ld_config.params.isSSCD == 1)
			error = 1;

		mfi_release_command(ld_cm);
		free(ld_info, M_MFIBUF);
	}

	/* A non-zero return means "SSCD volume, skip the pre/post hooks". */
	return (error);
}

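/*
 * The MFI_DCMD_CFG_ADD payload is a struct mfi_config_data header
 * followed by array_count array descriptors of array_size bytes each,
 * with the struct mfi_ld_config entries packed immediately after; the
 * pointer arithmetic above skips to the first logical-drive entry.
 * E.g. with array_size 288 and array_count 2 (sizes illustrative), the
 * LD config begins 576 bytes past conf_data->array.
 */
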
static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
{
	struct mfi_ioc_packet *ioc;
	struct megasas_sge *kern_sge;
	int i, sge_size, error;

	ioc = (struct mfi_ioc_packet *)arg;

	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;

	if (sizeof(bus_addr_t) == 8) {
		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
		cm->cm_extra_frames = 2;
		sge_size = sizeof(struct mfi_sg64);
	} else {
		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
		sge_size = sizeof(struct mfi_sg32);
	}

	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
	for (i = 0; i < ioc->mfi_sge_count; i++) {
		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    ioc->mfi_sgl[i].iov_len,	/* maxsize */
		    2,				/* nsegments */
		    ioc->mfi_sgl[i].iov_len,	/* maxsegsize */
		    BUS_DMA_ALLOCNOW,		/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->mfi_kbuff_arr_dmat[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
		    &sc->mfi_kbuff_arr_dmamap[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
			return (ENOMEM);
		}

		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
		    &sc->mfi_kbuff_arr_busaddr[i], 0);

		if (!sc->kbuff_arr[i]) {
			device_printf(sc->mfi_dev,
			    "Could not allocate memory for kbuff_arr info\n");
			return (ENOMEM);
		}
		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;

		if (sizeof(bus_addr_t) == 8) {
			cm->cm_frame->stp.sgl.sg64[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg64[i].len =
			    ioc->mfi_sgl[i].iov_len;
		} else {
			cm->cm_frame->stp.sgl.sg32[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg32[i].len =
			    ioc->mfi_sgl[i].iov_len;
		}

		error = copyin(ioc->mfi_sgl[i].iov_base,
		    sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len);
		if (error != 0) {
			device_printf(sc->mfi_dev, "Copy in failed\n");
			return (error);
		}
	}

	cm->cm_flags |= MFI_CMD_MAPPED;
	return (0);
}

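/*
 * Design note: STP (system pass-through) frames bypass the normal
 * bus_dmamap_load() path.  Each user iovec gets a private DMA-safe
 * kernel bounce buffer allocated above, the frame's SGL points at
 * those buffers, and the results are copied back out after completion;
 * mfi_ioctl() tears the per-buffer tags and maps down again when the
 * command finishes.
 */
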
static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *ioc_buf = NULL;
	uint32_t context;
	int error = 0, locked;

	if (ioc->buf_size > 0) {
		if (ioc->buf_size > 1024 * 1024)
			return (ENOMEM);
		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
		if (error) {
			device_printf(sc->mfi_dev, "failed to copyin\n");
			free(ioc_buf, M_MFIBUF);
			return (error);
		}
	}

	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);

	mtx_lock(&sc->mfi_io_lock);
	while ((cm = mfi_dequeue_free(sc)) == NULL)
		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);

	/* Save context for later */
	context = cm->cm_frame->header.context;

	dcmd = &cm->cm_frame->dcmd;
	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));

	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_data = ioc_buf;
	cm->cm_len = ioc->buf_size;

	/* restore context */
	cm->cm_frame->header.context = context;

	/* Cheat since we don't know if we're writing or reading */
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;

	error = mfi_check_command_pre(sc, cm);
	if (error)
		goto out;

	error = mfi_wait_command(sc, cm);
	if (error) {
		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
		goto out;
	}

	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
	mfi_check_command_post(sc, cm);
out:
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	mfi_config_unlock(sc, locked);
	if (ioc->buf_size > 0)
		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
	if (ioc_buf != NULL)
		free(ioc_buf, M_MFIBUF);
	return (error);
}

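/*
 * Minimal userland sketch of driving this path (illustrative only;
 * error handling omitted, and fd/opcode/buf/len are placeholders):
 *
 *	struct mfi_ioc_passthru iop;
 *
 *	bzero(&iop, sizeof(iop));
 *	iop.ioc_frame.opcode = opcode;	// some MFI_DCMD_* value
 *	iop.buf = buf;
 *	iop.buf_size = len;		// capped at 1 MB above
 *	if (ioctl(fd, MFIIO_PASSTHRU, &iop) == 0)
 *		// inspect iop.ioc_frame.header.cmd_status
 */
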
#define	PTRIN(p)	((void *)(uintptr_t)(p))

static int
mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	struct mfi_ioc_packet *ioc;
#ifdef COMPAT_FREEBSD32
	struct mfi_ioc_packet32 *ioc32;
#endif
	struct mfi_ioc_aen *aen;
	struct mfi_command *cm = NULL;
	uint32_t context = 0;
	union mfi_sense_ptr sense_ptr;
	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
	size_t len;
	int i, res;
	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
#ifdef COMPAT_FREEBSD32
	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
	struct mfi_ioc_passthru iop_swab;
#endif
	int error, locked;

	sc = dev->si_drv1;
	error = 0;

	if (sc->hw_crit_error)
		return (EBUSY);

	if (sc->issuepend_done == 0)
		return (EBUSY);

	switch (cmd) {
	case MFIIO_STATS:
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;
	case MFIIO_QUERY_DISK:
	{
		struct mfi_query_disk *qd;
		struct mfi_disk *ld;

		qd = (struct mfi_query_disk *)arg;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == qd->array_id)
				break;
		}
		if (ld == NULL) {
			qd->present = 0;
			mtx_unlock(&sc->mfi_io_lock);
			return (0);
		}
		qd->present = 1;
		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
			qd->open = 1;
		bzero(qd->devname, SPECNAMELEN + 1);
		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
		mtx_unlock(&sc->mfi_io_lock);
		break;
	}
	case MFI_CMD:
#ifdef COMPAT_FREEBSD32
	case MFI_CMD32:
#endif
		{
		devclass_t devclass;
		int adapter;

		ioc = (struct mfi_ioc_packet *)arg;

		adapter = ioc->mfi_adapter_no;
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
		}
		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;
		cm->cm_frame->header.context = cm->cm_index;

		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		    2 * MEGAMFI_FRAME_SIZE);
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (ioc->mfi_sge_count) {
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		}
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		/* Legacy app shim */
		if (cm->cm_flags == 0)
			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
#ifdef COMPAT_FREEBSD32
			if (cmd == MFI_CMD) {
#endif
				/* Native */
				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
#ifdef COMPAT_FREEBSD32
			} else {
				/* 32bit on 64bit */
				ioc32 = (struct mfi_ioc_packet32 *)ioc;
				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
			}
#endif
			cm->cm_len += cm->cm_stp_len;
		}
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			res = mfi_stp_cmd(sc, cm, arg);
			if (res != 0)
				goto out;
		} else {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyin(addr, temp, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy in failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc,
			    cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		skip_pre_post = mfi_check_for_sscd(sc, cm);
		if (!skip_pre_post) {
			error = mfi_check_command_pre(sc, cm);
			if (error) {
				mtx_unlock(&sc->mfi_io_lock);
				goto out;
			}
		}
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}
		if (!skip_pre_post)
			mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyout(temp, addr, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy out failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (ioc->mfi_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef COMPAT_FREEBSD32
			if (cmd != MFI_CMD) {
				/*
				 * not 64bit native so zero out any address
				 * over 32bit
				 */
				sense_ptr.addr.high = 0;
			}
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			for (i = 0; i < 2; i++) {
				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
						bus_dmamap_unload(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->mfi_kbuff_arr_dmamap[i]);
					if (sc->kbuff_arr[i] != NULL)
						bus_dmamem_free(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->kbuff_arr[i],
						    sc->mfi_kbuff_arr_dmamap[i]);
					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
						bus_dma_tag_destroy(
						    sc->mfi_kbuff_arr_dmat[i]);
				}
			}
		}
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}
		break;
		}
	case MFI_SET_AEN:
		aen = (struct mfi_ioc_aen *)arg;
		mtx_lock(&sc->mfi_io_lock);
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);
		mtx_unlock(&sc->mfi_io_lock);
		break;
	case MFI_LINUX_CMD_2:	/* Firmware Linux ioctl shim */
		{
		devclass_t devclass;
		struct mfi_linux_ioc_packet l_ioc;
		int adapter;

		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error)
			return (error);
		adapter = l_ioc.lioc_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		}
	case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
		{
		devclass_t devclass;
		struct mfi_linux_ioc_aen l_aen;
		int adapter;

		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error)
			return (error);
		adapter = l_aen.laen_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		}
#ifdef COMPAT_FREEBSD32
	case MFIIO_PASSTHRU32:
		if (!SV_CURPROC_FLAG(SV_ILP32)) {
			error = ENOTTY;
			break;
		}
		iop_swab.ioc_frame = iop32->ioc_frame;
		iop_swab.buf_size = iop32->buf_size;
		iop_swab.buf = PTRIN(iop32->buf);
		iop = &iop_swab;
		/* FALLTHROUGH */
#endif
	case MFIIO_PASSTHRU:
		error = mfi_user_command(sc, iop);
#ifdef COMPAT_FREEBSD32
		if (cmd == MFIIO_PASSTHRU32)
			iop32->ioc_frame = iop_swab.ioc_frame;
#endif
		break;
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}

static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	union mfi_sense_ptr sense_ptr;
	uint32_t context = 0;
	uint8_t *data = NULL, *temp;
	int i;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;

	switch (cmd) {
	case MFI_LINUX_CMD_2:	/* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error)
			return (error);

		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		    2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    temp,
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
				    PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet *)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		error = copyout(&cm->cm_frame->header.cmd_status,
		    &((struct mfi_linux_ioc_packet *)arg)
		    ->lioc_frame.hdr.cmd_status,
		    1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
			    "Copy out failed\n");
			goto out;
		}

out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}

static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
			revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return revents;
}

static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		deadline = time_uptime - mfi_cmd_timeout;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp <= deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
			}
		}
		mtx_unlock(&sc->mfi_io_lock);
	}
}

static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;

	deadline = time_uptime - mfi_cmd_timeout;
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    mfi_cmd_timeout * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever we do
				 * not fail them as there is no way to tell if
				 * the controller has actually processed them
				 * or not.
				 *
				 * In addition it's very likely that force-
				 * failing a command here would cause a panic,
				 * e.g. in UFS.
				 */
			}
		}
	}
	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);
}