/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
	    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
	    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
	    0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
	    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
	    &mfi_polled_cmd_timeout, 0,
	    "Polled command timeout - used for firmware flash etc (in seconds)");
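
/*
 * Example settings (illustrative values, not defaults) -- the tunables above
 * can be seeded from loader.conf(5) at boot, and the CTLFLAG_RWTUN ones also
 * changed at runtime with sysctl(8):
 *
 *	hw.mfi.event_class="-2"		# log debug-class events and up
 *	hw.mfi.max_cmds="64"		# cap outstanding firmware commands
 *	sysctl hw.mfi.polled_cmd_timeout=120
 */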

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

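/*
 * Register-access helpers.  Each controller generation (1064R/xscale,
 * 1078/GEN2 "ppc", and Skinny) exposes the same operations through
 * different registers; mfi_attach() installs the matching set of
 * function pointers into the softc.
 */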
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM))
			return 1;
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}
257 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
259 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
263 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
265 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
266 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
267 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
269 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
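/*
 * Spin until the firmware reports READY, nudging it out of whatever
 * intermediate state it is in.  Each pass polls the state register every
 * 100ms for up to max_wait seconds before declaring the firmware stuck.
 */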
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return EINVAL;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt Support get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate the DMA memory mapping for the MPI2 IOC Init
		 * descriptor.  It is kept separate from what was allocated
		 * for the request and reply descriptors to avoid confusion
		 * later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}

	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
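	/*
	 * The resulting reply queue thus has mfi_max_fw_cmds + 1 slots,
	 * which is exactly the rq_entries value programmed into the
	 * firmware by mfi_comms_init() below.
	 */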
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);

	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
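	/*
	 * Worked example of the sizing above: with 32-bit S/G entries
	 * (sizeof(struct mfi_sg32) == 8) and mfi_max_sge = 33 (a 128K
	 * MAXPHYS with 4K pages), frames = (8 * 33 - 1) / 64 + 2 = 6,
	 * so each command occupies 6 * 64 = 384 bytes of the frame pool.
	 */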
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);

	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);

	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether
	 * host memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return error;
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i - 1);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * Command may be on other queues, e.g. the busy queue, depending on
	 * the flow of a previous call to mfi_mapcmd, so ensure it's dequeued
	 * properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

static int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;

	return (0);
}

static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}

	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
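/*
 * For example, 0xff000010 (top byte all ones) formats as "boot + 16s",
 * while 0x00000e10 formats as "3600s".
 */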
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * During load time the driver reads all the events starting
		 * from the one that has been logged after shutdown.  Avoid
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			Fix: for kernel panics when SSCD is removed
			KASSERT(ld != NULL, ("volume disappeared"));
			*/
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it.
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mfi_abort(sc, &sc->mfi_aen_cm);
		}
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

out:
	return (error);
}

static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_aen_cm == NULL)
		return;

	hdr = &cm->cm_frame->header;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_aen_cm);
	sc->mfi_aen_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_aen_setup(sc, seq);
}

#define MAX_EVENTS 15
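
/*
 * mfi_parse_entries() below drains the controller's event log in batches:
 * each MFI_DCMD_CTRL_EVENT_GET request returns up to MAX_EVENTS
 * mfi_evt_detail records in a single mfi_evt_list buffer.
 */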
static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
		* (MAX_EVENTS - 1);
	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			free(el, M_MFIBUF);
			return (EBUSY);
		}

		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			break;
		}

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			mfi_release_command(cm);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			mfi_release_command(cm);
			error = EIO;
			break;
		}
		mfi_release_command(cm);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			mfi_queue_evt(sc, &el->event[i]);
		}
		seq = el->event[el->count - 1].seq + 1;
	}

	free(el, M_MFIBUF);
	return (error);
}

static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_ld_info *ld_info = NULL;
	struct mfi_disk_pending *ld_pend;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (ld_pend != NULL) {
		ld_pend->ld_id = id;
		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
	    (void **)&ld_info, sizeof(*ld_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
		if (ld_info)
			free(ld_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get logical drive: %d\n", id);
		free(ld_info, M_MFIBUF);
		return (0);
	}
	if (ld_info->ld_config.params.isSSCD != 1)
		mfi_add_ld_complete(cm);
	else {
		mfi_release_command(cm);
		if (ld_info)		/* SSCD drives free ld_info here */
			free(ld_info, M_MFIBUF);
	}
	return (0);
}

static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
		free(ld_info, M_MFIBUF);
		wakeup(&sc->mfi_map_sync_cm);
		mfi_release_command(cm);
		return;
	}
	wakeup(&sc->mfi_map_sync_cm);
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}

static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_pd_info *pd_info = NULL;
	struct mfi_system_pending *syspd_pend;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (syspd_pend != NULL) {
		syspd_pend->pd_id = id;
		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
	    (void **)&pd_info, sizeof(*pd_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
		    error);
		if (pd_info)
			free(pd_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	dcmd->header.scsi_status = 0;
	dcmd->header.pad0 = 0;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get physical drive info %d\n", id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return (error);
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_add_sys_pd_complete(cm);
	return (0);
}

static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		free(pd_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}

static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
	struct bio *bio;
	struct mfi_command *cm = NULL;

	/* reserving two commands to avoid starvation for IOCTL */
	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
		return (NULL);
	}
	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
		return (NULL);
	}
	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
		cm = mfi_build_ldio(sc, bio);
	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
		cm = mfi_build_syspdio(sc, bio);
	}
	if (!cm)
		mfi_enqueue_bio(sc, bio);
	return cm;
}
/*
 * Build a CDB for a logical-block read or write; mostly copied from
 * cam/scsi/scsi_all.c:scsi_read_write.  Returns the CDB length used.
 */
static int
mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count,
    uint8_t *cdb)
{
	int cdb_len;

	if (((lba & 0x1fffff) == lba)
	    && ((block_count & 0xff) == block_count)
	    && (byte2 == 0)) {
		/* We can fit in a 6 byte cdb */
		struct scsi_rw_6 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_6 *)cdb;
		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
		scsi_ulto3b(lba, scsi_cmd->addr);
		scsi_cmd->length = block_count & 0xff;
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
		/* Need a 10 byte CDB */
		struct scsi_rw_10 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_10 *)cdb;
		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto2b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffffffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
		/* Block count is too big for a 10 byte CDB, use a 12 byte CDB */
		struct scsi_rw_12 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_12 *)cdb;
		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto4b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else {
		/*
		 * 16 byte CDB.  We'll only get here if the LBA is larger
		 * than 2^32.
		 */
		struct scsi_rw_16 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_16 *)cdb;
		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
		scsi_cmd->byte2 = byte2;
		scsi_u64to8b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto4b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	}

	return (cdb_len);
}
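/*
 * Worked example of the CDB-size selection above (illustrative only): an
 * 8-block transfer at LBA 0x100 satisfies the 6-byte limits (LBA < 2^21,
 * count < 2^8, byte2 == 0), while the same transfer at LBA 0x123456789
 * exceeds 32 bits of LBA and must take the 16-byte path.
 */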
static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	uint32_t context = 0;
	int flags = 0, blkcount = 0, readop;
	uint8_t cdb_len;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	switch (bio->bio_cmd & 0x03) {
	case BIO_READ:
		flags = MFI_CMD_DATAIN;
		readop = 1;
		break;
	case BIO_WRITE:
		flags = MFI_CMD_DATAOUT;
		readop = 0;
		break;
	default:
		/* TODO: what about BIO_DELETE??? */
		panic("Unsupported bio command %x\n", bio->bio_cmd);
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
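	/*
	 * Example of the round-up above (illustrative): bio_bcount = 4097
	 * with MFI_SECTOR_LEN = 512 gives (4097 + 511) / 512 = 9 blocks;
	 * a partial trailing sector still occupies a whole block.
	 */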
	/* Fill the LBA and transfer length into the CDB */
	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
	    pass->cdb);
	pass->header.target_id = (uintptr_t)bio->bio_driver1;
	pass->header.lun_id = 0;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = bio->bio_bcount;
	pass->header.cdb_len = cdb_len;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bio->bio_data;
	cm->cm_len = bio->bio_bcount;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = flags;

	return (cm);
}
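/*
 * Editorial note: the pass-through frame above carries data_len in bytes
 * (bio_bcount), whereas the LD I/O frame built below carries data_len in
 * sectors (blkcount); the two frame layouts are not interchangeable.
 */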
static struct mfi_command *
mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_io_frame *io;
	struct mfi_command *cm;
	int flags;
	uint32_t blkcount;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	io = &cm->cm_frame->io;
	switch (bio->bio_cmd & 0x03) {
	case BIO_READ:
		io->header.cmd = MFI_CMD_LD_READ;
		flags = MFI_CMD_DATAIN;
		break;
	case BIO_WRITE:
		io->header.cmd = MFI_CMD_LD_WRITE;
		flags = MFI_CMD_DATAOUT;
		break;
	default:
		/* TODO: what about BIO_DELETE??? */
		panic("Unsupported bio command %x\n", bio->bio_cmd);
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;

	io->header.target_id = (uintptr_t)bio->bio_driver1;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	io->header.data_len = blkcount;
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
	io->lba_lo = bio->bio_pblkno & 0xffffffff;
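	/*
	 * e.g. (illustrative) pblkno 0x123456789 splits into lba_hi 0x1
	 * and lba_lo 0x23456789.
	 */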
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bio->bio_data;
	cm->cm_len = bio->bio_bcount;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = flags;

	return (cm);
}
static void
mfi_bio_complete(struct mfi_command *cm)
{
	struct bio *bio;
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;

	bio = cm->cm_private;
	hdr = &cm->cm_frame->header;
	sc = cm->cm_sc;

	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
		bio->bio_flags |= BIO_ERROR;
		bio->bio_error = EIO;
		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
		mfi_print_sense(cm->cm_sc, cm->cm_sense);
	} else if (cm->cm_error != 0) {
		bio->bio_flags |= BIO_ERROR;
		bio->bio_error = cm->cm_error;
		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
		    cm, cm->cm_error);
	}

	mfi_release_command(cm);
	mfi_disk_complete(bio);
}
void
mfi_startio(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct ccb_hdr *ccbh;

	for (;;) {
		/* Don't bother if we're short on resources */
		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
			break;

		/* Try a command that has already been prepared */
		cm = mfi_dequeue_ready(sc);

		if (cm == NULL) {
			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
				cm = sc->mfi_cam_start(ccbh);
		}

		/* Nope, so look for work on the bioq */
		if (cm == NULL)
			cm = mfi_bio_command(sc);

		/* No work available, so exit */
		if (cm == NULL)
			break;

		/* Send the command to the controller */
		if (mfi_mapcmd(sc, cm) != 0) {
			device_printf(sc->mfi_dev, "Failed to startio\n");
			mfi_requeue_ready(cm);
			break;
		}
	}
}
int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error, polled;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		error = mfi_send_frame(sc, cm);
	}

	return (error);
}
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, j, first, dir;
	int sge_size, locked;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/*
	 * We need to check if we have the lock: this is an async
	 * callback, so even though our caller mfi_mapcmd asserts
	 * that it has the lock, there is no guarantee that it hasn't been
	 * dropped if bus_dmamap_load returned prior to our
	 * completion.
	 */
	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
		mtx_lock(&sc->mfi_io_lock);

	if (error) {
		printf("error %d in callback\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		goto out;
	}
	/*
	 * Use the IEEE sgl only for I/Os on a SKINNY controller.
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64 based on the sizeof(bus_addr_t).
	 * Also calculate the total frame size based on the type
	 * of SGL used.
	 */
	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg_skinny[i].addr = segs[i].ds_addr;
			sgl->sg_skinny[i].len = segs[i].ds_len;
			sgl->sg_skinny[i].flag = 0;
		}
		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		sge_size = sizeof(struct mfi_sg_skinny);
		hdr->sg_count = nsegs;
	} else {
		j = 0;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			first = cm->cm_stp_len;
			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
				sgl->sg32[j].addr = segs[0].ds_addr;
				sgl->sg32[j++].len = first;
			} else {
				sgl->sg64[j].addr = segs[0].ds_addr;
				sgl->sg64[j++].len = first;
			}
		} else
			first = 0;
		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
			for (i = 0; i < nsegs; i++) {
				sgl->sg32[j].addr = segs[i].ds_addr + first;
				sgl->sg32[j++].len = segs[i].ds_len - first;
				first = 0;
			}
		} else {
			for (i = 0; i < nsegs; i++) {
				sgl->sg64[j].addr = segs[i].ds_addr + first;
				sgl->sg64[j++].len = segs[i].ds_len - first;
				first = 0;
			}
			hdr->flags |= MFI_FRAME_SGL64;
		}
		hdr->sg_count = j;
		sge_size = sc->mfi_sge_size;
	}

	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
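	/*
	 * Worked example of the division above (illustrative): with
	 * MFI_FRAME_SIZE = 64, a 160-byte compound frame yields
	 * (160 - 1) / 64 = 2 extra frames beyond the first, while exactly
	 * 64 bytes yields 0 extra frames.
	 */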
	if ((error = mfi_send_frame(sc, cm)) != 0) {
		printf("error %d in callback from mfi_send_frame\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		goto out;
	}

out:
	/* leave the lock in the state we found it */
	if (locked == 0)
		mtx_unlock(&sc->mfi_io_lock);
}
static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->MFA_enabled)
		error = mfi_tbolt_send_frame(sc, cm);
	else
		error = mfi_std_send_frame(sc, cm);

	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);

	return (error);
}
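/*
 * Editorial note: sc->MFA_enabled selects the Thunderbolt submission path
 * (mfi_tbolt_send_frame()); all other controllers post frames through the
 * standard doorbell in mfi_std_send_frame() below.
 */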
static int
mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = mfi_polled_cmd_timeout * 1000;

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;
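	/*
	 * Sketch of the resulting doorbell encoding (an assumption drawn
	 * from the comment above, not a statement about every
	 * mfi_issue_cmd implementation):
	 *
	 *	doorbell = (cm_frame_busaddr >> 3) | cm_extra_frames;
	 *
	 * which is why the frame-count hint must fit in the low 3 bits.
	 */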
	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}
void
mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
{
	int dir;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
		dir = 0;
		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
			dir |= BUS_DMASYNC_POSTREAD;
		if (cm->cm_flags & MFI_CMD_DATAOUT)
			dir |= BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
		cm->cm_flags &= ~MFI_CMD_MAPPED;
	}

	cm->cm_flags |= MFI_CMD_COMPLETED;

	if (cm->cm_complete != NULL)
		cm->cm_complete(cm);
	else
		wakeup(cm);
}
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0, error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->header.scsi_status = 0;
	abort->abort_context = (*cm_abort)->cm_frame->header.context;
	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
	abort->abort_mfi_addr_hi =
	    (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to abort command\n");
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	while (i < 5 && *cm_abort != NULL) {
		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
		i++;
	}
	if (*cm_abort != NULL) {
		/* Force a complete if the command didn't abort */
		mtx_lock(&sc->mfi_io_lock);
		(*cm_abort)->cm_complete(*cm_abort);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (error);
}
int
mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_io_frame *io;
	int error;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	io = &cm->cm_frame->io;
	io->header.cmd = MFI_CMD_LD_WRITE;
	io->header.target_id = id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
	io->lba_lo = lba & 0xffffffff;
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed dump blocks\n");
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
int
mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	int error, readop, cdb_len;
	uint32_t blkcount;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;

	readop = 0;
	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
	pass->header.target_id = id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = len;
	pass->header.cdb_len = cdb_len;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed dump blocks\n");
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
static int
mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct mfi_softc *sc;
	int error;

	sc = dev->si_drv1;

	mtx_lock(&sc->mfi_io_lock);
	if (sc->mfi_detaching)
		error = ENXIO;
	else {
		sc->mfi_flags |= MFI_FLAGS_OPEN;
		error = 0;
	}
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}
static int
mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct mfi_softc *sc;
	struct mfi_aen *mfi_aen_entry, *tmp;

	sc = dev->si_drv1;

	mtx_lock(&sc->mfi_io_lock);
	sc->mfi_flags &= ~MFI_FLAGS_OPEN;

	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
		if (mfi_aen_entry->p == curproc) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}
	mtx_unlock(&sc->mfi_io_lock);
	return (0);
}
static int
mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
{

	switch (opcode) {
	case MFI_DCMD_LD_DELETE:
	case MFI_DCMD_CFG_ADD:
	case MFI_DCMD_CFG_CLEAR:
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		sx_xlock(&sc->mfi_config_lock);
		return (1);
	default:
		return (0);
	}
}

static void
mfi_config_unlock(struct mfi_softc *sc, int locked)
{

	if (locked)
		sx_xunlock(&sc->mfi_config_lock);
}
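/*
 * Illustrative pairing, taken from the ioctl paths below: the value
 * returned by mfi_config_lock() must be handed back to
 * mfi_config_unlock() so the sx lock is only released when it was taken:
 *
 *	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
 *	...
 *	mfi_config_unlock(sc, locked);
 */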
/*
 * Perform pre-issue checks on commands from userland and possibly veto
 * them.
 */
static int
mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ld2;
	int error;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = 0;
	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		if (ld == NULL)
			error = ENOENT;
		else
			error = mfi_disk_disable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			error = mfi_disk_disable(ld);
			if (error)
				break;
		}
		if (error) {
			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
				if (ld2 == ld)
					break;
				mfi_disk_enable(ld2);
			}
		}
		break;
	case MFI_DCMD_PD_STATE_SET:
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else
			break;
		if (syspd)
			error = mfi_syspd_disable(syspd);
		break;
	default:
		break;
	}
	return (error);
}
/* Perform post-issue checks on commands from userland. */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ldn;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		KASSERT(ld != NULL, ("volume disappeared"));
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, ld->ld_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		} else
			mfi_disk_enable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
				device_delete_child(sc->mfi_dev, ld->ld_dev);
			}
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		} else {
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
				mfi_disk_enable(ld);
		}
		break;
	case MFI_DCMD_CFG_ADD:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_PD_STATE_SET:
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else
			break;
		/* If the transition fails then enable the syspd again */
		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
			mfi_syspd_enable(syspd);
		break;
	}
}
static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_config_data *conf_data;
	struct mfi_command *ld_cm = NULL;
	struct mfi_ld_info *ld_info = NULL;
	struct mfi_ld_config *ld;
	char *p;
	int error = 0;

	conf_data = (struct mfi_config_data *)cm->cm_data;

	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
		p = (char *)conf_data->array;
		p += conf_data->array_size * conf_data->array_count;
		ld = (struct mfi_ld_config *)p;
		if (ld->params.isSSCD == 1)
			error = 1;
	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
		    (void **)&ld_info, sizeof(*ld_info));
		if (error) {
			device_printf(sc->mfi_dev, "Failed to allocate "
			    "MFI_DCMD_LD_GET_INFO %d\n", error);
			if (ld_info)
				free(ld_info, M_MFIBUF);
			return (0);
		}
		ld_cm->cm_flags = MFI_CMD_DATAIN;
		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
		if (mfi_wait_command(sc, ld_cm) != 0) {
			device_printf(sc->mfi_dev, "failed to get log drv\n");
			mfi_release_command(ld_cm);
			free(ld_info, M_MFIBUF);
			return (0);
		}

		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
			free(ld_info, M_MFIBUF);
			mfi_release_command(ld_cm);
			return (0);
		}

		ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
		if (ld_info->ld_config.params.isSSCD == 1)
			error = 1;

		mfi_release_command(ld_cm);
		free(ld_info, M_MFIBUF);
	}
	return (error);
}
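/*
 * Editorial note: a non-zero return from mfi_check_for_sscd() causes the
 * MFI_CMD path in mfi_ioctl() below to skip mfi_check_command_pre()/_post(),
 * presumably because SSCD (SSD cache) volumes are not attached as mfid
 * disks that would need disabling around the configuration change.
 */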
static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
{
	struct mfi_ioc_packet *ioc;
	struct megasas_sge *kern_sge;
	int sge_size, error;
	uint8_t i;

	ioc = (struct mfi_ioc_packet *)arg;
	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;

	if (sizeof(bus_addr_t) == 8) {
		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
		cm->cm_extra_frames = 2;
		sge_size = sizeof(struct mfi_sg64);
	} else {
		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) /
		    MFI_FRAME_SIZE;
		sge_size = sizeof(struct mfi_sg32);
	}

	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
	for (i = 0; i < ioc->mfi_sge_count; i++) {
		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    ioc->mfi_sgl[i].iov_len,	/* maxsize */
		    2,				/* nsegments */
		    ioc->mfi_sgl[i].iov_len,	/* maxsegsize */
		    BUS_DMA_ALLOCNOW,		/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->mfi_kbuff_arr_dmat[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
		    &sc->mfi_kbuff_arr_dmamap[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
			return (ENOMEM);
		}

		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
		    &sc->mfi_kbuff_arr_busaddr[i], 0);

		if (!sc->kbuff_arr[i]) {
			device_printf(sc->mfi_dev,
			    "Could not allocate memory for kbuff_arr info\n");
			return (-1);
		}
		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;

		if (sizeof(bus_addr_t) == 8) {
			cm->cm_frame->stp.sgl.sg64[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg64[i].len =
			    ioc->mfi_sgl[i].iov_len;
		} else {
			cm->cm_frame->stp.sgl.sg32[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg32[i].len =
			    ioc->mfi_sgl[i].iov_len;
		}

		error = copyin(ioc->mfi_sgl[i].iov_base,
		    sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len);
		if (error != 0) {
			device_printf(sc->mfi_dev, "Copy in failed\n");
			return (error);
		}
	}

	cm->cm_flags |= MFI_CMD_MAPPED;
	return (0);
}
static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *ioc_buf = NULL;
	uint32_t context;
	int error = 0, locked;

	if (ioc->buf_size > 0) {
		if (ioc->buf_size > 1024 * 1024)
			return (ENOMEM);
		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
		if (error) {
			device_printf(sc->mfi_dev, "failed to copyin\n");
			free(ioc_buf, M_MFIBUF);
			return (error);
		}
	}

	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);

	mtx_lock(&sc->mfi_io_lock);
	while ((cm = mfi_dequeue_free(sc)) == NULL)
		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);

	/* Save context for later */
	context = cm->cm_frame->header.context;

	dcmd = &cm->cm_frame->dcmd;
	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));

	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_data = ioc_buf;
	cm->cm_len = ioc->buf_size;

	/* restore context */
	cm->cm_frame->header.context = context;

	/* Cheat since we don't know if we're writing or reading */
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;

	error = mfi_check_command_pre(sc, cm);
	if (error)
		goto out;

	error = mfi_wait_command(sc, cm);
	if (error) {
		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
		goto out;
	}

	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
	mfi_check_command_post(sc, cm);
out:
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	mfi_config_unlock(sc, locked);
	if (ioc->buf_size > 0)
		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
	if (ioc_buf != NULL)
		free(ioc_buf, M_MFIBUF);
	return (error);
}
#define	PTRIN(p)	((void *)(uintptr_t)(p))

static int
mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	struct mfi_ioc_packet *ioc;
#ifdef COMPAT_FREEBSD32
	struct mfi_ioc_packet32 *ioc32;
#endif
	struct mfi_ioc_aen *aen;
	struct mfi_command *cm = NULL;
	uint32_t context = 0;
	union mfi_sense_ptr sense_ptr;
	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
	size_t len;
	int i, res;
	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
#ifdef COMPAT_FREEBSD32
	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
	struct mfi_ioc_passthru iop_swab;
#endif
	int error, locked;

	sc = dev->si_drv1;
	error = 0;

	if (sc->hw_crit_error)
		return (EBUSY);

	if (sc->issuepend_done == 0)
		return (EBUSY);

	switch (cmd) {
	case MFIIO_STATS:
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;
	case MFIIO_QUERY_DISK:
	{
		struct mfi_query_disk *qd;
		struct mfi_disk *ld;

		qd = (struct mfi_query_disk *)arg;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == qd->array_id)
				break;
		}
		if (ld == NULL) {
			qd->present = 0;
			mtx_unlock(&sc->mfi_io_lock);
			return (0);
		}
		qd->present = 1;
		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
			qd->open = 1;
		bzero(qd->devname, SPECNAMELEN + 1);
		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
		mtx_unlock(&sc->mfi_io_lock);
		break;
	}
	case MFI_CMD:
#ifdef COMPAT_FREEBSD32
	case MFI_CMD32:
#endif
		{
		devclass_t devclass;
		int adapter;

		ioc = (struct mfi_ioc_packet *)arg;

		adapter = ioc->mfi_adapter_no;
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
		}
		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;
		cm->cm_frame->header.context = cm->cm_index;

		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		    2 * MEGAMFI_FRAME_SIZE);
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (ioc->mfi_sge_count) {
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		}
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		/* Legacy app shim */
		if (cm->cm_flags == 0)
			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
#ifdef COMPAT_FREEBSD32
			if (cmd == MFI_CMD) {
#endif
				/* Native */
				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
#ifdef COMPAT_FREEBSD32
			} else {
				/* 32bit on 64bit */
				ioc32 = (struct mfi_ioc_packet32 *)ioc;
				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
			}
#endif
			cm->cm_len += cm->cm_stp_len;
		}
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			res = mfi_stp_cmd(sc, cm, arg);
			if (res != 0)
				goto out;
		} else {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyin(addr, temp, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy in failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc,
			    cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		skip_pre_post = mfi_check_for_sscd(sc, cm);
		if (!skip_pre_post) {
			error = mfi_check_command_pre(sc, cm);
			if (error) {
				mtx_unlock(&sc->mfi_io_lock);
				goto out;
			}
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if (!skip_pre_post) {
			mfi_check_command_post(sc, cm);
		}
		mtx_unlock(&sc->mfi_io_lock);
		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyout(temp, addr, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy out failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (ioc->mfi_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef COMPAT_FREEBSD32
			if (cmd != MFI_CMD) {
				/*
				 * not 64bit native so zero out any address
				 * over 32bit
				 */
				sense_ptr.addr.high = 0;
			}
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			for (i = 0; i < 2; i++) {
				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
						bus_dmamap_unload(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->mfi_kbuff_arr_dmamap[i]);
					if (sc->kbuff_arr[i] != NULL)
						bus_dmamem_free(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->kbuff_arr[i],
						    sc->mfi_kbuff_arr_dmamap[i]);
					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
						bus_dma_tag_destroy(
						    sc->mfi_kbuff_arr_dmat[i]);
				}
			}
		}
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		break;
		}
	case MFI_SET_AEN:
		aen = (struct mfi_ioc_aen *)arg;
		mtx_lock(&sc->mfi_io_lock);
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);
		mtx_unlock(&sc->mfi_io_lock);
		break;
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		{
		devclass_t devclass;
		struct mfi_linux_ioc_packet l_ioc;
		int adapter;

		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error)
			return (error);
		adapter = l_ioc.lioc_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		}
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		{
		devclass_t devclass;
		struct mfi_linux_ioc_aen l_aen;
		int adapter;

		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error)
			return (error);
		adapter = l_aen.laen_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		}
#ifdef COMPAT_FREEBSD32
	case MFIIO_PASSTHRU32:
		if (!SV_CURPROC_FLAG(SV_ILP32)) {
			error = ENOTTY;
			break;
		}
		iop_swab.ioc_frame = iop32->ioc_frame;
		iop_swab.buf_size = iop32->buf_size;
		iop_swab.buf = PTRIN(iop32->buf);
		iop = &iop_swab;
		/* FALLTHROUGH */
#endif
	case MFIIO_PASSTHRU:
		error = mfi_user_command(sc, iop);
#ifdef COMPAT_FREEBSD32
		if (cmd == MFIIO_PASSTHRU32)
			iop32->ioc_frame = iop_swab.ioc_frame;
#endif
		break;
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOTTY;
		break;
	}

	return (error);
}
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	union mfi_sense_ptr sense_ptr;
	uint32_t context = 0;
	uint8_t *data = NULL, *temp;
	int i;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		    2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    temp,
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
				    PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet *)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}
		error = copyout(&cm->cm_frame->header.cmd_status,
		    &((struct mfi_linux_ioc_packet *)arg)
		    ->lioc_frame.hdr.cmd_status,
		    1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
			    "Copy out failed\n");
			goto out;
		}

out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOIOCTL;
		break;
	}

	return (error);
}
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
			revents |= POLLERR;
		}
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return (revents);
}
static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		timedout = 0;
		deadline = time_uptime - MFI_CMD_TIMEOUT;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp <= deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				timedout++;
			}
		}

#if 0
		if (timedout)
			MFI_DUMP_CMDS(sc);
#endif

		mtx_unlock(&sc->mfi_io_lock);
	}
}
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;

	deadline = time_uptime - MFI_CMD_TIMEOUT;
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * Fail the command instead of leaving it on
				 * the queue where it could remain stuck
				 * forever.
				 */
				mfi_remove_busy(cm);
				cm->cm_error = ETIMEDOUT;
				mfi_complete(sc, cm);
			}
		}
	}
	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);
}