2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
56 #include "opt_compat.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/kernel.h>
65 #include <sys/selinfo.h>
68 #include <sys/eventhandler.h>
70 #include <sys/bus_dma.h>
72 #include <sys/ioccom.h>
75 #include <sys/signalvar.h>
76 #include <sys/taskqueue.h>
78 #include <machine/bus.h>
79 #include <machine/resource.h>
81 #include <dev/mfi/mfireg.h>
82 #include <dev/mfi/mfi_ioctl.h>
83 #include <dev/mfi/mfivar.h>
84 #include <sys/interrupt.h>
85 #include <sys/priority.h>
87 static int mfi_alloc_commands(struct mfi_softc *);
88 static int mfi_comms_init(struct mfi_softc *);
89 static int mfi_get_controller_info(struct mfi_softc *);
90 static int mfi_get_log_state(struct mfi_softc *,
91 struct mfi_evt_log_state **);
92 static int mfi_parse_entries(struct mfi_softc *, int, int);
93 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
94 static void mfi_startup(void *arg);
95 static void mfi_intr(void *arg);
96 static void mfi_ldprobe(struct mfi_softc *sc);
97 static void mfi_syspdprobe(struct mfi_softc *sc);
98 static void mfi_handle_evt(void *context, int pending);
99 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
100 static void mfi_aen_complete(struct mfi_command *);
101 static int mfi_add_ld(struct mfi_softc *sc, int);
102 static void mfi_add_ld_complete(struct mfi_command *);
103 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
104 static void mfi_add_sys_pd_complete(struct mfi_command *);
105 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
106 static void mfi_bio_complete(struct mfi_command *);
107 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
108 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
109 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
110 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
111 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
112 static void mfi_timeout(void *);
113 static int mfi_user_command(struct mfi_softc *,
114 struct mfi_ioc_passthru *);
115 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
116 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
117 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
118 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
119 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
120 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
121 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
123 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
125 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
126 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
127 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
128 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
129 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
131 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
132 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
133 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
134 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
135 0, "event message locale");
137 static int mfi_event_class = MFI_EVT_CLASS_INFO;
138 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
139 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
140 0, "event message class");
142 static int mfi_max_cmds = 128;
143 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
144 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
147 static int mfi_detect_jbod_change = 1;
148 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
149 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
150 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
152 /* Management interface */
153 static d_open_t mfi_open;
154 static d_close_t mfi_close;
155 static d_ioctl_t mfi_ioctl;
156 static d_poll_t mfi_poll;
158 static struct cdevsw mfi_cdevsw = {
159 .d_version = D_VERSION,
162 .d_close = mfi_close,
163 .d_ioctl = mfi_ioctl,
168 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
170 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
171 struct mfi_skinny_dma_info mfi_skinny;
174 mfi_enable_intr_xscale(struct mfi_softc *sc)
176 MFI_WRITE4(sc, MFI_OMSK, 0x01);
180 mfi_enable_intr_ppc(struct mfi_softc *sc)
182 if (sc->mfi_flags & MFI_FLAGS_1078) {
183 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
184 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
186 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
187 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
188 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
190 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
191 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
196 mfi_read_fw_status_xscale(struct mfi_softc *sc)
198 return MFI_READ4(sc, MFI_OMSG0);
202 mfi_read_fw_status_ppc(struct mfi_softc *sc)
204 return MFI_READ4(sc, MFI_OSP0);
208 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
212 status = MFI_READ4(sc, MFI_OSTS);
213 if ((status & MFI_OSTS_INTR_VALID) == 0)
216 MFI_WRITE4(sc, MFI_OSTS, status);
221 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
225 status = MFI_READ4(sc, MFI_OSTS);
226 if (sc->mfi_flags & MFI_FLAGS_1078) {
227 if (!(status & MFI_1078_RM)) {
231 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
232 if (!(status & MFI_GEN2_RM)) {
236 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
237 if (!(status & MFI_SKINNY_RM)) {
241 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
242 MFI_WRITE4(sc, MFI_OSTS, status);
244 MFI_WRITE4(sc, MFI_ODCR0, status);
249 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
251 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
255 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
257 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
258 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
259 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
261 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
266 mfi_transition_firmware(struct mfi_softc *sc)
268 uint32_t fw_state, cur_state;
270 uint32_t cur_abs_reg_val = 0;
271 uint32_t prev_abs_reg_val = 0;
273 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
274 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
275 while (fw_state != MFI_FWSTATE_READY) {
277 device_printf(sc->mfi_dev, "Waiting for firmware to "
279 cur_state = fw_state;
281 case MFI_FWSTATE_FAULT:
282 device_printf(sc->mfi_dev, "Firmware fault\n");
284 case MFI_FWSTATE_WAIT_HANDSHAKE:
285 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
286 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
288 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
289 max_wait = MFI_RESET_WAIT_TIME;
291 case MFI_FWSTATE_OPERATIONAL:
292 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
293 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
295 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
296 max_wait = MFI_RESET_WAIT_TIME;
298 case MFI_FWSTATE_UNDEFINED:
299 case MFI_FWSTATE_BB_INIT:
300 max_wait = MFI_RESET_WAIT_TIME;
302 case MFI_FWSTATE_FW_INIT_2:
303 max_wait = MFI_RESET_WAIT_TIME;
305 case MFI_FWSTATE_FW_INIT:
306 case MFI_FWSTATE_FLUSH_CACHE:
307 max_wait = MFI_RESET_WAIT_TIME;
309 case MFI_FWSTATE_DEVICE_SCAN:
310 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
311 prev_abs_reg_val = cur_abs_reg_val;
313 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
314 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
315 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
317 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
318 max_wait = MFI_RESET_WAIT_TIME;
321 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
325 for (i = 0; i < (max_wait * 10); i++) {
326 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
327 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
328 if (fw_state == cur_state)
333 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
334 /* Check the device scanning progress */
335 if (prev_abs_reg_val != cur_abs_reg_val) {
339 if (fw_state == cur_state) {
340 device_printf(sc->mfi_dev, "Firmware stuck in state "
349 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
354 *addr = segs[0].ds_addr;
359 mfi_attach(struct mfi_softc *sc)
362 int error, commsz, framessz, sensesz;
363 int frames, unit, max_fw_sge;
364 uint32_t tb_mem_size = 0;
369 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
372 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
373 sx_init(&sc->mfi_config_lock, "MFI config");
374 TAILQ_INIT(&sc->mfi_ld_tqh);
375 TAILQ_INIT(&sc->mfi_syspd_tqh);
376 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
377 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
378 TAILQ_INIT(&sc->mfi_evt_queue);
379 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
380 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
381 TAILQ_INIT(&sc->mfi_aen_pids);
382 TAILQ_INIT(&sc->mfi_cam_ccbq);
390 sc->last_seq_num = 0;
391 sc->disableOnlineCtrlReset = 1;
392 sc->issuepend_done = 1;
393 sc->hw_crit_error = 0;
395 if (sc->mfi_flags & MFI_FLAGS_1064R) {
396 sc->mfi_enable_intr = mfi_enable_intr_xscale;
397 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
398 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
399 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
400 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
401 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
402 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
403 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
404 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
405 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
406 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
408 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
410 sc->mfi_enable_intr = mfi_enable_intr_ppc;
411 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
412 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
413 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
417 /* Before we get too far, see if the firmware is working */
418 if ((error = mfi_transition_firmware(sc)) != 0) {
419 device_printf(sc->mfi_dev, "Firmware not in READY state, "
420 "error %d\n", error);
424 /* Start: LSIP200113393 */
425 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
426 1, 0, /* algnmnt, boundary */
427 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
428 BUS_SPACE_MAXADDR, /* highaddr */
429 NULL, NULL, /* filter, filterarg */
430 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
432 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
434 NULL, NULL, /* lockfunc, lockarg */
435 &sc->verbuf_h_dmat)) {
436 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
439 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
440 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
441 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
444 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
445 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
446 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
447 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
448 /* End: LSIP200113393 */
451 * Get information needed for sizing the contiguous memory for the
452 * frame pool. Size down the sgl parameter since we know that
453 * we will never need more than what's required for MAXPHYS.
454 * It would be nice if these constants were available at runtime
455 * instead of compile time.
457 status = sc->mfi_read_fw_status(sc);
458 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
459 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
460 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
462 /* ThunderBolt Support get the contiguous memory */
464 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
465 mfi_tbolt_init_globals(sc);
466 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
467 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
468 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
470 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
471 1, 0, /* algnmnt, boundary */
472 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
473 BUS_SPACE_MAXADDR, /* highaddr */
474 NULL, NULL, /* filter, filterarg */
475 tb_mem_size, /* maxsize */
477 tb_mem_size, /* maxsegsize */
479 NULL, NULL, /* lockfunc, lockarg */
481 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
484 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
485 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
486 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
489 bzero(sc->request_message_pool, tb_mem_size);
490 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
491 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
493 /* For ThunderBolt memory init */
494 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
495 0x100, 0, /* alignmnt, boundary */
496 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
497 BUS_SPACE_MAXADDR, /* highaddr */
498 NULL, NULL, /* filter, filterarg */
499 MFI_FRAME_SIZE, /* maxsize */
501 MFI_FRAME_SIZE, /* maxsegsize */
503 NULL, NULL, /* lockfunc, lockarg */
504 &sc->mfi_tb_init_dmat)) {
505 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
508 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
509 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
510 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
513 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
514 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
515 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
516 &sc->mfi_tb_init_busaddr, 0);
517 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
519 device_printf(sc->mfi_dev,
520 "Thunderbolt pool preparation error\n");
525 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
526 we are taking it diffrent from what we have allocated for Request
527 and reply descriptors to avoid confusion later
529 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
530 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
531 1, 0, /* algnmnt, boundary */
532 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
533 BUS_SPACE_MAXADDR, /* highaddr */
534 NULL, NULL, /* filter, filterarg */
535 tb_mem_size, /* maxsize */
537 tb_mem_size, /* maxsegsize */
539 NULL, NULL, /* lockfunc, lockarg */
540 &sc->mfi_tb_ioc_init_dmat)) {
541 device_printf(sc->mfi_dev,
542 "Cannot allocate comms DMA tag\n");
545 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
546 (void **)&sc->mfi_tb_ioc_init_desc,
547 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
548 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
551 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
552 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
553 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
554 &sc->mfi_tb_ioc_init_busaddr, 0);
557 * Create the dma tag for data buffers. Used both for block I/O
558 * and for various internal data queries.
560 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
561 1, 0, /* algnmnt, boundary */
562 BUS_SPACE_MAXADDR, /* lowaddr */
563 BUS_SPACE_MAXADDR, /* highaddr */
564 NULL, NULL, /* filter, filterarg */
565 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
566 sc->mfi_max_sge, /* nsegments */
567 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
568 BUS_DMA_ALLOCNOW, /* flags */
569 busdma_lock_mutex, /* lockfunc */
570 &sc->mfi_io_lock, /* lockfuncarg */
571 &sc->mfi_buffer_dmat)) {
572 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
577 * Allocate DMA memory for the comms queues. Keep it under 4GB for
578 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
579 * entry, so the calculated size here will be will be 1 more than
580 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
582 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
583 sizeof(struct mfi_hwcomms);
584 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
585 1, 0, /* algnmnt, boundary */
586 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
587 BUS_SPACE_MAXADDR, /* highaddr */
588 NULL, NULL, /* filter, filterarg */
589 commsz, /* maxsize */
591 commsz, /* maxsegsize */
593 NULL, NULL, /* lockfunc, lockarg */
594 &sc->mfi_comms_dmat)) {
595 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
598 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
599 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
600 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
603 bzero(sc->mfi_comms, commsz);
604 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
605 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
607 * Allocate DMA memory for the command frames. Keep them in the
608 * lower 4GB for efficiency. Calculate the size of the commands at
609 * the same time; each command is one 64 byte frame plus a set of
610 * additional frames for holding sg lists or other data.
611 * The assumption here is that the SG list will start at the second
612 * frame and not use the unused bytes in the first frame. While this
613 * isn't technically correct, it simplifies the calculation and allows
614 * for command frames that might be larger than an mfi_io_frame.
616 if (sizeof(bus_addr_t) == 8) {
617 sc->mfi_sge_size = sizeof(struct mfi_sg64);
618 sc->mfi_flags |= MFI_FLAGS_SG64;
620 sc->mfi_sge_size = sizeof(struct mfi_sg32);
622 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
623 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
624 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
625 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
626 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
627 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
628 64, 0, /* algnmnt, boundary */
629 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
630 BUS_SPACE_MAXADDR, /* highaddr */
631 NULL, NULL, /* filter, filterarg */
632 framessz, /* maxsize */
634 framessz, /* maxsegsize */
636 NULL, NULL, /* lockfunc, lockarg */
637 &sc->mfi_frames_dmat)) {
638 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
641 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
642 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
643 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
646 bzero(sc->mfi_frames, framessz);
647 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
648 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
650 * Allocate DMA memory for the frame sense data. Keep them in the
651 * lower 4GB for efficiency
653 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
654 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
655 4, 0, /* algnmnt, boundary */
656 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
657 BUS_SPACE_MAXADDR, /* highaddr */
658 NULL, NULL, /* filter, filterarg */
659 sensesz, /* maxsize */
661 sensesz, /* maxsegsize */
663 NULL, NULL, /* lockfunc, lockarg */
664 &sc->mfi_sense_dmat)) {
665 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
668 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
669 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
670 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
673 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
674 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
675 if ((error = mfi_alloc_commands(sc)) != 0)
678 /* Before moving the FW to operational state, check whether
679 * hostmemory is required by the FW or not
682 /* ThunderBolt MFI_IOC2 INIT */
683 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
684 sc->mfi_disable_intr(sc);
685 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
686 device_printf(sc->mfi_dev,
687 "TB Init has failed with error %d\n",error);
691 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
693 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
694 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
696 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
699 sc->mfi_intr_ptr = mfi_intr_tbolt;
700 sc->mfi_enable_intr(sc);
702 if ((error = mfi_comms_init(sc)) != 0)
705 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
706 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
707 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
710 sc->mfi_intr_ptr = mfi_intr;
711 sc->mfi_enable_intr(sc);
713 if ((error = mfi_get_controller_info(sc)) != 0)
715 sc->disableOnlineCtrlReset = 0;
717 /* Register a config hook to probe the bus for arrays */
718 sc->mfi_ich.ich_func = mfi_startup;
719 sc->mfi_ich.ich_arg = sc;
720 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
721 device_printf(sc->mfi_dev, "Cannot establish configuration "
725 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
726 mtx_unlock(&sc->mfi_io_lock);
731 * Register a shutdown handler.
733 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
734 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
735 device_printf(sc->mfi_dev, "Warning: shutdown event "
736 "registration failed\n");
740 * Create the control device for doing management
742 unit = device_get_unit(sc->mfi_dev);
743 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
744 0640, "mfi%d", unit);
746 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
747 if (sc->mfi_cdev != NULL)
748 sc->mfi_cdev->si_drv1 = sc;
749 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
750 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
751 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
752 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
753 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
754 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
755 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
756 &sc->mfi_keep_deleted_volumes, 0,
757 "Don't detach the mfid device for a busy volume that is deleted");
759 device_add_child(sc->mfi_dev, "mfip", -1);
760 bus_generic_attach(sc->mfi_dev);
762 /* Start the timeout watchdog */
763 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
764 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
767 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
768 mfi_tbolt_sync_map_info(sc);
775 mfi_alloc_commands(struct mfi_softc *sc)
777 struct mfi_command *cm;
781 * XXX Should we allocate all the commands up front, or allocate on
782 * demand later like 'aac' does?
784 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
786 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
787 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
789 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
792 for (i = 0; i < ncmds; i++) {
793 cm = &sc->mfi_commands[i];
794 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
795 sc->mfi_cmd_size * i);
796 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
797 sc->mfi_cmd_size * i;
798 cm->cm_frame->header.context = i;
799 cm->cm_sense = &sc->mfi_sense[i];
800 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
803 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
804 &cm->cm_dmamap) == 0) {
805 mtx_lock(&sc->mfi_io_lock);
806 mfi_release_command(cm);
807 mtx_unlock(&sc->mfi_io_lock);
811 sc->mfi_total_cmds++;
818 mfi_release_command(struct mfi_command *cm)
820 struct mfi_frame_header *hdr;
823 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
826 * Zero out the important fields of the frame, but make sure the
827 * context field is preserved. For efficiency, handle the fields
828 * as 32 bit words. Clear out the first S/G entry too for safety.
830 hdr = &cm->cm_frame->header;
831 if (cm->cm_data != NULL && hdr->sg_count) {
832 cm->cm_sg->sg32[0].len = 0;
833 cm->cm_sg->sg32[0].addr = 0;
836 hdr_data = (uint32_t *)cm->cm_frame;
837 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
838 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
839 hdr_data[4] = 0; /* flags, timeout */
840 hdr_data[5] = 0; /* data_len */
842 cm->cm_extra_frames = 0;
844 cm->cm_complete = NULL;
845 cm->cm_private = NULL;
848 cm->cm_total_frame_size = 0;
849 cm->retry_for_fw_reset = 0;
851 mfi_enqueue_free(cm);
855 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
856 uint32_t opcode, void **bufp, size_t bufsize)
858 struct mfi_command *cm;
859 struct mfi_dcmd_frame *dcmd;
861 uint32_t context = 0;
863 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
865 cm = mfi_dequeue_free(sc);
869 /* Zero out the MFI frame */
870 context = cm->cm_frame->header.context;
871 bzero(cm->cm_frame, sizeof(union mfi_frame));
872 cm->cm_frame->header.context = context;
874 if ((bufsize > 0) && (bufp != NULL)) {
876 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
878 mfi_release_command(cm);
887 dcmd = &cm->cm_frame->dcmd;
888 bzero(dcmd->mbox, MFI_MBOX_SIZE);
889 dcmd->header.cmd = MFI_CMD_DCMD;
890 dcmd->header.timeout = 0;
891 dcmd->header.flags = 0;
892 dcmd->header.data_len = bufsize;
893 dcmd->header.scsi_status = 0;
894 dcmd->opcode = opcode;
895 cm->cm_sg = &dcmd->sgl;
896 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
899 cm->cm_private = buf;
900 cm->cm_len = bufsize;
903 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
909 mfi_comms_init(struct mfi_softc *sc)
911 struct mfi_command *cm;
912 struct mfi_init_frame *init;
913 struct mfi_init_qinfo *qinfo;
915 uint32_t context = 0;
917 mtx_lock(&sc->mfi_io_lock);
918 if ((cm = mfi_dequeue_free(sc)) == NULL)
921 /* Zero out the MFI frame */
922 context = cm->cm_frame->header.context;
923 bzero(cm->cm_frame, sizeof(union mfi_frame));
924 cm->cm_frame->header.context = context;
927 * Abuse the SG list area of the frame to hold the init_qinfo
930 init = &cm->cm_frame->init;
931 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
933 bzero(qinfo, sizeof(struct mfi_init_qinfo));
934 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
935 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
936 offsetof(struct mfi_hwcomms, hw_reply_q);
937 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
938 offsetof(struct mfi_hwcomms, hw_pi);
939 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
940 offsetof(struct mfi_hwcomms, hw_ci);
942 init->header.cmd = MFI_CMD_INIT;
943 init->header.data_len = sizeof(struct mfi_init_qinfo);
944 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
946 cm->cm_flags = MFI_CMD_POLLED;
948 if ((error = mfi_mapcmd(sc, cm)) != 0) {
949 device_printf(sc->mfi_dev, "failed to send init command\n");
950 mtx_unlock(&sc->mfi_io_lock);
953 mfi_release_command(cm);
954 mtx_unlock(&sc->mfi_io_lock);
960 mfi_get_controller_info(struct mfi_softc *sc)
962 struct mfi_command *cm = NULL;
963 struct mfi_ctrl_info *ci = NULL;
964 uint32_t max_sectors_1, max_sectors_2;
967 mtx_lock(&sc->mfi_io_lock);
968 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
969 (void **)&ci, sizeof(*ci));
972 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
974 if ((error = mfi_mapcmd(sc, cm)) != 0) {
975 device_printf(sc->mfi_dev, "Failed to get controller info\n");
976 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
982 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
983 BUS_DMASYNC_POSTREAD);
984 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
986 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
987 max_sectors_2 = ci->max_request_size;
988 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
989 sc->disableOnlineCtrlReset =
990 ci->properties.OnOffProperties.disableOnlineCtrlReset;
996 mfi_release_command(cm);
997 mtx_unlock(&sc->mfi_io_lock);
1002 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1004 struct mfi_command *cm = NULL;
1007 mtx_lock(&sc->mfi_io_lock);
1008 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1009 (void **)log_state, sizeof(**log_state));
1012 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1014 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1015 device_printf(sc->mfi_dev, "Failed to get log state\n");
1019 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1020 BUS_DMASYNC_POSTREAD);
1021 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1025 mfi_release_command(cm);
1026 mtx_unlock(&sc->mfi_io_lock);
1032 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1034 struct mfi_evt_log_state *log_state = NULL;
1035 union mfi_evt class_locale;
1039 class_locale.members.reserved = 0;
1040 class_locale.members.locale = mfi_event_locale;
1041 class_locale.members.evt_class = mfi_event_class;
1043 if (seq_start == 0) {
1044 error = mfi_get_log_state(sc, &log_state);
1045 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1048 free(log_state, M_MFIBUF);
1053 * Walk through any events that fired since the last
1056 mfi_parse_entries(sc, log_state->shutdown_seq_num,
1057 log_state->newest_seq_num);
1058 seq = log_state->newest_seq_num;
1061 mfi_aen_register(sc, seq, class_locale.word);
1062 free(log_state, M_MFIBUF);
1068 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1071 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1072 cm->cm_complete = NULL;
1076 * MegaCli can issue a DCMD of 0. In this case do nothing
1077 * and return 0 to it as status
1079 if (cm->cm_frame->dcmd.opcode == 0) {
1080 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1082 return (cm->cm_error);
1084 mfi_enqueue_ready(cm);
1086 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1087 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1088 return (cm->cm_error);
/*
 * Detach-time teardown: drain the watchdog, destroy the control device,
 * release every per-command DMA map, then unwind each DMA allocation in
 * the order unload -> free -> tag destroy (sense, frames, comms, and the
 * ThunderBolt-only pools), and finally destroy the locks.  Every step is
 * guarded so this is safe to call from a partially failed attach.
 */
1092 mfi_free(struct mfi_softc *sc)
1094 struct mfi_command *cm;
1097 callout_drain(&sc->mfi_watchdog_callout);
1099 if (sc->mfi_cdev != NULL)
1100 destroy_dev(sc->mfi_cdev);
	/* Per-command buffer DMA maps, then the command array itself. */
1102 if (sc->mfi_total_cmds != 0) {
1103 for (i = 0; i < sc->mfi_total_cmds; i++) {
1104 cm = &sc->mfi_commands[i];
1105 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1107 free(sc->mfi_commands, M_MFIBUF);
1111 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1112 if (sc->mfi_irq != NULL)
1113 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
	/* Sense-buffer DMA resources. */
1116 if (sc->mfi_sense_busaddr != 0)
1117 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1118 if (sc->mfi_sense != NULL)
1119 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1120 sc->mfi_sense_dmamap);
1121 if (sc->mfi_sense_dmat != NULL)
1122 bus_dma_tag_destroy(sc->mfi_sense_dmat);
	/* Command frame pool DMA resources. */
1124 if (sc->mfi_frames_busaddr != 0)
1125 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1126 if (sc->mfi_frames != NULL)
1127 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1128 sc->mfi_frames_dmamap);
1129 if (sc->mfi_frames_dmat != NULL)
1130 bus_dma_tag_destroy(sc->mfi_frames_dmat);
	/* Producer/consumer communication area DMA resources. */
1132 if (sc->mfi_comms_busaddr != 0)
1133 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1134 if (sc->mfi_comms != NULL)
1135 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1136 sc->mfi_comms_dmamap);
1137 if (sc->mfi_comms_dmat != NULL)
1138 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1140 /* ThunderBolt contiguous memory free here */
1141 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1142 if (sc->mfi_tb_busaddr != 0)
1143 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1144 if (sc->request_message_pool != NULL)
1145 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1147 if (sc->mfi_tb_dmat != NULL)
1148 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1150 /* Version buffer memory free */
1151 /* Start LSIP200113393 */
1152 if (sc->verbuf_h_busaddr != 0)
1153 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1154 if (sc->verbuf != NULL)
1155 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1156 sc->verbuf_h_dmamap);
1157 if (sc->verbuf_h_dmat != NULL)
1158 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1160 /* End LSIP200113393 */
1161 /* ThunderBolt INIT packet memory Free */
1162 if (sc->mfi_tb_init_busaddr != 0)
1163 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1164 if (sc->mfi_tb_init != NULL)
1165 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1166 sc->mfi_tb_init_dmamap);
1167 if (sc->mfi_tb_init_dmat != NULL)
1168 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1170 /* ThunderBolt IOC Init Desc memory free here */
1171 if (sc->mfi_tb_ioc_init_busaddr != 0)
1172 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1173 sc->mfi_tb_ioc_init_dmamap);
1174 if (sc->mfi_tb_ioc_init_desc != NULL)
1175 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1176 sc->mfi_tb_ioc_init_desc,
1177 sc->mfi_tb_ioc_init_dmamap);
1178 if (sc->mfi_tb_ioc_init_dmat != NULL)
1179 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
	/* Per-command ThunderBolt pool entries, then the pointer array. */
1180 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1181 if (sc->mfi_cmd_pool_tbolt != NULL) {
1182 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1183 free(sc->mfi_cmd_pool_tbolt[i],
1185 sc->mfi_cmd_pool_tbolt[i] = NULL;
1189 if (sc->mfi_cmd_pool_tbolt != NULL) {
1190 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1191 sc->mfi_cmd_pool_tbolt = NULL;
1193 if (sc->request_desc_pool != NULL) {
1194 free(sc->request_desc_pool, M_MFIBUF);
1195 sc->request_desc_pool = NULL;
	/* Destroy the remaining DMA tags and the driver locks last. */
1198 if (sc->mfi_buffer_dmat != NULL)
1199 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1200 if (sc->mfi_parent_dmat != NULL)
1201 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1203 if (mtx_initialized(&sc->mfi_io_lock)) {
1204 mtx_destroy(&sc->mfi_io_lock);
1205 sx_destroy(&sc->mfi_config_lock);
/*
 * Deferred attach hook (config_intrhook): runs once interrupts are
 * available.  Tears down the intrhook, enables controller interrupts and,
 * under the config sx lock + io mutex, performs the initial device probes
 * (SKINNY controllers take a different path — elided lines here).
 */
1212 mfi_startup(void *arg)
1214 struct mfi_softc *sc;
1216 sc = (struct mfi_softc *)arg;
1218 config_intrhook_disestablish(&sc->mfi_ich);
1220 sc->mfi_enable_intr(sc);
1221 sx_xlock(&sc->mfi_config_lock);
1222 mtx_lock(&sc->mfi_io_lock);
1224 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1226 mtx_unlock(&sc->mfi_io_lock);
1227 sx_xunlock(&sc->mfi_config_lock);
	/*
	 * Interrupt handler body (function signature elided in this view;
	 * presumably mfi_intr(void *arg) — TODO confirm).  Walks the reply
	 * queue from the consumer index to the producer index, completing
	 * each command, then unfreezes deferred I/O and re-checks for work.
	 */
1233 struct mfi_softc *sc;
1234 struct mfi_command *cm;
1235 uint32_t pi, ci, context;
1237 sc = (struct mfi_softc *)arg;
	/* Not our interrupt (or nothing pending): bail early. */
1239 if (sc->mfi_check_clear_intr(sc))
	/* Snapshot hardware producer/consumer indexes. */
1243 pi = sc->mfi_comms->hw_pi;
1244 ci = sc->mfi_comms->hw_ci;
1245 mtx_lock(&sc->mfi_io_lock);
1247 context = sc->mfi_comms->hw_reply_q[ci];
1248 if (context < sc->mfi_max_fw_cmds) {
1249 cm = &sc->mfi_commands[context];
1250 mfi_remove_busy(cm);
1252 mfi_complete(sc, cm);
	/* Reply queue is circular: one slot beyond max_fw_cmds wraps to 0. */
1254 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1259 sc->mfi_comms->hw_ci = ci;
1261 /* Give deferred I/O a chance to run */
1262 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1263 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1265 mtx_unlock(&sc->mfi_io_lock);
1268 * Dummy read to flush the bus; this ensures that the indexes are up
1269 * to date. Restart processing if more commands have come in.
1271 (void)sc->mfi_read_fw_status(sc);
1272 if (pi != sc->mfi_comms->hw_pi)
/*
 * Controller shutdown: abort the outstanding AEN and map-sync commands
 * (flagging cm_aen_abort / cm_map_abort first so their completion handlers
 * don't re-arm), then issue a polled MFI_DCMD_CTRL_SHUTDOWN DCMD.
 */
1279 mfi_shutdown(struct mfi_softc *sc)
1281 struct mfi_dcmd_frame *dcmd;
1282 struct mfi_command *cm;
1287 sc->cm_aen_abort = 1;
1288 if (sc->mfi_aen_cm != NULL)
1289 mfi_abort(sc, &sc->mfi_aen_cm);
1291 if (sc->mfi_map_sync_cm)
1292 sc->cm_map_abort = 1;
1293 if (sc->mfi_map_sync_cm != NULL)
1294 mfi_abort(sc, &sc->mfi_map_sync_cm);
1296 mtx_lock(&sc->mfi_io_lock);
1297 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1299 mtx_unlock(&sc->mfi_io_lock);
	/* No data phase: direction NONE, polled so it completes synchronously. */
1303 dcmd = &cm->cm_frame->dcmd;
1304 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1305 cm->cm_flags = MFI_CMD_POLLED;
1308 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1309 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1312 mfi_release_command(cm);
1313 mtx_unlock(&sc->mfi_io_lock);
/*
 * Re-scan system (JBOD) physical drives: query the firmware's PD list
 * with a polled MFI_DCMD_PD_LIST_QUERY, attach any exposed PD not already
 * known (and not pending), and detach PDs that disappeared from the list.
 * Requires both mfi_config_lock (exclusive) and mfi_io_lock; the io lock
 * is dropped around child-device deletion.
 */
1318 mfi_syspdprobe(struct mfi_softc *sc)
1320 struct mfi_frame_header *hdr;
1321 struct mfi_command *cm = NULL;
1322 struct mfi_pd_list *pdlist = NULL;
1323 struct mfi_system_pd *syspd, *tmp;
1324 struct mfi_system_pending *syspd_pend;
1325 int error, i, found;
1327 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1328 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1329 /* Add SYSTEM PD's */
1330 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1331 (void **)&pdlist, sizeof(*pdlist));
1333 device_printf(sc->mfi_dev,
1334 "Error while forming SYSTEM PD list\n");
	/* Only PDs exposed to the host; mbox[1]=0 per the query format. */
1338 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1339 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1340 cm->cm_frame->dcmd.mbox[1] = 0;
1341 if (mfi_mapcmd(sc, cm) != 0) {
1342 device_printf(sc->mfi_dev,
1343 "Failed to get syspd device listing\n");
1346 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1347 BUS_DMASYNC_POSTREAD);
1348 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1349 hdr = &cm->cm_frame->header;
1350 if (hdr->cmd_status != MFI_STAT_OK) {
1351 device_printf(sc->mfi_dev,
1352 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1355 /* Get each PD and add it to the system */
1356 for (i = 0; i < pdlist->count; i++) {
	/* Skip enclosure entries (device id equals enclosure device id). */
1357 if (pdlist->addr[i].device_id ==
1358 pdlist->addr[i].encl_device_id)
	/* Already attached, or attach already in flight: skip. */
1361 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1362 if (syspd->pd_id == pdlist->addr[i].device_id)
1365 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1366 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1370 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1372 /* Delete SYSPD's whose state has been changed */
1373 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1375 for (i = 0; i < pdlist->count; i++) {
1376 if (syspd->pd_id == pdlist->addr[i].device_id)
	/* Drop the io mutex: device_delete_child may sleep. */
1381 mtx_unlock(&sc->mfi_io_lock);
1383 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1385 mtx_lock(&sc->mfi_io_lock);
1390 free(pdlist, M_MFIBUF);
1392 mfi_release_command(cm);
/*
 * Probe logical drives: issue MFI_DCMD_LD_GET_LIST (sleeping via
 * mfi_wait_command) and attach any LD target not already present in
 * either the attached or pending lists.  Requires mfi_config_lock
 * (exclusive) and mfi_io_lock.
 */
1398 mfi_ldprobe(struct mfi_softc *sc)
1400 struct mfi_frame_header *hdr;
1401 struct mfi_command *cm = NULL;
1402 struct mfi_ld_list *list = NULL;
1403 struct mfi_disk *ld;
1404 struct mfi_disk_pending *ld_pend;
1407 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1408 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1410 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1411 (void **)&list, sizeof(*list));
1415 cm->cm_flags = MFI_CMD_DATAIN;
1416 if (mfi_wait_command(sc, cm) != 0) {
1417 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1421 hdr = &cm->cm_frame->header;
1422 if (hdr->cmd_status != MFI_STAT_OK) {
1423 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
	/* Attach each listed LD unless already attached or attach-pending. */
1428 for (i = 0; i < list->ld_count; i++) {
1429 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1430 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1433 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1434 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1437 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1442 free(list, M_MFIBUF);
1444 mfi_release_command(cm);
1450 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1451 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render an event timestamp into a static buffer (not thread-safe;
 * intended for single-threaded event printing only).
 */
1455 format_timestamp(uint32_t timestamp)
1457 static char buffer[32];
	/* 0xffxxxxxx => relative to boot rather than the 2000 epoch. */
1459 if ((timestamp & 0xff000000) == 0xff000000)
1460 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1463 snprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a short printable name; unknown codes
 * are formatted numerically into a static buffer (not thread-safe).
 */
1468 format_class(int8_t class)
1470 static char buffer[6];
1473 case MFI_EVT_CLASS_DEBUG:
1475 case MFI_EVT_CLASS_PROGRESS:
1476 return ("progress");
1477 case MFI_EVT_CLASS_INFO:
1479 case MFI_EVT_CLASS_WARNING:
1481 case MFI_EVT_CLASS_CRITICAL:
1483 case MFI_EVT_CLASS_FATAL:
1485 case MFI_EVT_CLASS_DEAD:
	/* default: unknown class, print the raw value */
1488 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log one firmware event and react to it: host-bus-scan requests trigger
 * LD/SYSPD re-probes, LD-offline events detach the corresponding mfid
 * child, and PD removal/insertion events detach/attach mfisyspd children.
 * Events older than the boot sequence number, or arriving while the
 * driver is detaching, are ignored.
 */
1494 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1496 struct mfi_system_pd *syspd = NULL;
1498 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1499 format_timestamp(detail->time), detail->evt_class.members.locale,
1500 format_class(detail->evt_class.members.evt_class),
1501 detail->description);
1503 /* Don't act on old AEN's or while shutting down */
1504 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1507 switch (detail->arg_type) {
1508 case MR_EVT_ARGS_NONE:
1509 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1510 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1511 if (mfi_detect_jbod_change) {
1513 * Probe for new SYSPD's and Delete
1516 sx_xlock(&sc->mfi_config_lock);
1517 mtx_lock(&sc->mfi_io_lock);
1519 mtx_unlock(&sc->mfi_io_lock);
1520 sx_xunlock(&sc->mfi_config_lock);
1524 case MR_EVT_ARGS_LD_STATE:
1525 /* During load time driver reads all the events starting
1526 * from the one that has been logged after shutdown. Avoid
1529 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1531 struct mfi_disk *ld;
	/* Find the mfid child matching the offlined target id. */
1532 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1534 detail->args.ld_state.ld.target_id)
1538 Fix: for kernel panics when SSCD is removed
1539 KASSERT(ld != NULL, ("volume dissappeared"));
1543 device_delete_child(sc->mfi_dev, ld->ld_dev);
1548 case MR_EVT_ARGS_PD:
1549 if (detail->code == MR_EVT_PD_REMOVED) {
1550 if (mfi_detect_jbod_change) {
1552 * If the removed device is a SYSPD then
1555 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1558 detail->args.pd.device_id) {
1560 device_delete_child(
1569 if (detail->code == MR_EVT_PD_INSERTED) {
1570 if (mfi_detect_jbod_change) {
1571 /* Probe for new SYSPD's */
1572 sx_xlock(&sc->mfi_config_lock);
1573 mtx_lock(&sc->mfi_io_lock);
1575 mtx_unlock(&sc->mfi_io_lock);
1576 sx_xunlock(&sc->mfi_config_lock);
/*
 * Copy an event detail onto the softc's event queue and kick the SWI
 * taskqueue to process it.  M_NOWAIT allocation: callers run with the
 * io lock held, so the event is silently dropped on allocation failure
 * (elided branch around line 1590 — TODO confirm).
 */
1584 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1586 struct mfi_evt_queue_elm *elm;
1588 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1589 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1592 memcpy(&elm->detail, detail, sizeof(*detail));
1593 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1594 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: splice the softc's queued events onto a local list
 * under the io lock, then decode and free each one without holding the
 * lock (mfi_decode_evt may sleep/acquire locks itself).
 */
1598 mfi_handle_evt(void *context, int pending)
1600 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1601 struct mfi_softc *sc;
1602 struct mfi_evt_queue_elm *elm;
1606 mtx_lock(&sc->mfi_io_lock);
1607 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1608 mtx_unlock(&sc->mfi_io_lock);
1609 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1610 TAILQ_REMOVE(&queue, elm, link);
1611 mfi_decode_evt(sc, &elm->detail);
1612 free(elm, M_MFIBUF);
/*
 * Arm an asynchronous event wait (MFI_DCMD_CTRL_EVENT_WAIT) starting at
 * 'seq' with the given class/locale filter.  If an AEN command is already
 * outstanding, either reuse it (when its filter already covers the new
 * request) or widen the filter and abort the old command so it can be
 * re-registered.  Completion is handled by mfi_aen_complete.
 */
1617 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1619 struct mfi_command *cm;
1620 struct mfi_dcmd_frame *dcmd;
1621 union mfi_evt current_aen, prior_aen;
1622 struct mfi_evt_detail *ed = NULL;
1625 current_aen.word = locale;
1626 if (sc->mfi_aen_cm != NULL) {
	/* Recover the filter of the outstanding AEN from its mbox. */
1628 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
	/* Existing class covers ours and locale is a superset: keep it. */
1629 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1630 !((prior_aen.members.locale & current_aen.members.locale)
1631 ^current_aen.members.locale)) {
	/* Otherwise merge locales, take the broader (lower) class, re-arm. */
1634 prior_aen.members.locale |= current_aen.members.locale;
1635 if (prior_aen.members.evt_class
1636 < current_aen.members.evt_class)
1637 current_aen.members.evt_class =
1638 prior_aen.members.evt_class;
1639 mfi_abort(sc, &sc->mfi_aen_cm);
1643 mtx_lock(&sc->mfi_io_lock);
1644 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1645 (void **)&ed, sizeof(*ed));
1646 mtx_unlock(&sc->mfi_io_lock);
	/* mbox[0] = starting sequence number, mbox[1] = class/locale word. */
1651 dcmd = &cm->cm_frame->dcmd;
1652 ((uint32_t *)&dcmd->mbox)[0] = seq;
1653 ((uint32_t *)&dcmd->mbox)[1] = locale;
1654 cm->cm_flags = MFI_CMD_DATAIN;
1655 cm->cm_complete = mfi_aen_complete;
1657 sc->last_seq_num = seq;
1658 sc->mfi_aen_cm = cm;
1660 mtx_lock(&sc->mfi_io_lock);
1661 mfi_enqueue_ready(cm);
1663 mtx_unlock(&sc->mfi_io_lock);
/*
 * Completion handler for the AEN command.  On a real event: queue it for
 * the event task, wake select() pollers, signal registered PIDs with
 * SIGIO, then release the command and re-arm via mfi_aen_setup() with the
 * next sequence number (io lock is dropped around the setup call).
 * Aborted/invalid completions skip the re-arm.
 */
1670 mfi_aen_complete(struct mfi_command *cm)
1672 struct mfi_frame_header *hdr;
1673 struct mfi_softc *sc;
1674 struct mfi_evt_detail *detail;
1675 struct mfi_aen *mfi_aen_entry, *tmp;
1676 int seq = 0, aborted = 0;
1679 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1681 hdr = &cm->cm_frame->header;
1683 if (sc->mfi_aen_cm == NULL)
	/* Shutdown abort or firmware never completed the command. */
1686 if (sc->cm_aen_abort ||
1687 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1688 sc->cm_aen_abort = 0;
	/* Wake anyone polling for events. */
1691 sc->mfi_aen_triggered = 1;
1692 if (sc->mfi_poll_waiting) {
1693 sc->mfi_poll_waiting = 0;
1694 selwakeup(&sc->mfi_select);
1696 detail = cm->cm_data;
1697 mfi_queue_evt(sc, detail);
1698 seq = detail->seq + 1;
	/* Notify (and drop) every process registered for SIGIO delivery. */
1699 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1701 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1703 PROC_LOCK(mfi_aen_entry->p);
1704 kern_psignal(mfi_aen_entry->p, SIGIO);
1705 PROC_UNLOCK(mfi_aen_entry->p);
1706 free(mfi_aen_entry, M_MFIBUF);
1710 free(cm->cm_data, M_MFIBUF);
1711 sc->mfi_aen_cm = NULL;
1712 wakeup(&sc->mfi_aen_cm);
1713 mfi_release_command(cm);
1715 /* set it up again so the driver can catch more events */
1717 mtx_unlock(&sc->mfi_io_lock);
1718 mfi_aen_setup(sc, seq);
1719 mtx_lock(&sc->mfi_io_lock);
/* Number of event entries fetched per MFI_DCMD_CTRL_EVENT_GET request. */
1723 #define MAX_EVENTS 15
/*
 * Replay the controller event log from start_seq up to (but not past)
 * stop_seq, queuing each entry via mfi_queue_evt().  Fetches events in
 * batches of MAX_EVENTS using polled MFI_DCMD_CTRL_EVENT_GET DCMDs;
 * MFI_STAT_NOT_FOUND ends the walk.  The log is circular, hence the
 * wrap-around handling when comparing against stop_seq.
 */
1726 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1728 struct mfi_command *cm;
1729 struct mfi_dcmd_frame *dcmd;
1730 struct mfi_evt_list *el;
1731 union mfi_evt class_locale;
1732 int error, i, seq, size;
1734 class_locale.members.reserved = 0;
1735 class_locale.members.locale = mfi_event_locale;
1736 class_locale.members.evt_class = mfi_event_class;
	/* One header plus MAX_EVENTS details (count factor elided here). */
1738 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1740 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1744 for (seq = start_seq;;) {
1745 mtx_lock(&sc->mfi_io_lock);
1746 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1748 mtx_unlock(&sc->mfi_io_lock);
1751 mtx_unlock(&sc->mfi_io_lock);
	/* Hand-build the DCMD frame: EVENT_GET starting at 'seq'. */
1753 dcmd = &cm->cm_frame->dcmd;
1754 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1755 dcmd->header.cmd = MFI_CMD_DCMD;
1756 dcmd->header.timeout = 0;
1757 dcmd->header.data_len = size;
1758 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1759 ((uint32_t *)&dcmd->mbox)[0] = seq;
1760 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1761 cm->cm_sg = &dcmd->sgl;
1762 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1763 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1767 mtx_lock(&sc->mfi_io_lock);
1768 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1769 device_printf(sc->mfi_dev,
1770 "Failed to get controller entries\n");
1771 mfi_release_command(cm);
1772 mtx_unlock(&sc->mfi_io_lock);
1776 mtx_unlock(&sc->mfi_io_lock);
1777 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1778 BUS_DMASYNC_POSTREAD);
1779 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	/* No more entries at or after 'seq': walk is complete. */
1781 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1782 mtx_lock(&sc->mfi_io_lock);
1783 mfi_release_command(cm);
1784 mtx_unlock(&sc->mfi_io_lock);
1787 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1788 device_printf(sc->mfi_dev,
1789 "Error %d fetching controller entries\n",
1790 dcmd->header.cmd_status);
1791 mtx_lock(&sc->mfi_io_lock);
1792 mfi_release_command(cm);
1793 mtx_unlock(&sc->mfi_io_lock);
1796 mtx_lock(&sc->mfi_io_lock);
1797 mfi_release_command(cm);
1798 mtx_unlock(&sc->mfi_io_lock);
1800 for (i = 0; i < el->count; i++) {
1802 * If this event is newer than 'stop_seq' then
1803 * break out of the loop. Note that the log
1804 * is a circular buffer so we have to handle
1805 * the case that our stop point is earlier in
1806 * the buffer than our start point.
1808 if (el->event[i].seq >= stop_seq) {
1809 if (start_seq <= stop_seq)
1811 else if (el->event[i].seq < start_seq)
1814 mtx_lock(&sc->mfi_io_lock);
1815 mfi_queue_evt(sc, &el->event[i]);
1816 mtx_unlock(&sc->mfi_io_lock);
	/* Continue from just past the last entry in this batch. */
1818 seq = el->event[el->count - 1].seq + 1;
/*
 * Begin attaching logical drive 'id': record it on the pending list,
 * fetch MFI_DCMD_LD_GET_INFO (sleeping), and hand off to
 * mfi_add_ld_complete() unless the LD is an SSCD (cache device), which
 * is not exposed as an mfid child.  Requires mfi_io_lock.
 */
1826 mfi_add_ld(struct mfi_softc *sc, int id)
1828 struct mfi_command *cm;
1829 struct mfi_dcmd_frame *dcmd = NULL;
1830 struct mfi_ld_info *ld_info = NULL;
1831 struct mfi_disk_pending *ld_pend;
1834 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Mark the attach as pending so concurrent probes skip this id. */
1836 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1837 if (ld_pend != NULL) {
1838 ld_pend->ld_id = id;
1839 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1842 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1843 (void **)&ld_info, sizeof(*ld_info));
1845 device_printf(sc->mfi_dev,
1846 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1848 free(ld_info, M_MFIBUF);
1851 cm->cm_flags = MFI_CMD_DATAIN;
1852 dcmd = &cm->cm_frame->dcmd;
1854 if (mfi_wait_command(sc, cm) != 0) {
1855 device_printf(sc->mfi_dev,
1856 "Failed to get logical drive: %d\n", id);
1857 free(ld_info, M_MFIBUF);
	/* SSCD volumes are not attached; their ld_info is freed below. */
1860 if (ld_info->ld_config.params.isSSCD != 1)
1861 mfi_add_ld_complete(cm);
1863 mfi_release_command(cm);
1864 if (ld_info) /* SSCD drives ld_info free here */
1865 free(ld_info, M_MFIBUF);
/*
 * Finish attaching a logical drive: on success, add an "mfid" child with
 * the fetched ld_info as ivars and re-probe the bus.  On abort or
 * firmware error the ld_info is freed and map-sync waiters are woken.
 * The io lock is dropped around newbus operations (they may sleep).
 */
1871 mfi_add_ld_complete(struct mfi_command *cm)
1873 struct mfi_frame_header *hdr;
1874 struct mfi_ld_info *ld_info;
1875 struct mfi_softc *sc;
1879 hdr = &cm->cm_frame->header;
1880 ld_info = cm->cm_private;
1882 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1883 free(ld_info, M_MFIBUF);
1884 wakeup(&sc->mfi_map_sync_cm);
1885 mfi_release_command(cm);
1888 wakeup(&sc->mfi_map_sync_cm);
1889 mfi_release_command(cm);
1891 mtx_unlock(&sc->mfi_io_lock);
1893 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1894 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1895 free(ld_info, M_MFIBUF);
1897 mtx_lock(&sc->mfi_io_lock);
	/* ld_info ownership passes to the child device via ivars. */
1901 device_set_ivars(child, ld_info);
1902 device_set_desc(child, "MFI Logical Disk");
1903 bus_generic_attach(sc->mfi_dev);
1905 mtx_lock(&sc->mfi_io_lock);
/*
 * Begin attaching system (JBOD) physical drive 'id': record it on the
 * pending list, fetch MFI_DCMD_PD_GET_INFO with a polled DCMD, then hand
 * off to mfi_add_sys_pd_complete().  Requires mfi_io_lock.
 */
1908 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1910 struct mfi_command *cm;
1911 struct mfi_dcmd_frame *dcmd = NULL;
1912 struct mfi_pd_info *pd_info = NULL;
1913 struct mfi_system_pending *syspd_pend;
1916 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Mark the attach as pending so concurrent probes skip this id. */
1918 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1919 if (syspd_pend != NULL) {
1920 syspd_pend->pd_id = id;
1921 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1924 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1925 (void **)&pd_info, sizeof(*pd_info));
1927 device_printf(sc->mfi_dev,
1928 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1931 free(pd_info, M_MFIBUF);
1934 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1935 dcmd = &cm->cm_frame->dcmd;
1937 dcmd->header.scsi_status = 0;
1938 dcmd->header.pad0 = 0;
1939 if (mfi_mapcmd(sc, cm) != 0) {
1940 device_printf(sc->mfi_dev,
1941 "Failed to get physical drive info %d\n", id);
1942 free(pd_info, M_MFIBUF);
1945 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1946 BUS_DMASYNC_POSTREAD);
1947 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1948 mfi_add_sys_pd_complete(cm);
/*
 * Finish attaching a system PD: verify the DCMD succeeded and the drive
 * is in SYSTEM state, then add an "mfisyspd" child with pd_info as ivars
 * and re-probe the bus.  The io lock is dropped around newbus operations.
 */
1953 mfi_add_sys_pd_complete(struct mfi_command *cm)
1955 struct mfi_frame_header *hdr;
1956 struct mfi_pd_info *pd_info;
1957 struct mfi_softc *sc;
1961 hdr = &cm->cm_frame->header;
1962 pd_info = cm->cm_private;
1964 if (hdr->cmd_status != MFI_STAT_OK) {
1965 free(pd_info, M_MFIBUF);
1966 mfi_release_command(cm);
	/* Only drives the firmware reports as SYSTEM become children. */
1969 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1970 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1971 pd_info->ref.v.device_id);
1972 free(pd_info, M_MFIBUF);
1973 mfi_release_command(cm);
1976 mfi_release_command(cm);
1978 mtx_unlock(&sc->mfi_io_lock);
1980 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1981 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1982 free(pd_info, M_MFIBUF);
1984 mtx_lock(&sc->mfi_io_lock);
	/* pd_info ownership passes to the child device via ivars. */
1988 device_set_ivars(child, pd_info);
1989 device_set_desc(child, "MFI System PD");
1990 bus_generic_attach(sc->mfi_dev);
1992 mtx_lock(&sc->mfi_io_lock);
/*
 * Pull the next bio off the driver's bio queue and turn it into an MFI
 * command (LD I/O or SYSPD passthrough, selected by bio_driver2).
 * Keeps two free commands in reserve for ioctls; if command construction
 * fails the bio is re-queued.  Returns NULL when there is nothing to do.
 */
1995 static struct mfi_command *
1996 mfi_bio_command(struct mfi_softc *sc)
1999 struct mfi_command *cm = NULL;
2001 /*reserving two commands to avoid starvation for IOCTL*/
2002 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2005 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2008 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2009 cm = mfi_build_ldio(sc, bio);
2010 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2011 cm = mfi_build_syspdio(sc, bio);
	/* Command construction failed: put the bio back for a retry. */
2014 mfi_enqueue_bio(sc, bio);
2019 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
/*
 * Build a SCSI READ/WRITE CDB for the given LBA and block count, picking
 * the smallest CDB (6/10/12/16 bytes) that can encode both values.
 * Returns the CDB length written into 'cdb'.
 */
2023 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
	/* 6-byte CDB: 21-bit LBA, 8-bit count (third condition elided). */
2027 if (((lba & 0x1fffff) == lba)
2028 && ((block_count & 0xff) == block_count)
2030 /* We can fit in a 6 byte cdb */
2031 struct scsi_rw_6 *scsi_cmd;
2033 scsi_cmd = (struct scsi_rw_6 *)cdb;
2034 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2035 scsi_ulto3b(lba, scsi_cmd->addr);
2036 scsi_cmd->length = block_count & 0xff;
2037 scsi_cmd->control = 0;
2038 cdb_len = sizeof(*scsi_cmd);
2039 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2040 /* Need a 10 byte CDB */
2041 struct scsi_rw_10 *scsi_cmd;
2043 scsi_cmd = (struct scsi_rw_10 *)cdb;
2044 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2045 scsi_cmd->byte2 = byte2;
2046 scsi_ulto4b(lba, scsi_cmd->addr);
2047 scsi_cmd->reserved = 0;
2048 scsi_ulto2b(block_count, scsi_cmd->length);
2049 scsi_cmd->control = 0;
2050 cdb_len = sizeof(*scsi_cmd);
2051 } else if (((block_count & 0xffffffff) == block_count) &&
2052 ((lba & 0xffffffff) == lba)) {
2053 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2054 struct scsi_rw_12 *scsi_cmd;
2056 scsi_cmd = (struct scsi_rw_12 *)cdb;
2057 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2058 scsi_cmd->byte2 = byte2;
2059 scsi_ulto4b(lba, scsi_cmd->addr);
2060 scsi_cmd->reserved = 0;
2061 scsi_ulto4b(block_count, scsi_cmd->length);
2062 scsi_cmd->control = 0;
2063 cdb_len = sizeof(*scsi_cmd);
2066 * 16 byte CDB. We'll only get here if the LBA is larger
2069 struct scsi_rw_16 *scsi_cmd;
2071 scsi_cmd = (struct scsi_rw_16 *)cdb;
2072 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2073 scsi_cmd->byte2 = byte2;
2074 scsi_u64to8b(lba, scsi_cmd->addr);
2075 scsi_cmd->reserved = 0;
2076 scsi_ulto4b(block_count, scsi_cmd->length);
2077 scsi_cmd->control = 0;
2078 cdb_len = sizeof(*scsi_cmd);
/*
 * Translate a bio aimed at a system PD into an MFI SCSI passthrough
 * command (MFI_CMD_PD_SCSI_IO) with a CDB built by mfi_build_cdb().
 * The target id comes from bio_driver1.  Returns NULL if no free command
 * is available; completion goes through mfi_bio_complete().
 */
2084 static struct mfi_command *
2085 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2087 struct mfi_command *cm;
2088 struct mfi_pass_frame *pass;
2089 uint32_t context = 0;
2090 int flags = 0, blkcount = 0, readop;
2093 if ((cm = mfi_dequeue_free(sc)) == NULL)
2096 /* Zero out the MFI frame */
	/* Preserve the firmware context tag across the bzero. */
2097 context = cm->cm_frame->header.context;
2098 bzero(cm->cm_frame, sizeof(union mfi_frame));
2099 cm->cm_frame->header.context = context;
2100 pass = &cm->cm_frame->pass;
2101 bzero(pass->cdb, 16);
2102 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2103 switch (bio->bio_cmd & 0x03) {
2105 flags = MFI_CMD_DATAIN;
2109 flags = MFI_CMD_DATAOUT;
2113 /* TODO: what about BIO_DELETE??? */
2114 panic("Unsupported bio command %x\n", bio->bio_cmd);
2117 /* Cheat with the sector length to avoid a non-constant division */
2118 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2119 /* Fill the LBA and Transfer length in CDB */
2120 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2122 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2123 pass->header.lun_id = 0;
2124 pass->header.timeout = 0;
2125 pass->header.flags = 0;
2126 pass->header.scsi_status = 0;
2127 pass->header.sense_len = MFI_SENSE_LEN;
2128 pass->header.data_len = bio->bio_bcount;
2129 pass->header.cdb_len = cdb_len;
	/* 64-bit sense buffer address split into lo/hi halves. */
2130 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2131 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2132 cm->cm_complete = mfi_bio_complete;
2133 cm->cm_private = bio;
2134 cm->cm_data = bio->bio_data;
2135 cm->cm_len = bio->bio_bcount;
2136 cm->cm_sg = &pass->sgl;
2137 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2138 cm->cm_flags = flags;
/*
 * Translate a bio aimed at a logical drive into a native MFI I/O frame
 * (MFI_CMD_LD_READ / MFI_CMD_LD_WRITE) with a 64-bit LBA split across
 * lba_lo/lba_hi.  Returns NULL if no free command is available;
 * completion goes through mfi_bio_complete().
 */
2142 static struct mfi_command *
2143 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2145 struct mfi_io_frame *io;
2146 struct mfi_command *cm;
2149 uint32_t context = 0;
2151 if ((cm = mfi_dequeue_free(sc)) == NULL)
2154 /* Zero out the MFI frame */
	/* Preserve the firmware context tag across the bzero. */
2155 context = cm->cm_frame->header.context;
2156 bzero(cm->cm_frame, sizeof(union mfi_frame));
2157 cm->cm_frame->header.context = context;
2158 io = &cm->cm_frame->io;
2159 switch (bio->bio_cmd & 0x03) {
2161 io->header.cmd = MFI_CMD_LD_READ;
2162 flags = MFI_CMD_DATAIN;
2165 io->header.cmd = MFI_CMD_LD_WRITE;
2166 flags = MFI_CMD_DATAOUT;
2169 /* TODO: what about BIO_DELETE??? */
2170 panic("Unsupported bio command %x\n", bio->bio_cmd);
2173 /* Cheat with the sector length to avoid a non-constant division */
2174 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2175 io->header.target_id = (uintptr_t)bio->bio_driver1;
2176 io->header.timeout = 0;
2177 io->header.flags = 0;
2178 io->header.scsi_status = 0;
2179 io->header.sense_len = MFI_SENSE_LEN;
	/* Native LD I/O expresses length in blocks, not bytes. */
2180 io->header.data_len = blkcount;
2181 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2182 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2183 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2184 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2185 cm->cm_complete = mfi_bio_complete;
2186 cm->cm_private = bio;
2187 cm->cm_data = bio->bio_data;
2188 cm->cm_len = bio->bio_bcount;
2189 cm->cm_sg = &io->sgl;
2190 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2191 cm->cm_flags = flags;
/*
 * Completion handler for bio-originated commands: translate firmware or
 * transfer errors into BIO_ERROR/EIO (printing sense data on firmware
 * errors), release the command, and finish the bio via the disk layer.
 */
2196 mfi_bio_complete(struct mfi_command *cm)
2199 struct mfi_frame_header *hdr;
2200 struct mfi_softc *sc;
2202 bio = cm->cm_private;
2203 hdr = &cm->cm_frame->header;
2206 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2207 bio->bio_flags |= BIO_ERROR;
2208 bio->bio_error = EIO;
2209 device_printf(sc->mfi_dev, "I/O error, status= %d "
2210 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2211 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2212 } else if (cm->cm_error != 0) {
	/* Local (driver-side) error, e.g. DMA load failure. */
2213 bio->bio_flags |= BIO_ERROR;
2216 mfi_release_command(cm);
2217 mfi_disk_complete(bio);
/*
 * I/O dispatch loop: while resources permit, pick the next command from
 * (in order) the ready queue, the CAM ccb queue, or the bio queue, and
 * hand it to mfi_mapcmd().  A mapping failure re-queues the command and
 * stops dispatching (queue is frozen until resources free up).
 */
2221 mfi_startio(struct mfi_softc *sc)
2223 struct mfi_command *cm;
2224 struct ccb_hdr *ccbh;
2227 /* Don't bother if we're short on resources */
2228 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2231 /* Try a command that has already been prepared */
2232 cm = mfi_dequeue_ready(sc);
	/* Next preference: CAM passthrough work, if any is queued. */
2235 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2236 cm = sc->mfi_cam_start(ccbh);
2239 /* Nope, so look for work on the bioq */
2241 cm = mfi_bio_command(sc);
2243 /* No work available, so exit */
2247 /* Send the command to the controller */
2248 if (mfi_mapcmd(sc, cm) != 0) {
2249 mfi_requeue_ready(cm);
/*
 * DMA-map a command's data buffer and send it to the firmware.  With
 * data (and not an STP command) the buffer is loaded through
 * mfi_data_cb(), which finishes the send; EINPROGRESS (deferred load)
 * freezes the queue.  Commands without data are sent directly, choosing
 * the ThunderBolt or legacy path based on sc->MFA_enabled.
 * Caller must hold mfi_io_lock.
 */
2256 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2260 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2262 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
	/* Polled commands must not defer the DMA load. */
2263 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2264 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2265 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2266 if (error == EINPROGRESS) {
2267 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2271 if (sc->MFA_enabled)
2272 error = mfi_tbolt_send_frame(sc, cm);
2274 error = mfi_send_frame(sc, cm);
/*
 * bus_dmamap_load() callback: fill the command's scatter/gather list
 * from the DMA segments (IEEE "skinny" SGEs for I/O on SKINNY
 * controllers, otherwise 32- or 64-bit SGEs; STP commands carry their
 * first cm_stp_len bytes in-frame, so the first segment is split),
 * sync the buffer for the transfer direction, account the extra frames,
 * and send the command to the firmware.
 */
2281 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2283 struct mfi_frame_header *hdr;
2284 struct mfi_command *cm;
2286 struct mfi_softc *sc;
2287 int i, j, first, dir;
2290 cm = (struct mfi_command *)arg;
2292 hdr = &cm->cm_frame->header;
	/* Deferred load failed: record the error and complete the command. */
2296 printf("error %d in callback\n", error);
2297 cm->cm_error = error;
2298 mfi_complete(sc, cm);
2301 /* Use IEEE sgl only for IO's on a SKINNY controller
2302 * For other commands on a SKINNY controller use either
2303 * sg32 or sg64 based on the sizeof(bus_addr_t).
2304 * Also calculate the total frame size based on the type
2307 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2308 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2309 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2310 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2311 for (i = 0; i < nsegs; i++) {
2312 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2313 sgl->sg_skinny[i].len = segs[i].ds_len;
2314 sgl->sg_skinny[i].flag = 0;
2316 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2317 sge_size = sizeof(struct mfi_sg_skinny);
2318 hdr->sg_count = nsegs;
	/*
	 * STP: the first cm_stp_len bytes live in the frame itself; a
	 * dedicated first SGE covers them, and every data segment is
	 * offset past them.  NOTE(review): segment arithmetic below
	 * depends on elided lines — confirm against the full source.
	 */
2321 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2322 first = cm->cm_stp_len;
2323 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2324 sgl->sg32[j].addr = segs[0].ds_addr;
2325 sgl->sg32[j++].len = first;
2327 sgl->sg64[j].addr = segs[0].ds_addr;
2328 sgl->sg64[j++].len = first;
2332 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2333 for (i = 0; i < nsegs; i++) {
2334 sgl->sg32[j].addr = segs[i].ds_addr + first;
2335 sgl->sg32[j++].len = segs[i].ds_len - first;
2339 for (i = 0; i < nsegs; i++) {
2340 sgl->sg64[j].addr = segs[i].ds_addr + first;
2341 sgl->sg64[j++].len = segs[i].ds_len - first;
2344 hdr->flags |= MFI_FRAME_SGL64;
2347 sge_size = sc->mfi_sge_size;
	/* Sync the buffer for the direction(s) of the transfer. */
2351 if (cm->cm_flags & MFI_CMD_DATAIN) {
2352 dir |= BUS_DMASYNC_PREREAD;
2353 hdr->flags |= MFI_FRAME_DIR_READ;
2355 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2356 dir |= BUS_DMASYNC_PREWRITE;
2357 hdr->flags |= MFI_FRAME_DIR_WRITE;
2359 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2360 cm->cm_flags |= MFI_CMD_MAPPED;
2363 * Instead of calculating the total number of frames in the
2364 * compound frame, it's already assumed that there will be at
2365 * least 1 frame, so don't compensate for the modulo of the
2366 * following division.
2368 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2369 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2371 if (sc->MFA_enabled)
2372 mfi_tbolt_send_frame(sc, cm);
2374 mfi_send_frame(sc, cm);
/*
 * Hand a frame to legacy (non-ThunderBolt) firmware via the
 * issue_cmd register write.  Async commands are timestamped and put on
 * the busy queue; polled commands set an invalid status sentinel, skip
 * the reply queue, and busy-wait (up to MFI_POLL_TIMEOUT_SECS) for the
 * firmware to overwrite the sentinel.
 */
2380 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2382 struct mfi_frame_header *hdr;
2383 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2385 hdr = &cm->cm_frame->header;
2387 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2388 cm->cm_timestamp = time_uptime;
2389 mfi_enqueue_busy(cm);
	/* Polled: sentinel status, and keep it off the reply queue. */
2391 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2392 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2396 * The bus address of the command is aligned on a 64 byte boundary,
2397 * leaving the least 6 bits as zero. For whatever reason, the
2398 * hardware wants the address shifted right by three, leaving just
2399 * 3 zero bits. These three bits are then used as a prefetching
2400 * hint for the hardware to predict how many frames need to be
2401 * fetched across the bus. If a command has more than 8 frames
2402 * then the 3 bits are set to 0x7 and the firmware uses other
2403 * information in the command to determine the total amount to fetch.
2404 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2405 * is enough for both 32bit and 64bit systems.
2407 if (cm->cm_extra_frames > 7)
2408 cm->cm_extra_frames = 7;
2410 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2412 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2415 /* This is a polled command, so busy-wait for it to complete. */
2416 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
	/* Sentinel never overwritten: the command timed out. */
2423 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2424 device_printf(sc->mfi_dev, "Frame %p timed out "
2425 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * mfi_complete: finish a command.  If a data buffer was DMA-mapped,
 * post-sync and unload it, then mark the command completed and invoke
 * its completion callback if one was registered.
 */
2434 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2438 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP frames are treated as reads for the post-DMA sync as well. */
2440 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2441 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2442 dir |= BUS_DMASYNC_POSTREAD;
2443 if (cm->cm_flags & MFI_CMD_DATAOUT)
2444 dir |= BUS_DMASYNC_POSTWRITE;
2446 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2447 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2448 cm->cm_flags &= ~MFI_CMD_MAPPED;
2451 cm->cm_flags |= MFI_CMD_COMPLETED;
2453 if (cm->cm_complete != NULL)
2454 cm->cm_complete(cm);
/*
 * mfi_abort: issue an MFI_CMD_ABORT frame targeting *cm_abort, then wait
 * (up to 5 tsleep intervals) for the aborted command pointer to be cleared
 * by the completion path.  If the firmware never aborts it, force the
 * command's completion handler so the caller is not left hanging.
 */
2460 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2462 struct mfi_command *cm;
2463 struct mfi_abort_frame *abort;
2465 uint32_t context = 0;
2467 mtx_lock(&sc->mfi_io_lock);
2468 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2472 /* Zero out the MFI frame */
2473 context = cm->cm_frame->header.context;
2474 bzero(cm->cm_frame, sizeof(union mfi_frame));
2475 cm->cm_frame->header.context = context;
2477 abort = &cm->cm_frame->abort;
2478 abort->header.cmd = MFI_CMD_ABORT;
2479 abort->header.flags = 0;
2480 abort->header.scsi_status = 0;
/* Identify the victim command by context and split 64-bit bus address. */
2481 abort->abort_context = (*cm_abort)->cm_frame->header.context;
2482 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2483 abort->abort_mfi_addr_hi =
2484 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2486 cm->cm_flags = MFI_CMD_POLLED;
2489 mfi_release_command(cm);
2491 mtx_unlock(&sc->mfi_io_lock);
/* Wait for the completion path to NULL out *cm_abort. */
2492 while (i < 5 && *cm_abort != NULL) {
2493 tsleep(cm_abort, 0, "mfiabort",
2497 if (*cm_abort != NULL) {
2498 /* Force a complete if command didn't abort */
2499 mtx_lock(&sc->mfi_io_lock);
2500 (*cm_abort)->cm_complete(*cm_abort);
2501 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_dump_blocks: polled LD_WRITE of a buffer to a logical disk,
 * used by the crash-dump path (no interrupts available).  data_len is
 * expressed in sectors, rounded up from the byte length.
 */
2508 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2511 struct mfi_command *cm;
2512 struct mfi_io_frame *io;
2514 uint32_t context = 0;
2516 if ((cm = mfi_dequeue_free(sc)) == NULL)
2519 /* Zero out the MFI frame */
2520 context = cm->cm_frame->header.context;
2521 bzero(cm->cm_frame, sizeof(union mfi_frame));
2522 cm->cm_frame->header.context = context;
2524 io = &cm->cm_frame->io;
2525 io->header.cmd = MFI_CMD_LD_WRITE;
2526 io->header.target_id = id;
2527 io->header.timeout = 0;
2528 io->header.flags = 0;
2529 io->header.scsi_status = 0;
2530 io->header.sense_len = MFI_SENSE_LEN;
/* Round the byte count up to whole sectors. */
2531 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2532 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2533 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2534 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2535 io->lba_lo = lba & 0xffffffff;
2538 cm->cm_sg = &io->sgl;
2539 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
/* Polled: the dump path cannot sleep or take interrupts. */
2540 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2542 error = mfi_mapcmd(sc, cm);
2543 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2544 BUS_DMASYNC_POSTWRITE);
2545 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2546 mfi_release_command(cm);
/*
 * mfi_dump_syspd_blocks: polled write of a buffer to a system physical
 * disk via a SCSI pass-through frame (MFI_CMD_PD_SCSI_IO), used by the
 * crash-dump path.  A CDB is built from the LBA/blockcount by
 * mfi_build_cdb().
 */
2552 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2555 struct mfi_command *cm;
2556 struct mfi_pass_frame *pass;
2557 int error, readop, cdb_len;
2560 if ((cm = mfi_dequeue_free(sc)) == NULL)
2563 pass = &cm->cm_frame->pass;
2564 bzero(pass->cdb, 16);
2565 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Convert the byte length into a whole-sector block count. */
2568 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2569 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2570 pass->header.target_id = id;
2571 pass->header.timeout = 0;
2572 pass->header.flags = 0;
2573 pass->header.scsi_status = 0;
2574 pass->header.sense_len = MFI_SENSE_LEN;
2575 pass->header.data_len = len;
2576 pass->header.cdb_len = cdb_len;
2577 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2578 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2581 cm->cm_sg = &pass->sgl;
2582 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2583 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2585 error = mfi_mapcmd(sc, cm);
2586 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2587 BUS_DMASYNC_POSTWRITE);
2588 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2589 mfi_release_command(cm);
/*
 * mfi_open: cdev open entry point.  Refuses new opens while the driver
 * is detaching; otherwise marks the control device as open.
 */
2595 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2597 struct mfi_softc *sc;
2602 mtx_lock(&sc->mfi_io_lock);
2603 if (sc->mfi_detaching)
2606 sc->mfi_flags |= MFI_FLAGS_OPEN;
2609 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_close: cdev close entry point.  Clears the open flag and removes
 * any AEN registration entries belonging to the closing process.
 */
2615 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2617 struct mfi_softc *sc;
2618 struct mfi_aen *mfi_aen_entry, *tmp;
2622 mtx_lock(&sc->mfi_io_lock);
2623 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* Drop this process's AEN registrations; _SAFE since we free entries. */
2625 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2626 if (mfi_aen_entry->p == curproc) {
2627 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2629 free(mfi_aen_entry, M_MFIBUF);
2632 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_config_lock: take the exclusive configuration sx lock for DCMD
 * opcodes that change the array configuration; returns whether the lock
 * was taken so mfi_config_unlock() can pair with it.
 */
2637 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2641 case MFI_DCMD_LD_DELETE:
2642 case MFI_DCMD_CFG_ADD:
2643 case MFI_DCMD_CFG_CLEAR:
2644 sx_xlock(&sc->mfi_config_lock);
/* mfi_config_unlock: release the config sx lock iff mfi_config_lock() took it. */
2652 mfi_config_unlock(struct mfi_softc *sc, int locked)
2656 sx_xunlock(&sc->mfi_config_lock)?;
2660 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * Looks up the logical disk / system PD a config-changing DCMD targets
 * and disables (quiesces) it before the command is issued; a non-zero
 * return vetoes the command.  Caller must hold mfi_io_lock.
 */
2664 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2666 struct mfi_disk *ld, *ld2;
2668 struct mfi_system_pd *syspd = NULL;
2672 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2674 switch (cm->cm_frame->dcmd.opcode) {
2675 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE. */
2676 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2677 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2683 error = mfi_disk_disable(ld);
2685 case MFI_DCMD_CFG_CLEAR:
2686 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2687 error = mfi_disk_disable(ld);
/* On failure, re-enable the disks disabled so far. */
2692 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2695 mfi_disk_enable(ld2);
2699 case MFI_DCMD_PD_STATE_SET:
2700 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2702 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2703 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2704 if (syspd->pd_id == syspd_id)
2711 error = mfi_syspd_disable(syspd);
2719 /* Perform post-issue checks on commands from userland. */
/*
 * After a config-changing DCMD completes: on success, detach the child
 * device(s) of deleted/cleared volumes; on failure, re-enable whatever
 * mfi_check_command_pre() disabled.  Called with mfi_io_lock held
 * (dropped temporarily around device_delete_child).
 */
2721 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2723 struct mfi_disk *ld, *ldn;
2724 struct mfi_system_pd *syspd = NULL;
2728 switch (cm->cm_frame->dcmd.opcode) {
2729 case MFI_DCMD_LD_DELETE:
2730 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2731 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
/* NOTE(review): "dissappeared" typo in the panic string below. */
2734 KASSERT(ld != NULL, ("volume dissappeared"));
2735 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2736 mtx_unlock(&sc->mfi_io_lock);
2738 device_delete_child(sc->mfi_dev, ld->ld_dev);
2740 mtx_lock(&sc->mfi_io_lock);
2742 mfi_disk_enable(ld);
2744 case MFI_DCMD_CFG_CLEAR:
2745 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2746 mtx_unlock(&sc->mfi_io_lock);
2748 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2749 device_delete_child(sc->mfi_dev, ld->ld_dev);
2752 mtx_lock(&sc->mfi_io_lock);
/* Command failed: bring every logical disk back online. */
2754 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2755 mfi_disk_enable(ld);
2758 case MFI_DCMD_CFG_ADD:
2761 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2764 case MFI_DCMD_PD_STATE_SET:
2765 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2767 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2768 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2769 if (syspd->pd_id == syspd_id)
2775 /* If the transition fails then enable the syspd again */
2776 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2777 mfi_syspd_enable(syspd);
/*
 * mfi_check_for_sscd: decide whether a CFG_ADD / LD_DELETE DCMD targets
 * a CacheCade (SSCD) volume, for which the normal pre/post disk
 * enable/disable checks are skipped.  For LD_DELETE an internal
 * MFI_DCMD_LD_GET_INFO is issued to inspect the target LD's parameters.
 *
 * Fix: the allocation-failure message concatenated to
 * "Failed to allocateMFI_DCMD_LD_GET_INFO %d" with no separating space
 * and no trailing newline; both are corrected below.
 */
2783 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2785 struct mfi_config_data *conf_data;
2786 struct mfi_command *ld_cm = NULL;
2787 struct mfi_ld_info *ld_info = NULL;
2788 struct mfi_ld_config *ld;
2792 conf_data = (struct mfi_config_data *)cm->cm_data;
2794 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
/* The LD config record follows the array records in the payload. */
2795 p = (char *)conf_data->array;
2796 p += conf_data->array_size * conf_data->array_count;
2797 ld = (struct mfi_ld_config *)p;
2798 if (ld->params.isSSCD == 1)
2800 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2801 error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2802 (void **)&ld_info, sizeof(*ld_info));
2804 device_printf(sc->mfi_dev, "Failed to allocate "
2805 "MFI_DCMD_LD_GET_INFO %d\n", error);
2807 free(ld_info, M_MFIBUF);
2810 ld_cm->cm_flags = MFI_CMD_DATAIN;
/* mbox[0] carries the LD id being deleted; query that same LD. */
2811 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2812 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2813 if (mfi_wait_command(sc, ld_cm) != 0) {
2814 device_printf(sc->mfi_dev, "failed to get log drv\n");
2815 mfi_release_command(ld_cm);
2816 free(ld_info, M_MFIBUF);
2820 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2821 free(ld_info, M_MFIBUF);
2822 mfi_release_command(ld_cm);
2826 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2828 if (ld_info->ld_config.params.isSSCD == 1)
2831 mfi_release_command(ld_cm);
2832 free(ld_info, M_MFIBUF);
/*
 * mfi_stp_cmd: set up kernel bounce buffers and a scatter/gather list
 * for a user-originated MFI_CMD_STP (SAS Serial Tunneling Protocol)
 * frame.  For each user iovec a DMA tag/map/buffer is created, the user
 * data copied in, and both the megasas_sge array inside the frame and
 * the frame's sg32/sg64 list pointed at the bounce buffers.
 *
 * Fix: in the 32-bit SGL branch the bus address was stored into
 * sg32[i].len (assigned twice) and sg32[i].addr was never set, so the
 * firmware received no data address and a length equal to the bus
 * address.  The first store now targets sg32[i].addr.
 */
2839 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2842 struct mfi_ioc_packet *ioc;
2843 ioc = (struct mfi_ioc_packet *)arg;
2844 int sge_size, error;
2845 struct megasas_sge *kern_sge;
2847 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2848 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2849 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2851 if (sizeof(bus_addr_t) == 8) {
2852 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2853 cm->cm_extra_frames = 2;
2854 sge_size = sizeof(struct mfi_sg64);
2856 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2857 sge_size = sizeof(struct mfi_sg32);
2860 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
/* One DMA tag/map/buffer per user iovec. */
2861 for (i = 0; i < ioc->mfi_sge_count; i++) {
2862 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2863 1, 0, /* algnmnt, boundary */
2864 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2865 BUS_SPACE_MAXADDR, /* highaddr */
2866 NULL, NULL, /* filter, filterarg */
2867 ioc->mfi_sgl[i].iov_len,/* maxsize */
2869 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2870 BUS_DMA_ALLOCNOW, /* flags */
2871 NULL, NULL, /* lockfunc, lockarg */
2872 &sc->mfi_kbuff_arr_dmat[i])) {
2873 device_printf(sc->mfi_dev,
2874 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2878 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2879 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2880 &sc->mfi_kbuff_arr_dmamap[i])) {
2881 device_printf(sc->mfi_dev,
2882 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2886 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2887 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2888 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2889 &sc->mfi_kbuff_arr_busaddr[i], 0);
2891 if (!sc->kbuff_arr[i]) {
2892 device_printf(sc->mfi_dev,
2893 "Could not allocate memory for kbuff_arr info\n");
2896 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2897 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2899 if (sizeof(bus_addr_t) == 8) {
2900 cm->cm_frame->stp.sgl.sg64[i].addr =
2901 kern_sge[i].phys_addr;
2902 cm->cm_frame->stp.sgl.sg64[i].len =
2903 ioc->mfi_sgl[i].iov_len;
/* BUG FIX: was "sg32[i].len = kern_sge[i].phys_addr" (addr never set). */
2905 cm->cm_frame->stp.sgl.sg32[i].addr =
2906 kern_sge[i].phys_addr;
2907 cm->cm_frame->stp.sgl.sg32[i].len =
2908 ioc->mfi_sgl[i].iov_len;
/* Copy the user's data into the freshly mapped bounce buffer. */
2911 error = copyin(ioc->mfi_sgl[i].iov_base,
2913 ioc->mfi_sgl[i].iov_len);
2915 device_printf(sc->mfi_dev, "Copy in failed\n");
2920 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * mfi_user_command: execute a userland MFIIO_PASSTHRU DCMD.  Copies the
 * user buffer in, runs the DCMD synchronously, then copies frame and
 * buffer back out.
 */
2925 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2927 struct mfi_command *cm;
2928 struct mfi_dcmd_frame *dcmd;
2929 void *ioc_buf = NULL;
2931 int error = 0, locked;
2934 if (ioc->buf_size > 0) {
2935 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
/* NOTE(review): dead check -- malloc(M_WAITOK) cannot return NULL. */
2936 if (ioc_buf == NULL) {
2939 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2941 device_printf(sc->mfi_dev, "failed to copyin\n");
2942 free(ioc_buf, M_MFIBUF);
2947 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2949 mtx_lock(&sc->mfi_io_lock);
2950 while ((cm = mfi_dequeue_free(sc)) == NULL)
2951 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2953 /* Save context for later */
2954 context = cm->cm_frame->header.context;
2956 dcmd = &cm->cm_frame->dcmd;
2957 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2959 cm->cm_sg = &dcmd->sgl;
2960 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2961 cm->cm_data = ioc_buf;
2962 cm->cm_len = ioc->buf_size;
2964 /* restore context */
2965 cm->cm_frame->header.context = context;
2967 /* Cheat since we don't know if we're writing or reading */
2968 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2970 error = mfi_check_command_pre(sc, cm);
2974 error = mfi_wait_command(sc, cm);
2976 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2979 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2980 mfi_check_command_post(sc, cm);
2982 mfi_release_command(cm);
2983 mtx_unlock(&sc->mfi_io_lock);
2984 mfi_config_unlock(sc, locked);
2985 if (ioc->buf_size > 0)
2986 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2988 free(ioc_buf, M_MFIBUF);
/* Convert a (possibly 32-bit) user pointer value to a kernel void *. */
2992 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * mfi_ioctl: main cdev ioctl dispatcher.  Handles queue statistics,
 * disk queries, raw MFI frame pass-through (MFI_CMD / COMPAT_FREEBSD32
 * variants), AEN registration, Linux shims, and MFIIO_PASSTHRU.
 * NOTE(review): this listing is elided -- many lines are missing between
 * the numbered statements.
 */
2995 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2997 struct mfi_softc *sc;
2998 union mfi_statrequest *ms;
2999 struct mfi_ioc_packet *ioc;
3000 #ifdef COMPAT_FREEBSD32
3001 struct mfi_ioc_packet32 *ioc32;
3003 struct mfi_ioc_aen *aen;
3004 struct mfi_command *cm = NULL;
3005 uint32_t context = 0;
3006 union mfi_sense_ptr sense_ptr;
3007 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3010 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3011 #ifdef COMPAT_FREEBSD32
3012 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3013 struct mfi_ioc_passthru iop_swab;
/* Refuse all ioctls while the adapter is in a critical error state. */
3023 if (sc->hw_crit_error)
3026 if (sc->issuepend_done == 0)
3031 ms = (union mfi_statrequest *)arg;
3032 switch (ms->ms_item) {
3037 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3038 sizeof(struct mfi_qstat));
3045 case MFIIO_QUERY_DISK:
3047 struct mfi_query_disk *qd;
3048 struct mfi_disk *ld;
3050 qd = (struct mfi_query_disk *)arg;
3051 mtx_lock(&sc->mfi_io_lock);
3052 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3053 if (ld->ld_id == qd->array_id)
3058 mtx_unlock(&sc->mfi_io_lock);
3062 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3064 bzero(qd->devname, SPECNAMELEN + 1);
3065 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3066 mtx_unlock(&sc->mfi_io_lock);
3070 #ifdef COMPAT_FREEBSD32
3074 devclass_t devclass;
3075 ioc = (struct mfi_ioc_packet *)arg;
/* Unit 0 proxies requests addressed to other adapters. */
3078 adapter = ioc->mfi_adapter_no;
3079 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3080 devclass = devclass_find("mfi");
3081 sc = devclass_get_softc(devclass, adapter);
3083 mtx_lock(&sc->mfi_io_lock);
3084 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3085 mtx_unlock(&sc->mfi_io_lock);
3088 mtx_unlock(&sc->mfi_io_lock);
3092 * save off original context since copying from user
3093 * will clobber some data
3095 context = cm->cm_frame->header.context;
3096 cm->cm_frame->header.context = cm->cm_index;
3098 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3099 2 * MEGAMFI_FRAME_SIZE);
3100 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3101 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3102 cm->cm_frame->header.scsi_status = 0;
3103 cm->cm_frame->header.pad0 = 0;
3104 if (ioc->mfi_sge_count) {
3106 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3110 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3111 cm->cm_flags |= MFI_CMD_DATAIN;
3112 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3113 cm->cm_flags |= MFI_CMD_DATAOUT;
3114 /* Legacy app shim */
3115 if (cm->cm_flags == 0)
3116 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3117 cm->cm_len = cm->cm_frame->header.data_len;
/* STP frames carry an extra first-iovec payload in cm_stp_len. */
3118 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3119 #ifdef COMPAT_FREEBSD32
3120 if (cmd == MFI_CMD) {
3123 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3124 #ifdef COMPAT_FREEBSD32
3126 /* 32bit on 64bit */
3127 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3128 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3131 cm->cm_len += cm->cm_stp_len;
3134 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3135 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3137 if (cm->cm_data == NULL) {
3138 device_printf(sc->mfi_dev, "Malloc failed\n");
3145 /* restore header context */
3146 cm->cm_frame->header.context = context;
3148 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3149 res = mfi_stp_cmd(sc, cm, arg);
/* Copy user data in for DATAOUT (and STP) commands. */
3154 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3155 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3156 for (i = 0; i < ioc->mfi_sge_count; i++) {
3157 #ifdef COMPAT_FREEBSD32
3158 if (cmd == MFI_CMD) {
3161 addr = ioc->mfi_sgl[i].iov_base;
3162 len = ioc->mfi_sgl[i].iov_len;
3163 #ifdef COMPAT_FREEBSD32
3165 /* 32bit on 64bit */
3166 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3167 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3168 len = ioc32->mfi_sgl[i].iov_len;
3171 error = copyin(addr, temp, len);
3173 device_printf(sc->mfi_dev,
3174 "Copy in failed\n");
3182 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3183 locked = mfi_config_lock(sc,
3184 cm->cm_frame->dcmd.opcode);
3186 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3187 cm->cm_frame->pass.sense_addr_lo =
3188 (uint32_t)cm->cm_sense_busaddr;
3189 cm->cm_frame->pass.sense_addr_hi =
3190 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3192 mtx_lock(&sc->mfi_io_lock);
/* SSCD (CacheCade) commands bypass the pre/post LD checks. */
3193 skip_pre_post = mfi_check_for_sscd (sc, cm);
3194 if (!skip_pre_post) {
3195 error = mfi_check_command_pre(sc, cm);
3197 mtx_unlock(&sc->mfi_io_lock);
3201 if ((error = mfi_wait_command(sc, cm)) != 0) {
3202 device_printf(sc->mfi_dev,
3203 "Controller polled failed\n");
3204 mtx_unlock(&sc->mfi_io_lock);
3207 if (!skip_pre_post) {
3208 mfi_check_command_post(sc, cm);
3210 mtx_unlock(&sc->mfi_io_lock);
3212 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
/* Copy result data back out for DATAIN (and STP) commands. */
3214 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3215 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3216 for (i = 0; i < ioc->mfi_sge_count; i++) {
3217 #ifdef COMPAT_FREEBSD32
3218 if (cmd == MFI_CMD) {
3221 addr = ioc->mfi_sgl[i].iov_base;
3222 len = ioc->mfi_sgl[i].iov_len;
3223 #ifdef COMPAT_FREEBSD32
3225 /* 32bit on 64bit */
3226 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3227 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3228 len = ioc32->mfi_sgl[i].iov_len;
3231 error = copyout(temp, addr, len);
3233 device_printf(sc->mfi_dev,
3234 "Copy out failed\n");
3242 if (ioc->mfi_sense_len) {
3243 /* get user-space sense ptr then copy out sense */
3244 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3245 &sense_ptr.sense_ptr_data[0],
3246 sizeof(sense_ptr.sense_ptr_data));
3247 #ifdef COMPAT_FREEBSD32
3248 if (cmd != MFI_CMD) {
3250 * not 64bit native so zero out any address
3252 sense_ptr.addr.high = 0;
3255 error = copyout(cm->cm_sense, sense_ptr.user_space,
3256 ioc->mfi_sense_len);
3258 device_printf(sc->mfi_dev,
3259 "Copy out failed\n");
3264 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3266 mfi_config_unlock(sc, locked);
3268 free(data, M_MFIBUF);
/* Tear down the per-iovec STP bounce buffers set up by mfi_stp_cmd(). */
3269 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3270 for (i = 0; i < 2; i++) {
3271 if (sc->kbuff_arr[i]) {
3272 if (sc->mfi_kbuff_arr_busaddr != 0)
3274 sc->mfi_kbuff_arr_dmat[i],
3275 sc->mfi_kbuff_arr_dmamap[i]
3277 if (sc->kbuff_arr[i] != NULL)
3279 sc->mfi_kbuff_arr_dmat[i],
3281 sc->mfi_kbuff_arr_dmamap[i]
3283 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3284 bus_dma_tag_destroy(
3285 sc->mfi_kbuff_arr_dmat[i]);
3290 mtx_lock(&sc->mfi_io_lock);
3291 mfi_release_command(cm);
3292 mtx_unlock(&sc->mfi_io_lock);
3298 aen = (struct mfi_ioc_aen *)arg;
3299 error = mfi_aen_register(sc, aen->aen_seq_num,
3300 aen->aen_class_locale);
3303 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3305 devclass_t devclass;
3306 struct mfi_linux_ioc_packet l_ioc;
3309 devclass = devclass_find("mfi");
3310 if (devclass == NULL)
3313 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3316 adapter = l_ioc.lioc_adapter_no;
3317 sc = devclass_get_softc(devclass, adapter);
3320 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3321 cmd, arg, flag, td));
3324 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3326 devclass_t devclass;
3327 struct mfi_linux_ioc_aen l_aen;
3330 devclass = devclass_find("mfi");
3331 if (devclass == NULL)
3334 error = copyin(arg, &l_aen, sizeof(l_aen));
3337 adapter = l_aen.laen_adapter_no;
3338 sc = devclass_get_softc(devclass, adapter);
3341 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3342 cmd, arg, flag, td));
3345 #ifdef COMPAT_FREEBSD32
3346 case MFIIO_PASSTHRU32:
/* Widen the 32-bit passthru request, then fall into the 64-bit case. */
3347 iop_swab.ioc_frame = iop32->ioc_frame;
3348 iop_swab.buf_size = iop32->buf_size;
3349 iop_swab.buf = PTRIN(iop32->buf);
3353 case MFIIO_PASSTHRU:
3354 error = mfi_user_command(sc, iop);
3355 #ifdef COMPAT_FREEBSD32
3356 if (cmd == MFIIO_PASSTHRU32)
3357 iop32->ioc_frame = iop_swab.ioc_frame;
3361 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_linux_ioctl_int: Linux-compatibility ioctl handler (32-bit Linux
 * megaraid_sas ABI).  Mirrors the native MFI_CMD path: copy the frame
 * and S/G data in, run the command, copy data/sense/status back out.
 * NOTE(review): this listing is elided -- lines are missing between the
 * numbered statements.
 */
3370 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3372 struct mfi_softc *sc;
3373 struct mfi_linux_ioc_packet l_ioc;
3374 struct mfi_linux_ioc_aen l_aen;
3375 struct mfi_command *cm = NULL;
3376 struct mfi_aen *mfi_aen_entry;
3377 union mfi_sense_ptr sense_ptr;
3378 uint32_t context = 0;
3379 uint8_t *data = NULL, *temp;
3386 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3387 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the user-supplied S/G count before trusting it. */
3391 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3395 mtx_lock(&sc->mfi_io_lock);
3396 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3397 mtx_unlock(&sc->mfi_io_lock);
3400 mtx_unlock(&sc->mfi_io_lock);
3404 * save off original context since copying from user
3405 * will clobber some data
3407 context = cm->cm_frame->header.context;
3409 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3410 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3411 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3412 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3413 cm->cm_frame->header.scsi_status = 0;
3414 cm->cm_frame->header.pad0 = 0;
3415 if (l_ioc.lioc_sge_count)
3417 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3419 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3420 cm->cm_flags |= MFI_CMD_DATAIN;
3421 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3422 cm->cm_flags |= MFI_CMD_DATAOUT;
3423 cm->cm_len = cm->cm_frame->header.data_len;
3425 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3426 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3428 if (cm->cm_data == NULL) {
3429 device_printf(sc->mfi_dev, "Malloc failed\n");
3436 /* restore header context */
3437 cm->cm_frame->header.context = context;
/* Gather the user's iovecs into the contiguous kernel buffer. */
3440 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3441 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3442 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3444 l_ioc.lioc_sgl[i].iov_len);
3446 device_printf(sc->mfi_dev,
3447 "Copy in failed\n");
3450 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3454 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3455 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3457 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3458 cm->cm_frame->pass.sense_addr_lo =
3459 (uint32_t)cm->cm_sense_busaddr;
3460 cm->cm_frame->pass.sense_addr_hi =
3461 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3464 mtx_lock(&sc->mfi_io_lock);
3465 error = mfi_check_command_pre(sc, cm);
3467 mtx_unlock(&sc->mfi_io_lock);
3471 if ((error = mfi_wait_command(sc, cm)) != 0) {
3472 device_printf(sc->mfi_dev,
3473 "Controller polled failed\n");
3474 mtx_unlock(&sc->mfi_io_lock);
3478 mfi_check_command_post(sc, cm);
3479 mtx_unlock(&sc->mfi_io_lock);
/* Scatter the result back into the user's iovecs. */
3482 if (cm->cm_flags & MFI_CMD_DATAIN) {
3483 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3484 error = copyout(temp,
3485 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3486 l_ioc.lioc_sgl[i].iov_len);
3488 device_printf(sc->mfi_dev,
3489 "Copy out failed\n");
3492 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3496 if (l_ioc.lioc_sense_len) {
3497 /* get user-space sense ptr then copy out sense */
3498 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3499 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3500 &sense_ptr.sense_ptr_data[0],
3501 sizeof(sense_ptr.sense_ptr_data));
3504 * only 32bit Linux support so zero out any
3505 * address over 32bit
3507 sense_ptr.addr.high = 0;
3509 error = copyout(cm->cm_sense, sense_ptr.user_space,
3510 l_ioc.lioc_sense_len);
3512 device_printf(sc->mfi_dev,
3513 "Copy out failed\n");
/* Hand the final firmware status back to the caller's frame header. */
3518 error = copyout(&cm->cm_frame->header.cmd_status,
3519 &((struct mfi_linux_ioc_packet*)arg)
3520 ->lioc_frame.hdr.cmd_status,
3523 device_printf(sc->mfi_dev,
3524 "Copy out failed\n");
3529 mfi_config_unlock(sc, locked);
3531 free(data, M_MFIBUF);
3533 mtx_lock(&sc->mfi_io_lock);
3534 mfi_release_command(cm);
3535 mtx_unlock(&sc->mfi_io_lock);
3539 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3540 error = copyin(arg, &l_aen, sizeof(l_aen));
3543 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3544 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3546 mtx_lock(&sc->mfi_io_lock);
3547 if (mfi_aen_entry != NULL) {
3548 mfi_aen_entry->p = curproc;
3549 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3552 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3553 l_aen.laen_class_locale);
/* Registration failed: undo the pid-list insertion. */
3556 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3558 free(mfi_aen_entry, M_MFIBUF);
3560 mtx_unlock(&sc->mfi_io_lock);
3564 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * mfi_poll: cdev poll entry point.  Reports the device readable when an
 * AEN has fired; otherwise records the thread in the selinfo so it is
 * woken on the next event.
 */
3573 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3575 struct mfi_softc *sc;
3580 if (poll_events & (POLLIN | POLLRDNORM)) {
3581 if (sc->mfi_aen_triggered != 0) {
3582 revents |= poll_events & (POLLIN | POLLRDNORM);
3583 sc->mfi_aen_triggered = 0;
3585 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3591 if (poll_events & (POLLIN | POLLRDNORM)) {
3592 sc->mfi_poll_waiting = 1;
3593 selrecord(td, &sc->mfi_select);
/*
 * Body of a debug dump routine (its header line is elided from this
 * listing; presumably mfi_dump_all -- TODO confirm): walks every mfi
 * softc in the devclass and reports busy commands older than
 * MFI_CMD_TIMEOUT seconds.
 */
3603 struct mfi_softc *sc;
3604 struct mfi_command *cm;
3610 dc = devclass_find("mfi");
3612 printf("No mfi dev class\n");
/* Iterate adapter units until devclass_get_softc() runs out. */
3616 for (i = 0; ; i++) {
3617 sc = devclass_get_softc(dc, i);
3620 device_printf(sc->mfi_dev, "Dumping\n\n");
3622 deadline = time_uptime - MFI_CMD_TIMEOUT;
3623 mtx_lock(&sc->mfi_io_lock);
3624 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3625 if (cm->cm_timestamp < deadline) {
3626 device_printf(sc->mfi_dev,
3627 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3628 cm, (int)(time_uptime - cm->cm_timestamp));
3639 mtx_unlock(&sc->mfi_io_lock);
/*
 * mfi_timeout: watchdog callout.  Scans the busy queue for commands
 * older than MFI_CMD_TIMEOUT seconds (skipping the persistent AEN and
 * map-sync commands), attempts a Thunderbolt adapter reset when needed,
 * and re-arms itself.
 */
3646 mfi_timeout(void *data)
3648 struct mfi_softc *sc = (struct mfi_softc *)data;
3649 struct mfi_command *cm;
3653 deadline = time_uptime - MFI_CMD_TIMEOUT;
3654 if (sc->adpreset == 0) {
3655 if (!mfi_tbolt_reset(sc)) {
3656 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3660 mtx_lock(&sc->mfi_io_lock);
3661 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
/* The long-lived AEN and map-sync commands never "time out". */
3662 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3664 if (cm->cm_timestamp < deadline) {
/* During an adapter reset, refresh timestamps instead of complaining. */
3665 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3666 cm->cm_timestamp = time_uptime;
3668 device_printf(sc->mfi_dev,
3669 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3670 cm, (int)(time_uptime - cm->cm_timestamp)
3673 MFI_VALIDATE_CMD(sc, cm);
3684 mtx_unlock(&sc->mfi_io_lock);
3686 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,