2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
56 #include "opt_compat.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/kernel.h>
65 #include <sys/selinfo.h>
68 #include <sys/eventhandler.h>
70 #include <sys/bus_dma.h>
72 #include <sys/ioccom.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
79 #include <machine/bus.h>
80 #include <machine/resource.h>
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
88 static int mfi_alloc_commands(struct mfi_softc *);
89 static int mfi_comms_init(struct mfi_softc *);
90 static int mfi_get_controller_info(struct mfi_softc *);
91 static int mfi_get_log_state(struct mfi_softc *,
92 struct mfi_evt_log_state **);
93 static int mfi_parse_entries(struct mfi_softc *, int, int);
94 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void mfi_startup(void *arg);
96 static void mfi_intr(void *arg);
97 static void mfi_ldprobe(struct mfi_softc *sc);
98 static void mfi_syspdprobe(struct mfi_softc *sc);
99 static void mfi_handle_evt(void *context, int pending);
100 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void mfi_aen_complete(struct mfi_command *);
102 static int mfi_add_ld(struct mfi_softc *sc, int);
103 static void mfi_add_ld_complete(struct mfi_command *);
104 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107 static void mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
112 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
113 static void mfi_timeout(void *);
114 static int mfi_user_command(struct mfi_softc *,
115 struct mfi_ioc_passthru *);
116 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
117 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
118 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
119 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
120 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
121 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
122 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
124 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
126 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
127 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
128 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
129 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
130 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
/*
 * Loader tunables / sysctls under hw.mfi.* controlling event reporting,
 * driver command-pool sizing, and JBOD change detection.
 */
132 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
/* Which event locales the AEN mechanism subscribes to (default: all). */
133 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
134 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
135 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
136 0, "event message locale");
/* Minimum event class to report (default: informational and above). */
138 static int mfi_event_class = MFI_EVT_CLASS_INFO;
139 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
140 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
141 0, "event message class");
/*
 * Upper bound on driver commands; the actual pool is the smaller of this
 * and the firmware-advertised maximum (see mfi_alloc_commands). Read-only
 * at runtime, settable as a loader tunable.
 */
143 static int mfi_max_cmds = 128;
144 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
145 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
/* Non-zero: probe for JBOD (system PD) configuration changes. */
148 static int mfi_detect_jbod_change = 1;
149 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
150 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
151 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
153 /* Management interface */
154 static d_open_t mfi_open;
155 static d_close_t mfi_close;
156 static d_ioctl_t mfi_ioctl;
157 static d_poll_t mfi_poll;
/*
 * Character-device switch for the /dev/mfi%d management node.
 * NOTE(review): several initializer lines (.d_open, .d_poll, .d_name, and
 * the closing brace) appear to be missing from this excerpt — verify
 * against the full source.
 */
159 static struct cdevsw mfi_cdevsw = {
160 .d_version = D_VERSION,
163 .d_close = mfi_close,
164 .d_ioctl = mfi_ioctl,
/* Malloc type used for all driver heap allocations. */
169 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
171 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/* Global DMA bookkeeping used by "skinny" (low-end SAS2) controllers. */
172 struct mfi_skinny_dma_info mfi_skinny;
/*
 * Enable firmware-to-host interrupts on xscale (1064R-class) controllers
 * by programming the outbound interrupt mask register.
 */
175 mfi_enable_intr_xscale(struct mfi_softc *sc)
177 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on PPC-class controllers.  Each chip generation
 * (1078, GEN2, SKINNY) has its own doorbell-clear and mask-register
 * programming; the mask value unmasks only that generation's interrupt bit.
 */
181 mfi_enable_intr_ppc(struct mfi_softc *sc)
183 if (sc->mfi_flags & MFI_FLAGS_1078) {
184 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
185 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
187 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
188 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
189 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
191 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
/* Skinny parts have no doorbell-clear write; only the mask is set. */
192 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
/* Read the raw firmware status word (xscale: outbound message reg 0). */
197 mfi_read_fw_status_xscale(struct mfi_softc *sc)
199 return MFI_READ4(sc, MFI_OMSG0);
/* Read the raw firmware status word (ppc: outbound scratchpad reg 0). */
203 mfi_read_fw_status_ppc(struct mfi_softc *sc)
205 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check whether the xscale controller raised a valid interrupt; if so,
 * acknowledge it by writing the status back.  The non-zero early-return
 * path for an invalid interrupt is not visible in this excerpt.
 */
209 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
213 status = MFI_READ4(sc, MFI_OSTS);
214 if ((status & MFI_OSTS_INTR_VALID) == 0)
217 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * Check for and acknowledge an interrupt on PPC-class controllers.
 * Each generation tests its own reply-message bit; skinny parts ack via
 * the status register while 1078/GEN2 ack via doorbell-clear reg 0.
 * The early-return bodies for the "no interrupt" cases are truncated here.
 */
222 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
226 status = MFI_READ4(sc, MFI_OSTS);
227 if (sc->mfi_flags & MFI_FLAGS_1078) {
228 if (!(status & MFI_1078_RM)) {
232 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
233 if (!(status & MFI_GEN2_RM)) {
237 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
238 if (!(status & MFI_SKINNY_RM)) {
/* Acknowledge: skinny writes OSTS, other generations write ODCR0. */
242 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
243 MFI_WRITE4(sc, MFI_OSTS, status);
245 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to an xscale controller: the frame bus address is
 * shifted right 3 bits and the frame count is packed into the low bits
 * of the inbound queue port write.
 */
250 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
252 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
/*
 * Post a command to a PPC-class controller.  Skinny parts use a 64-bit
 * inbound queue port (low word carries address | frame_cnt<<1 | 1, high
 * word zero); other generations use the single 32-bit port.
 */
256 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
258 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
259 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
260 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
262 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
/*
 * Drive the firmware state machine toward MFI_FWSTATE_READY, issuing the
 * appropriate inbound-doorbell kick for each intermediate state and then
 * polling (up to max_wait) for the state to change.  Skinny/ThunderBolt
 * parts use the SKINNY_IDB doorbell; older parts use IDB.
 * NOTE(review): the switch header, break statements, polling delay, and
 * return paths are missing from this excerpt — read alongside full source.
 */
267 mfi_transition_firmware(struct mfi_softc *sc)
269 uint32_t fw_state, cur_state;
271 uint32_t cur_abs_reg_val = 0;
272 uint32_t prev_abs_reg_val = 0;
274 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
275 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
276 while (fw_state != MFI_FWSTATE_READY) {
278 device_printf(sc->mfi_dev, "Waiting for firmware to "
280 cur_state = fw_state;
282 case MFI_FWSTATE_FAULT:
/* Fatal: firmware reported an internal fault. */
283 device_printf(sc->mfi_dev, "Firmware fault\n");
285 case MFI_FWSTATE_WAIT_HANDSHAKE:
286 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
287 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
289 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
290 max_wait = MFI_RESET_WAIT_TIME;
292 case MFI_FWSTATE_OPERATIONAL:
/* Ask an already-operational firmware to reset back to READY. */
293 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
294 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
296 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
297 max_wait = MFI_RESET_WAIT_TIME;
299 case MFI_FWSTATE_UNDEFINED:
300 case MFI_FWSTATE_BB_INIT:
301 max_wait = MFI_RESET_WAIT_TIME;
303 case MFI_FWSTATE_FW_INIT_2:
304 max_wait = MFI_RESET_WAIT_TIME;
306 case MFI_FWSTATE_FW_INIT:
307 case MFI_FWSTATE_FLUSH_CACHE:
308 max_wait = MFI_RESET_WAIT_TIME;
310 case MFI_FWSTATE_DEVICE_SCAN:
311 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
/* Remember the full status word so scan progress can be detected. */
312 prev_abs_reg_val = cur_abs_reg_val;
314 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
315 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
316 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
318 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
319 max_wait = MFI_RESET_WAIT_TIME;
322 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
/* Poll for a state transition; loop granularity is 1/10 of max_wait units. */
326 for (i = 0; i < (max_wait * 10); i++) {
327 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
328 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
329 if (fw_state == cur_state)
/* Device scan counts as progress if the raw status word changed at all. */
334 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
335 /* Check the device scanning progress */
336 if (prev_abs_reg_val != cur_abs_reg_val) {
340 if (fw_state == cur_state) {
341 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load() callback: store the single mapped segment's bus
 * address into the bus_addr_t pointed to by arg.
 */
350 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
355 *addr = segs[0].ds_addr;
/*
 * Main attach routine: bring the controller to READY, allocate every DMA
 * region the driver needs (version buffer, ThunderBolt pools, comms
 * queues, command frames, sense buffers), initialize firmware comms,
 * wire the interrupt handler, and create the management cdev, sysctls,
 * watchdog, and bus children.
 * NOTE(review): this excerpt is heavily truncated — most error-path
 * goto/return lines and several closing braces are missing.
 */
360 mfi_attach(struct mfi_softc *sc)
363 int error, commsz, framessz, sensesz;
364 int frames, unit, max_fw_sge;
365 uint32_t tb_mem_size = 0;
370 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
/* Locks, queues and deferred tasks used throughout the driver's life. */
373 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
374 sx_init(&sc->mfi_config_lock, "MFI config");
375 TAILQ_INIT(&sc->mfi_ld_tqh);
376 TAILQ_INIT(&sc->mfi_syspd_tqh);
377 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
378 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
379 TAILQ_INIT(&sc->mfi_evt_queue);
380 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
381 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
382 TAILQ_INIT(&sc->mfi_aen_pids);
383 TAILQ_INIT(&sc->mfi_cam_ccbq);
391 sc->last_seq_num = 0;
/* OCR stays disabled until controller info confirms support (see below). */
392 sc->disableOnlineCtrlReset = 1;
393 sc->issuepend_done = 1;
394 sc->hw_crit_error = 0;
/* Select the register-access method table for this chip generation. */
396 if (sc->mfi_flags & MFI_FLAGS_1064R) {
397 sc->mfi_enable_intr = mfi_enable_intr_xscale;
398 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
399 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
400 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
401 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
402 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
403 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
404 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
405 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
406 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
407 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
409 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
411 sc->mfi_enable_intr = mfi_enable_intr_ppc;
412 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
413 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
414 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
418 /* Before we get too far, see if the firmware is working */
419 if ((error = mfi_transition_firmware(sc)) != 0) {
420 device_printf(sc->mfi_dev, "Firmware not in READY state, "
421 "error %d\n", error);
/* Version buffer DMA region (LSI errata workaround LSIP200113393). */
425 /* Start: LSIP200113393 */
426 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
427 1, 0, /* algnmnt, boundary */
428 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
429 BUS_SPACE_MAXADDR, /* highaddr */
430 NULL, NULL, /* filter, filterarg */
431 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
433 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
435 NULL, NULL, /* lockfunc, lockarg */
436 &sc->verbuf_h_dmat)) {
437 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
440 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
441 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
442 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
445 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
446 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
447 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
448 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
449 /* End: LSIP200113393 */
452 * Get information needed for sizing the contiguous memory for the
453 * frame pool. Size down the sgl parameter since we know that
454 * we will never need more than what's required for MAXPHYS.
455 * It would be nice if these constants were available at runtime
456 * instead of compile time.
/* Firmware status word encodes max commands (low bits) and max SGEs. */
458 status = sc->mfi_read_fw_status(sc);
459 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
460 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
461 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
463 /* ThunderBolt Support get the contiguous memory */
465 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
466 mfi_tbolt_init_globals(sc);
467 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
468 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
469 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
471 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
472 1, 0, /* algnmnt, boundary */
473 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
474 BUS_SPACE_MAXADDR, /* highaddr */
475 NULL, NULL, /* filter, filterarg */
476 tb_mem_size, /* maxsize */
478 tb_mem_size, /* maxsegsize */
480 NULL, NULL, /* lockfunc, lockarg */
482 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
485 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
486 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
487 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
490 bzero(sc->request_message_pool, tb_mem_size);
491 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
492 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
494 /* For ThunderBolt memory init */
495 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
496 0x100, 0, /* alignmnt, boundary */
497 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
498 BUS_SPACE_MAXADDR, /* highaddr */
499 NULL, NULL, /* filter, filterarg */
500 MFI_FRAME_SIZE, /* maxsize */
502 MFI_FRAME_SIZE, /* maxsegsize */
504 NULL, NULL, /* lockfunc, lockarg */
505 &sc->mfi_tb_init_dmat)) {
506 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
509 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
510 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
511 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
514 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
515 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
516 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
517 &sc->mfi_tb_init_busaddr, 0);
518 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
520 device_printf(sc->mfi_dev,
521 "Thunderbolt pool preparation error\n");
526 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
527 we are taking it diffrent from what we have allocated for Request
528 and reply descriptors to avoid confusion later
530 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
531 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
532 1, 0, /* algnmnt, boundary */
533 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
534 BUS_SPACE_MAXADDR, /* highaddr */
535 NULL, NULL, /* filter, filterarg */
536 tb_mem_size, /* maxsize */
538 tb_mem_size, /* maxsegsize */
540 NULL, NULL, /* lockfunc, lockarg */
541 &sc->mfi_tb_ioc_init_dmat)) {
542 device_printf(sc->mfi_dev,
543 "Cannot allocate comms DMA tag\n");
546 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
547 (void **)&sc->mfi_tb_ioc_init_desc,
548 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
549 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
552 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
553 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
554 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
555 &sc->mfi_tb_ioc_init_busaddr, 0);
558 * Create the dma tag for data buffers. Used both for block I/O
559 * and for various internal data queries.
561 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
562 1, 0, /* algnmnt, boundary */
563 BUS_SPACE_MAXADDR, /* lowaddr */
564 BUS_SPACE_MAXADDR, /* highaddr */
565 NULL, NULL, /* filter, filterarg */
566 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
567 sc->mfi_max_sge, /* nsegments */
568 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
569 BUS_DMA_ALLOCNOW, /* flags */
570 busdma_lock_mutex, /* lockfunc */
571 &sc->mfi_io_lock, /* lockfuncarg */
572 &sc->mfi_buffer_dmat)) {
573 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
578 * Allocate DMA memory for the comms queues. Keep it under 4GB for
579 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
580 * entry, so the calculated size here will be will be 1 more than
581 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
583 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
584 sizeof(struct mfi_hwcomms);
585 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
586 1, 0, /* algnmnt, boundary */
587 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
588 BUS_SPACE_MAXADDR, /* highaddr */
589 NULL, NULL, /* filter, filterarg */
590 commsz, /* maxsize */
592 commsz, /* maxsegsize */
594 NULL, NULL, /* lockfunc, lockarg */
595 &sc->mfi_comms_dmat)) {
596 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
599 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
600 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
601 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
604 bzero(sc->mfi_comms, commsz);
605 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
606 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
608 * Allocate DMA memory for the command frames. Keep them in the
609 * lower 4GB for efficiency. Calculate the size of the commands at
610 * the same time; each command is one 64 byte frame plus a set of
611 * additional frames for holding sg lists or other data.
612 * The assumption here is that the SG list will start at the second
613 * frame and not use the unused bytes in the first frame. While this
614 * isn't technically correct, it simplifies the calculation and allows
615 * for command frames that might be larger than an mfi_io_frame.
617 if (sizeof(bus_addr_t) == 8) {
618 sc->mfi_sge_size = sizeof(struct mfi_sg64);
619 sc->mfi_flags |= MFI_FLAGS_SG64;
621 sc->mfi_sge_size = sizeof(struct mfi_sg32);
623 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
624 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
625 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
626 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
627 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
628 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
629 64, 0, /* algnmnt, boundary */
630 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
631 BUS_SPACE_MAXADDR, /* highaddr */
632 NULL, NULL, /* filter, filterarg */
633 framessz, /* maxsize */
635 framessz, /* maxsegsize */
637 NULL, NULL, /* lockfunc, lockarg */
638 &sc->mfi_frames_dmat)) {
639 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
642 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
643 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
644 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
647 bzero(sc->mfi_frames, framessz);
648 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
649 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
651 * Allocate DMA memory for the frame sense data. Keep them in the
652 * lower 4GB for efficiency
654 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
655 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
656 4, 0, /* algnmnt, boundary */
657 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
658 BUS_SPACE_MAXADDR, /* highaddr */
659 NULL, NULL, /* filter, filterarg */
660 sensesz, /* maxsize */
662 sensesz, /* maxsegsize */
664 NULL, NULL, /* lockfunc, lockarg */
665 &sc->mfi_sense_dmat)) {
666 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
669 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
670 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
671 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
674 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
675 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
676 if ((error = mfi_alloc_commands(sc)) != 0)
679 /* Before moving the FW to operational state, check whether
680 * hostmemory is required by the FW or not
683 /* ThunderBolt MFI_IOC2 INIT */
684 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
685 sc->mfi_disable_intr(sc);
686 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
687 device_printf(sc->mfi_dev,
688 "TB Init has failed with error %d\n",error);
692 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
694 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
695 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
697 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
700 sc->mfi_intr_ptr = mfi_intr_tbolt;
701 sc->mfi_enable_intr(sc);
703 if ((error = mfi_comms_init(sc)) != 0)
706 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
707 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
708 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
711 sc->mfi_intr_ptr = mfi_intr;
712 sc->mfi_enable_intr(sc);
714 if ((error = mfi_get_controller_info(sc)) != 0)
716 sc->disableOnlineCtrlReset = 0;
718 /* Register a config hook to probe the bus for arrays */
719 sc->mfi_ich.ich_func = mfi_startup;
720 sc->mfi_ich.ich_arg = sc;
721 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
722 device_printf(sc->mfi_dev, "Cannot establish configuration "
/*
 * NOTE(review): the comma operator below makes the whole condition
 * evaluate `0 != 0`, so any error from mfi_aen_setup() is silently
 * ignored and the error branch is dead code.  This may be a deliberate
 * best-effort (AEN setup failure shouldn't abort attach) — confirm
 * intent before "fixing" it to a plain error check.
 */
726 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
727 mtx_unlock(&sc->mfi_io_lock);
732 * Register a shutdown handler.
734 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
735 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
/* Non-fatal: attach continues without a shutdown hook. */
736 device_printf(sc->mfi_dev, "Warning: shutdown event "
737 "registration failed\n");
741 * Create the control device for doing management
743 unit = device_get_unit(sc->mfi_dev);
744 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
745 0640, "mfi%d", unit);
747 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
748 if (sc->mfi_cdev != NULL)
749 sc->mfi_cdev->si_drv1 = sc;
750 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
751 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
752 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
753 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
754 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
755 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
756 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
757 &sc->mfi_keep_deleted_volumes, 0,
758 "Don't detach the mfid device for a busy volume that is deleted");
760 device_add_child(sc->mfi_dev, "mfip", -1);
761 bus_generic_attach(sc->mfi_dev);
763 /* Start the timeout watchdog */
764 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
765 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
/* ThunderBolt controllers also kick an initial LD map sync. */
768 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
769 mfi_tbolt_sync_map_info(sc);
/*
 * Carve the preallocated frame/sense DMA regions into mfi_command
 * structures — min(hw.mfi.max_cmds, firmware max) of them — creating a
 * per-command DMA map for data buffers and placing each command on the
 * free queue via mfi_release_command().
 */
776 mfi_alloc_commands(struct mfi_softc *sc)
778 struct mfi_command *cm;
782 * XXX Should we allocate all the commands up front, or allocate on
783 * demand later like 'aac' does?
785 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
787 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
788 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
790 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
793 for (i = 0; i < ncmds; i++) {
794 cm = &sc->mfi_commands[i];
/* Slice command i's frame and sense buffer out of the shared pools. */
795 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
796 sc->mfi_cmd_size * i);
797 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
798 sc->mfi_cmd_size * i;
/* The context field round-trips through firmware to identify the cmd. */
799 cm->cm_frame->header.context = i;
800 cm->cm_sense = &sc->mfi_sense[i];
801 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
804 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
805 &cm->cm_dmamap) == 0) {
/* mfi_release_command() requires the io lock and frees to the pool. */
806 mtx_lock(&sc->mfi_io_lock);
807 mfi_release_command(cm);
808 mtx_unlock(&sc->mfi_io_lock);
812 sc->mfi_total_cmds++;
/*
 * Return a command to the free pool.  Scrubs the frame header (as raw
 * 32-bit words, preserving the context field at word offsets 2-3) and
 * the first S/G entry, then resets the per-command driver state and
 * enqueues it on the free list.  Caller must hold mfi_io_lock.
 */
819 mfi_release_command(struct mfi_command *cm)
821 struct mfi_frame_header *hdr;
824 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
827 * Zero out the important fields of the frame, but make sure the
828 * context field is preserved. For efficiency, handle the fields
829 * as 32 bit words. Clear out the first S/G entry too for safety.
831 hdr = &cm->cm_frame->header;
832 if (cm->cm_data != NULL && hdr->sg_count) {
833 cm->cm_sg->sg32[0].len = 0;
834 cm->cm_sg->sg32[0].addr = 0;
837 hdr_data = (uint32_t *)cm->cm_frame;
838 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
839 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
840 hdr_data[4] = 0; /* flags, timeout */
841 hdr_data[5] = 0; /* data_len */
843 cm->cm_extra_frames = 0;
845 cm->cm_complete = NULL;
846 cm->cm_private = NULL;
849 cm->cm_total_frame_size = 0;
850 cm->retry_for_fw_reset = 0;
852 mfi_enqueue_free(cm);
/*
 * Build a DCMD (direct command) frame for the given opcode.  Dequeues a
 * free command, re-zeroes the frame while preserving the context tag,
 * optionally allocates a data buffer of bufsize bytes (returned via
 * *bufp when the caller didn't supply one), and fills in the DCMD header.
 * The command is returned through *cmp; caller holds mfi_io_lock.
 * Error-return lines (free-list empty, malloc failure) are truncated
 * from this excerpt.
 */
856 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
857 uint32_t opcode, void **bufp, size_t bufsize)
859 struct mfi_command *cm;
860 struct mfi_dcmd_frame *dcmd;
862 uint32_t context = 0;
864 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
866 cm = mfi_dequeue_free(sc);
870 /* Zero out the MFI frame */
871 context = cm->cm_frame->header.context;
872 bzero(cm->cm_frame, sizeof(union mfi_frame));
873 cm->cm_frame->header.context = context;
/* Allocate (or reuse the caller-supplied) data buffer for the DCMD. */
875 if ((bufsize > 0) && (bufp != NULL)) {
877 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
879 mfi_release_command(cm);
888 dcmd = &cm->cm_frame->dcmd;
889 bzero(dcmd->mbox, MFI_MBOX_SIZE);
890 dcmd->header.cmd = MFI_CMD_DCMD;
891 dcmd->header.timeout = 0;
892 dcmd->header.flags = 0;
893 dcmd->header.data_len = bufsize;
894 dcmd->header.scsi_status = 0;
895 dcmd->opcode = opcode;
896 cm->cm_sg = &dcmd->sgl;
897 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
900 cm->cm_private = buf;
901 cm->cm_len = bufsize;
/* Hand a freshly-allocated buffer back to the caller. */
904 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI INIT command that tells the firmware where the host's
 * reply queue, producer index, and consumer index live (the mfi_comms
 * DMA region).  The init_qinfo structure is stashed in the frame's S/G
 * area (the second 64-byte frame of the command).  Issued polled, before
 * interrupts are usable.
 */
910 mfi_comms_init(struct mfi_softc *sc)
912 struct mfi_command *cm;
913 struct mfi_init_frame *init;
914 struct mfi_init_qinfo *qinfo;
916 uint32_t context = 0;
918 mtx_lock(&sc->mfi_io_lock);
919 if ((cm = mfi_dequeue_free(sc)) == NULL)
922 /* Zero out the MFI frame */
923 context = cm->cm_frame->header.context;
924 bzero(cm->cm_frame, sizeof(union mfi_frame));
925 cm->cm_frame->header.context = context;
928 * Abuse the SG list area of the frame to hold the init_qinfo
931 init = &cm->cm_frame->init;
932 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
934 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One extra reply slot beyond max commands — a hardware requirement. */
935 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
936 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
937 offsetof(struct mfi_hwcomms, hw_reply_q);
938 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
939 offsetof(struct mfi_hwcomms, hw_pi);
940 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
941 offsetof(struct mfi_hwcomms, hw_ci);
943 init->header.cmd = MFI_CMD_INIT;
944 init->header.data_len = sizeof(struct mfi_init_qinfo);
/* qinfo lives in the frame immediately after the init frame itself. */
945 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
947 cm->cm_flags = MFI_CMD_POLLED;
949 if ((error = mfi_mapcmd(sc, cm)) != 0) {
950 device_printf(sc->mfi_dev, "failed to send init command\n");
951 mtx_unlock(&sc->mfi_io_lock);
954 mfi_release_command(cm);
955 mtx_unlock(&sc->mfi_io_lock);
/*
 * Query the controller via DCMD MFI_DCMD_CTRL_GETINFO (polled) and use
 * the result to compute the max I/O size and the online-controller-reset
 * capability.  On DCMD failure, falls back to an SGE-derived max I/O.
 * Cleanup (free of ci, some error paths) is truncated from this excerpt.
 */
961 mfi_get_controller_info(struct mfi_softc *sc)
963 struct mfi_command *cm = NULL;
964 struct mfi_ctrl_info *ci = NULL;
965 uint32_t max_sectors_1, max_sectors_2;
968 mtx_lock(&sc->mfi_io_lock);
969 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
970 (void **)&ci, sizeof(*ci));
973 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
975 if ((error = mfi_mapcmd(sc, cm)) != 0) {
/* Fallback sizing when the firmware won't answer. */
976 device_printf(sc->mfi_dev, "Failed to get controller info\n");
977 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
983 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
984 BUS_DMASYNC_POSTREAD);
985 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Max I/O is the smaller of the stripe-derived and request-size limits. */
987 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
988 max_sectors_2 = ci->max_request_size;
989 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
990 sc->disableOnlineCtrlReset =
991 ci->properties.OnOffProperties.disableOnlineCtrlReset;
997 mfi_release_command(cm);
998 mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the firmware event-log state (sequence-number bookkeeping) via a
 * polled DCMD MFI_DCMD_CTRL_EVENT_GETINFO.  On success *log_state points
 * at a freshly allocated mfi_evt_log_state the caller must free
 * (M_MFIBUF).  Some error/cleanup lines are truncated from this excerpt.
 */
1003 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1005 struct mfi_command *cm = NULL;
1008 mtx_lock(&sc->mfi_io_lock);
1009 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1010 (void **)log_state, sizeof(**log_state));
1013 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1015 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1016 device_printf(sc->mfi_dev, "Failed to get log state\n");
1020 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1021 BUS_DMASYNC_POSTREAD);
1022 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1026 mfi_release_command(cm);
1027 mtx_unlock(&sc->mfi_io_lock);
/*
 * Set up asynchronous event notification.  When starting from sequence 0,
 * reads the firmware log state, replays events between shutdown_seq_num
 * and newest_seq_num, and resumes from the newest sequence number; then
 * registers the AEN with the configured locale/class filter.
 * NOTE(review): log_state is dereferenced after mfi_get_log_state() —
 * whether the missing lines check `error`/NULL first cannot be seen in
 * this excerpt; confirm against the full source.
 */
1033 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1035 struct mfi_evt_log_state *log_state = NULL;
1036 union mfi_evt class_locale;
1040 class_locale.members.reserved = 0;
1041 class_locale.members.locale = mfi_event_locale;
1042 class_locale.members.evt_class = mfi_event_class;
1044 if (seq_start == 0) {
1045 error = mfi_get_log_state(sc, &log_state);
1046 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1049 free(log_state, M_MFIBUF);
1054 * Walk through any events that fired since the last
1057 mfi_parse_entries(sc, log_state->shutdown_seq_num,
1058 log_state->newest_seq_num);
1059 seq = log_state->newest_seq_num;
1062 mfi_aen_register(sc, seq, class_locale.word);
1063 free(log_state, M_MFIBUF);
/*
 * Queue a command and sleep until its completion handler wakes us.
 * A DCMD opcode of 0 (issued by MegaCli) is completed immediately with
 * MFI_STAT_OK rather than being sent to the firmware.  Caller must hold
 * mfi_io_lock; returns the command's error status.
 */
1069 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1072 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1073 cm->cm_complete = NULL;
1077 * MegaCli can issue a DCMD of 0. In this case do nothing
1078 * and return 0 to it as status
1080 if (cm->cm_frame->dcmd.opcode == 0) {
1081 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1083 return (cm->cm_error);
1085 mfi_enqueue_ready(cm);
/* msleep drops mfi_io_lock while asleep; interrupt path wakes us. */
1087 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1088 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1089 return (cm->cm_error);
/*
 * Release every resource held by the softc on detach: watchdog callout,
 * cdev, per-command DMA maps, IRQ, then each DMA allocation in strict
 * unload -> free -> tag-destroy order, plus the ThunderBolt-only pools,
 * and finally the locks.
 * NOTE(review): interior lines (closing braces, some guards) are elided
 * in this extract — gaps in the embedded numbering.
 */
1093 mfi_free(struct mfi_softc *sc)
1095 struct mfi_command *cm;
1098 callout_drain(&sc->mfi_watchdog_callout);
1100 if (sc->mfi_cdev != NULL)
1101 destroy_dev(sc->mfi_cdev);
/* Destroy the per-command DMA maps before freeing the command array. */
1103 if (sc->mfi_total_cmds != 0) {
1104 for (i = 0; i < sc->mfi_total_cmds; i++) {
1105 cm = &sc->mfi_commands[i];
1106 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1108 free(sc->mfi_commands, M_MFIBUF);
1112 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1113 if (sc->mfi_irq != NULL)
1114 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer pool: unload map, free memory, destroy tag — in order. */
1117 if (sc->mfi_sense_busaddr != 0)
1118 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1119 if (sc->mfi_sense != NULL)
1120 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1121 sc->mfi_sense_dmamap);
1122 if (sc->mfi_sense_dmat != NULL)
1123 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Frame pool teardown, same pattern as above. */
1125 if (sc->mfi_frames_busaddr != 0)
1126 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1127 if (sc->mfi_frames != NULL)
1128 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1129 sc->mfi_frames_dmamap);
1130 if (sc->mfi_frames_dmat != NULL)
1131 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Comms/reply-queue area teardown. */
1133 if (sc->mfi_comms_busaddr != 0)
1134 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1135 if (sc->mfi_comms != NULL)
1136 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1137 sc->mfi_comms_dmamap);
1138 if (sc->mfi_comms_dmat != NULL)
1139 bus_dma_tag_destroy(sc->mfi_comms_dmat);
/* ThunderBolt contiguous memory free here */
1141 /* ThunderBolt contiguous memory free here */
1142 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1143 if (sc->mfi_tb_busaddr != 0)
1144 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1145 if (sc->request_message_pool != NULL)
1146 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1148 if (sc->mfi_tb_dmat != NULL)
1149 bus_dma_tag_destroy(sc->mfi_tb_dmat);
/* Version buffer memory free (LSIP200113393 region). */
1151 /* Version buffer memory free */
1152 /* Start LSIP200113393 */
1153 if (sc->verbuf_h_busaddr != 0)
1154 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1155 if (sc->verbuf != NULL)
1156 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1157 sc->verbuf_h_dmamap);
1158 if (sc->verbuf_h_dmat != NULL)
1159 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1161 /* End LSIP200113393 */
1162 /* ThunderBolt INIT packet memory Free */
1163 if (sc->mfi_tb_init_busaddr != 0)
1164 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1165 if (sc->mfi_tb_init != NULL)
1166 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1167 sc->mfi_tb_init_dmamap);
1168 if (sc->mfi_tb_init_dmat != NULL)
1169 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1171 /* ThunderBolt IOC Init Desc memory free here */
1172 if (sc->mfi_tb_ioc_init_busaddr != 0)
1173 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1174 sc->mfi_tb_ioc_init_dmamap);
1175 if (sc->mfi_tb_ioc_init_desc != NULL)
1176 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1177 sc->mfi_tb_ioc_init_desc,
1178 sc->mfi_tb_ioc_init_dmamap);
1179 if (sc->mfi_tb_ioc_init_dmat != NULL)
1180 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Free each per-command tbolt pool entry, then the pool array itself. */
1181 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1182 if (sc->mfi_cmd_pool_tbolt != NULL) {
1183 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1184 free(sc->mfi_cmd_pool_tbolt[i],
1186 sc->mfi_cmd_pool_tbolt[i] = NULL;
1190 if (sc->mfi_cmd_pool_tbolt != NULL) {
1191 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1192 sc->mfi_cmd_pool_tbolt = NULL;
1194 if (sc->request_desc_pool != NULL) {
1195 free(sc->request_desc_pool, M_MFIBUF);
1196 sc->request_desc_pool = NULL;
1199 if (sc->mfi_buffer_dmat != NULL)
1200 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1201 if (sc->mfi_parent_dmat != NULL)
1202 bus_dma_tag_destroy(sc->mfi_parent_dmat);
/* Locks go last — nothing above may still need them at this point. */
1204 if (mtx_initialized(&sc->mfi_io_lock)) {
1205 mtx_destroy(&sc->mfi_io_lock);
1206 sx_destroy(&sc->mfi_config_lock);
/*
 * config_intrhook callback run once interrupts are available: disestablish
 * the hook, enable controller interrupts, and probe for logical/system
 * disks under the config and io locks.
 * NOTE(review): interior lines elided in this extract (the probe calls
 * between the lock/unlock pairs are not fully visible).
 */
1213 mfi_startup(void *arg)
1215 struct mfi_softc *sc;
1217 sc = (struct mfi_softc *)arg;
1219 config_intrhook_disestablish(&sc->mfi_ich);
1221 sc->mfi_enable_intr(sc);
1222 sx_xlock(&sc->mfi_config_lock);
1223 mtx_lock(&sc->mfi_io_lock);
1225 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1227 mtx_unlock(&sc->mfi_io_lock);
1228 sx_xunlock(&sc->mfi_config_lock);
/*
 * NOTE(review): the function signature line is elided in this extract;
 * from the body this is presumably the interrupt handler (mfi_intr) —
 * confirm against the full source.  It drains the firmware reply queue:
 * for each completed context it removes the command from the busy queue
 * and completes it, then writes back the consumer index and restarts I/O
 * if the queue had been frozen.
 */
1234 struct mfi_softc *sc;
1235 struct mfi_command *cm;
1236 uint32_t pi, ci, context;
1238 sc = (struct mfi_softc *)arg;
/* Shared-interrupt filter: bail if this device did not interrupt. */
1240 if (sc->mfi_check_clear_intr(sc))
1244 pi = sc->mfi_comms->hw_pi;
1245 ci = sc->mfi_comms->hw_ci;
1246 mtx_lock(&sc->mfi_io_lock);
1248 context = sc->mfi_comms->hw_reply_q[ci];
1249 if (context < sc->mfi_max_fw_cmds) {
1250 cm = &sc->mfi_commands[context];
1251 mfi_remove_busy(cm);
1253 mfi_complete(sc, cm);
/* Reply queue is circular with mfi_max_fw_cmds + 1 slots. */
1255 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1260 sc->mfi_comms->hw_ci = ci;
1262 /* Give defered I/O a chance to run */
1263 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1264 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1266 mtx_unlock(&sc->mfi_io_lock);
/*
 * Dummy read to flush the bus; this ensures that the indexes are up
 * to date. Restart processing if more commands have come in.
 */
1269 * Dummy read to flush the bus; this ensures that the indexes are up
1270 * to date. Restart processing if more commands have come it.
1272 (void)sc->mfi_read_fw_status(sc);
1273 if (pi != sc->mfi_comms->hw_pi)
/*
 * Controller shutdown: abort any outstanding AEN and map-sync commands,
 * then issue a polled MFI_DCMD_CTRL_SHUTDOWN so the firmware flushes its
 * caches before power-off.
 * NOTE(review): interior lines elided in this extract.
 */
1280 mfi_shutdown(struct mfi_softc *sc)
1282 struct mfi_dcmd_frame *dcmd;
1283 struct mfi_command *cm;
/* Stop the long-running AEN command first so it does not resubmit. */
1288 sc->cm_aen_abort = 1;
1289 if (sc->mfi_aen_cm != NULL)
1290 mfi_abort(sc, &sc->mfi_aen_cm);
1292 if (sc->mfi_map_sync_cm)
1293 sc->cm_map_abort = 1;
1294 if (sc->mfi_map_sync_cm != NULL)
1295 mfi_abort(sc, &sc->mfi_map_sync_cm);
1297 mtx_lock(&sc->mfi_io_lock);
1298 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1300 mtx_unlock(&sc->mfi_io_lock);
/* No data phase: direction NONE, polled completion. */
1304 dcmd = &cm->cm_frame->dcmd;
1305 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1306 cm->cm_flags = MFI_CMD_POLLED;
1309 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1310 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1313 mfi_release_command(cm);
1314 mtx_unlock(&sc->mfi_io_lock);
/*
 * Re-scan system (JBOD) physical disks: query the firmware's exposed-PD
 * list, attach children for PDs we have not seen, and detach children
 * whose PD is no longer in the list.  Requires mfi_config_lock (x) and
 * mfi_io_lock held on entry.
 * NOTE(review): interior lines (break/continue statements, labels,
 * closing braces) are elided in this extract.
 */
1319 mfi_syspdprobe(struct mfi_softc *sc)
1321 struct mfi_frame_header *hdr;
1322 struct mfi_command *cm = NULL;
1323 struct mfi_pd_list *pdlist = NULL;
1324 struct mfi_system_pd *syspd, *tmp;
1325 struct mfi_system_pending *syspd_pend;
1326 int error, i, found;
1328 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1329 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1330 /* Add SYSTEM PD's */
1331 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1332 (void **)&pdlist, sizeof(*pdlist));
1334 device_printf(sc->mfi_dev,
1335 "Error while forming SYSTEM PD list\n");
/* Polled query restricted to PDs exposed to the host (mbox[0]). */
1339 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1340 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1341 cm->cm_frame->dcmd.mbox[1] = 0;
1342 if (mfi_mapcmd(sc, cm) != 0) {
1343 device_printf(sc->mfi_dev,
1344 "Failed to get syspd device listing\n");
1347 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1348 BUS_DMASYNC_POSTREAD);
1349 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1350 hdr = &cm->cm_frame->header;
1351 if (hdr->cmd_status != MFI_STAT_OK) {
1352 device_printf(sc->mfi_dev,
1353 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1356 /* Get each PD and add it to the system */
1357 for (i = 0; i < pdlist->count; i++) {
/* device_id == encl_device_id marks an enclosure entry, not a disk. */
1358 if (pdlist->addr[i].device_id ==
1359 pdlist->addr[i].encl_device_id)
/* Skip PDs already attached or already pending attach. */
1362 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1363 if (syspd->pd_id == pdlist->addr[i].device_id)
1366 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1367 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1371 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1373 /* Delete SYSPD's whose state has been changed */
1374 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1376 for (i = 0; i < pdlist->count; i++) {
1377 if (syspd->pd_id == pdlist->addr[i].device_id)
/* device_delete_child() may sleep: drop the mutex around it. */
1382 mtx_unlock(&sc->mfi_io_lock);
1384 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1386 mtx_lock(&sc->mfi_io_lock);
1391 free(pdlist, M_MFIBUF);
1393 mfi_release_command(cm);
/*
 * Probe logical drives: fetch the LD list via MFI_DCMD_LD_GET_LIST and
 * attach an mfid child for each target not already attached or pending.
 * Requires mfi_config_lock (x) and mfi_io_lock held on entry.
 * NOTE(review): interior lines elided in this extract.
 */
1399 mfi_ldprobe(struct mfi_softc *sc)
1401 struct mfi_frame_header *hdr;
1402 struct mfi_command *cm = NULL;
1403 struct mfi_ld_list *list = NULL;
1404 struct mfi_disk *ld;
1405 struct mfi_disk_pending *ld_pend;
1408 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1409 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1411 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1412 (void **)&list, sizeof(*list));
/* Sleeping wait (not polled) — we are in a sleepable context here. */
1416 cm->cm_flags = MFI_CMD_DATAIN;
1417 if (mfi_wait_command(sc, cm) != 0) {
1418 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1422 hdr = &cm->cm_frame->header;
1423 if (hdr->cmd_status != MFI_STAT_OK) {
1424 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Skip targets already attached or with an attach pending. */
1429 for (i = 0; i < list->ld_count; i++) {
1430 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1431 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1434 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1435 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1438 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1443 free(list, M_MFIBUF);
1445 mfi_release_command(cm);
/*
 * Render an event timestamp for logging.  Returns a pointer to a static
 * buffer — not reentrant; fine for the single-threaded event decode path
 * it serves, but callers must not cache the pointer.
 */
1451 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1452 * the bits in 24-31 are all set, then it is the number of seconds since
1456 format_timestamp(uint32_t timestamp)
1458 static char buffer[32];
/* 0xffxxxxxx encodes "seconds since boot" rather than an absolute time. */
1460 if ((timestamp & 0xff000000) == 0xff000000)
1461 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1464 snprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a short human-readable string.  Unknown
 * codes are formatted numerically into a static buffer (not reentrant).
 * NOTE(review): the return statements for most cases are elided in this
 * extract (gaps in the embedded numbering).
 */
1469 format_class(int8_t class)
1471 static char buffer[6];
1474 case MFI_EVT_CLASS_DEBUG:
1476 case MFI_EVT_CLASS_PROGRESS:
1477 return ("progress");
1478 case MFI_EVT_CLASS_INFO:
1480 case MFI_EVT_CLASS_WARNING:
1482 case MFI_EVT_CLASS_CRITICAL:
1484 case MFI_EVT_CLASS_FATAL:
1486 case MFI_EVT_CLASS_DEAD:
1489 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log an AEN event and react to the ones that change topology: host-bus
 * scan requests and PD insert/remove trigger a syspd re-probe; an LD
 * going offline detaches the corresponding mfid child.
 * NOTE(review): interior lines (probe calls, break statements, closing
 * braces) are elided in this extract.
 */
1495 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1497 struct mfi_system_pd *syspd = NULL;
1499 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1500 format_timestamp(detail->time), detail->evt_class.members.locale,
1501 format_class(detail->evt_class.members.evt_class),
1502 detail->description);
1504 /* Don't act on old AEN's or while shutting down */
1505 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1508 switch (detail->arg_type) {
1509 case MR_EVT_ARGS_NONE:
1510 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1511 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1512 if (mfi_detect_jbod_change) {
1514 * Probe for new SYSPD's and Delete
1517 sx_xlock(&sc->mfi_config_lock);
1518 mtx_lock(&sc->mfi_io_lock);
1520 mtx_unlock(&sc->mfi_io_lock);
1521 sx_xunlock(&sc->mfi_config_lock);
1525 case MR_EVT_ARGS_LD_STATE:
1526 /* During load time driver reads all the events starting
1527 * from the one that has been logged after shutdown. Avoid
1530 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1532 struct mfi_disk *ld;
1533 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1535 detail->args.ld_state.ld.target_id)
/* ld may legitimately be NULL if the volume was an SSCD (see fix note). */
1539 Fix: for kernel panics when SSCD is removed
1540 KASSERT(ld != NULL, ("volume dissappeared"));
1544 device_delete_child(sc->mfi_dev, ld->ld_dev);
1549 case MR_EVT_ARGS_PD:
1550 if (detail->code == MR_EVT_PD_REMOVED) {
1551 if (mfi_detect_jbod_change) {
1553 * If the removed device is a SYSPD then
1556 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1559 detail->args.pd.device_id) {
1561 device_delete_child(
1570 if (detail->code == MR_EVT_PD_INSERTED) {
1571 if (mfi_detect_jbod_change) {
1572 /* Probe for new SYSPD's */
1573 sx_xlock(&sc->mfi_config_lock);
1574 mtx_lock(&sc->mfi_io_lock);
1576 mtx_unlock(&sc->mfi_io_lock);
1577 sx_xunlock(&sc->mfi_config_lock);
/*
 * Copy an event detail onto the softc's event queue and kick the SWI
 * taskqueue to process it outside the lock.  M_NOWAIT allocation: on
 * failure the event is dropped (interrupt context cannot sleep).
 * NOTE(review): the NULL-check line after malloc is elided in this
 * extract — confirm against the full source.
 */
1585 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1587 struct mfi_evt_queue_elm *elm;
1589 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1590 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1593 memcpy(&elm->detail, detail, sizeof(*detail));
1594 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1595 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: splice the pending event list out under the lock,
 * then decode and free each element lock-free.  TAILQ_CONCAT empties
 * sc->mfi_evt_queue in O(1), minimizing time under mfi_io_lock.
 */
1599 mfi_handle_evt(void *context, int pending)
1601 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1602 struct mfi_softc *sc;
1603 struct mfi_evt_queue_elm *elm;
1607 mtx_lock(&sc->mfi_io_lock);
1608 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1609 mtx_unlock(&sc->mfi_io_lock);
1610 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1611 TAILQ_REMOVE(&queue, elm, link);
1612 mfi_decode_evt(sc, &elm->detail);
1613 free(elm, M_MFIBUF);
/*
 * Register (or re-register) the long-running AEN wait command.  If an AEN
 * command is already outstanding, either the existing class/locale filter
 * already covers the request (return early) or the filters are merged and
 * the old command aborted before re-issuing.
 * NOTE(review): interior lines elided in this extract.
 */
1618 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1620 struct mfi_command *cm;
1621 struct mfi_dcmd_frame *dcmd;
1622 union mfi_evt current_aen, prior_aen;
1623 struct mfi_evt_detail *ed = NULL;
1626 current_aen.word = locale;
1627 if (sc->mfi_aen_cm != NULL) {
/* mbox[1] of the outstanding command holds its class/locale word. */
1629 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
/* Existing registration is a superset of the new one: nothing to do. */
1630 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1631 !((prior_aen.members.locale & current_aen.members.locale)
1632 ^current_aen.members.locale)) {
/* Otherwise widen the filter and abort the old command to replace it. */
1635 prior_aen.members.locale |= current_aen.members.locale;
1636 if (prior_aen.members.evt_class
1637 < current_aen.members.evt_class)
1638 current_aen.members.evt_class =
1639 prior_aen.members.evt_class;
1640 mfi_abort(sc, &sc->mfi_aen_cm);
1644 mtx_lock(&sc->mfi_io_lock);
1645 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1646 (void **)&ed, sizeof(*ed));
1647 mtx_unlock(&sc->mfi_io_lock);
/* mbox[0] = starting sequence, mbox[1] = class/locale filter. */
1652 dcmd = &cm->cm_frame->dcmd;
1653 ((uint32_t *)&dcmd->mbox)[0] = seq;
1654 ((uint32_t *)&dcmd->mbox)[1] = locale;
1655 cm->cm_flags = MFI_CMD_DATAIN;
1656 cm->cm_complete = mfi_aen_complete;
1658 sc->last_seq_num = seq;
1659 sc->mfi_aen_cm = cm;
1661 mtx_lock(&sc->mfi_io_lock);
1662 mfi_enqueue_ready(cm);
1664 mtx_unlock(&sc->mfi_io_lock);
/*
 * Completion handler for the AEN wait command: queue the received event,
 * wake pollers and signal registered PIDs with SIGIO, release the command,
 * then re-arm AEN at the next sequence number (unless aborted).
 * NOTE(review): interior lines (abort-path braces, the 'aborted' flow)
 * are elided in this extract.
 */
1671 mfi_aen_complete(struct mfi_command *cm)
1673 struct mfi_frame_header *hdr;
1674 struct mfi_softc *sc;
1675 struct mfi_evt_detail *detail;
1676 struct mfi_aen *mfi_aen_entry, *tmp;
1677 int seq = 0, aborted = 0;
1680 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1682 hdr = &cm->cm_frame->header;
1684 if (sc->mfi_aen_cm == NULL)
/* Abort or invalid status: do not treat the payload as a real event. */
1687 if (sc->cm_aen_abort ||
1688 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1689 sc->cm_aen_abort = 0;
1692 sc->mfi_aen_triggered = 1;
1693 if (sc->mfi_poll_waiting) {
1694 sc->mfi_poll_waiting = 0;
1695 selwakeup(&sc->mfi_select);
1697 detail = cm->cm_data;
1698 mfi_queue_evt(sc, detail);
/* Next registration starts one past the event we just consumed. */
1699 seq = detail->seq + 1;
/* Notify every process that registered for AEN delivery, then forget it. */
1700 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1702 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1704 PROC_LOCK(mfi_aen_entry->p);
1705 kern_psignal(mfi_aen_entry->p, SIGIO);
1706 PROC_UNLOCK(mfi_aen_entry->p);
1707 free(mfi_aen_entry, M_MFIBUF);
1711 free(cm->cm_data, M_MFIBUF);
1712 sc->mfi_aen_cm = NULL;
1713 wakeup(&sc->mfi_aen_cm);
1714 mfi_release_command(cm);
1716 /* set it up again so the driver can catch more events */
/* mfi_aen_setup() sleeps; must drop mfi_io_lock around it. */
1718 mtx_unlock(&sc->mfi_io_lock);
1719 mfi_aen_setup(sc, seq);
1720 mtx_lock(&sc->mfi_io_lock);
/* Batch size for each MFI_DCMD_CTRL_EVENT_GET fetch. */
1724 #define MAX_EVENTS 15
/*
 * Replay logged events in [start_seq, stop_seq): repeatedly fetch batches
 * of up to MAX_EVENTS entries with polled DCMDs and queue each one for
 * decoding, handling the circular-log wraparound case.  Returns when the
 * firmware reports NOT_FOUND or stop_seq is reached.
 * NOTE(review): interior lines (return statements, loop-exit braces,
 * the el buffer free) are elided in this extract.
 */
1727 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1729 struct mfi_command *cm;
1730 struct mfi_dcmd_frame *dcmd;
1731 struct mfi_evt_list *el;
1732 union mfi_evt class_locale;
1733 int error, i, seq, size;
1735 class_locale.members.reserved = 0;
1736 class_locale.members.locale = mfi_event_locale;
1737 class_locale.members.evt_class = mfi_event_class;
/* One header plus MAX_EVENTS details, fetched per iteration. */
1739 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1741 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1745 for (seq = start_seq;;) {
1746 mtx_lock(&sc->mfi_io_lock);
1747 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1749 mtx_unlock(&sc->mfi_io_lock);
1752 mtx_unlock(&sc->mfi_io_lock);
/* Hand-build the DCMD frame: EVENT_GET from 'seq' with our filter. */
1754 dcmd = &cm->cm_frame->dcmd;
1755 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1756 dcmd->header.cmd = MFI_CMD_DCMD;
1757 dcmd->header.timeout = 0;
1758 dcmd->header.data_len = size;
1759 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1760 ((uint32_t *)&dcmd->mbox)[0] = seq;
1761 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1762 cm->cm_sg = &dcmd->sgl;
1763 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1764 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1768 mtx_lock(&sc->mfi_io_lock);
1769 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1770 device_printf(sc->mfi_dev,
1771 "Failed to get controller entries\n");
1772 mfi_release_command(cm);
1773 mtx_unlock(&sc->mfi_io_lock);
1777 mtx_unlock(&sc->mfi_io_lock);
1778 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1779 BUS_DMASYNC_POSTREAD);
1780 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we have drained the log — normal termination. */
1782 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1783 mtx_lock(&sc->mfi_io_lock);
1784 mfi_release_command(cm);
1785 mtx_unlock(&sc->mfi_io_lock);
1788 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1789 device_printf(sc->mfi_dev,
1790 "Error %d fetching controller entries\n",
1791 dcmd->header.cmd_status);
1792 mtx_lock(&sc->mfi_io_lock);
1793 mfi_release_command(cm);
1794 mtx_unlock(&sc->mfi_io_lock);
1797 mtx_lock(&sc->mfi_io_lock);
1798 mfi_release_command(cm);
1799 mtx_unlock(&sc->mfi_io_lock);
1801 for (i = 0; i < el->count; i++) {
1803 * If this event is newer than 'stop_seq' then
1804 * break out of the loop. Note that the log
1805 * is a circular buffer so we have to handle
1806 * the case that our stop point is earlier in
1807 * the buffer than our start point.
1809 if (el->event[i].seq >= stop_seq) {
1810 if (start_seq <= stop_seq)
1812 else if (el->event[i].seq < start_seq)
1815 mtx_lock(&sc->mfi_io_lock);
1816 mfi_queue_evt(sc, &el->event[i]);
1817 mtx_unlock(&sc->mfi_io_lock);
/* Continue the fetch from just past the last event in this batch. */
1819 seq = el->event[el->count - 1].seq + 1;
/*
 * Attach a logical drive: record it as pending, fetch its info with
 * MFI_DCMD_LD_GET_INFO, and (unless it is an SSCD cache volume) hand off
 * to mfi_add_ld_complete() to create the mfid child.
 * NOTE(review): interior lines (mbox setup, returns) are elided in this
 * extract.
 */
1827 mfi_add_ld(struct mfi_softc *sc, int id)
1829 struct mfi_command *cm;
1830 struct mfi_dcmd_frame *dcmd = NULL;
1831 struct mfi_ld_info *ld_info = NULL;
1832 struct mfi_disk_pending *ld_pend;
1835 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Pending entry prevents duplicate attach attempts from re-probes;
 * allocation failure here is tolerated (entry simply not tracked). */
1837 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1838 if (ld_pend != NULL) {
1839 ld_pend->ld_id = id;
1840 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1843 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1844 (void **)&ld_info, sizeof(*ld_info));
1846 device_printf(sc->mfi_dev,
1847 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1849 free(ld_info, M_MFIBUF);
1852 cm->cm_flags = MFI_CMD_DATAIN;
1853 dcmd = &cm->cm_frame->dcmd;
1855 if (mfi_wait_command(sc, cm) != 0) {
1856 device_printf(sc->mfi_dev,
1857 "Failed to get logical drive: %d\n", id);
1858 free(ld_info, M_MFIBUF);
/* SSCD (cache) volumes get no disk child; their info is freed below. */
1861 if (ld_info->ld_config.params.isSSCD != 1)
1862 mfi_add_ld_complete(cm);
1864 mfi_release_command(cm);
1865 if (ld_info) /* SSCD drives ld_info free here */
1866 free(ld_info, M_MFIBUF);
/*
 * Second half of LD attach: on success, create an "mfid" child device
 * carrying ld_info as ivars and attach it (dropping mfi_io_lock around
 * the sleepable newbus calls).  On failure, free ld_info and wake any
 * waiter on mfi_map_sync_cm.
 * NOTE(review): interior lines (returns, child != NULL handling braces)
 * are elided in this extract.
 */
1872 mfi_add_ld_complete(struct mfi_command *cm)
1874 struct mfi_frame_header *hdr;
1875 struct mfi_ld_info *ld_info;
1876 struct mfi_softc *sc;
1880 hdr = &cm->cm_frame->header;
1881 ld_info = cm->cm_private;
1883 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1884 free(ld_info, M_MFIBUF);
1885 wakeup(&sc->mfi_map_sync_cm);
1886 mfi_release_command(cm);
1889 wakeup(&sc->mfi_map_sync_cm);
1890 mfi_release_command(cm);
/* device_add_child()/bus_generic_attach() may sleep: drop the mutex. */
1892 mtx_unlock(&sc->mfi_io_lock);
1894 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1895 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1896 free(ld_info, M_MFIBUF);
1898 mtx_lock(&sc->mfi_io_lock);
/* Ownership of ld_info transfers to the child via ivars. */
1902 device_set_ivars(child, ld_info);
1903 device_set_desc(child, "MFI Logical Disk");
1904 bus_generic_attach(sc->mfi_dev);
1906 mtx_lock(&sc->mfi_io_lock);
/*
 * Attach a system (JBOD) physical disk: record it as pending, fetch its
 * info with a polled MFI_DCMD_PD_GET_INFO, then hand off to
 * mfi_add_sys_pd_complete() to create the mfisyspd child.
 * NOTE(review): interior lines (mbox device-id setup, returns) are elided
 * in this extract.
 */
1909 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1911 struct mfi_command *cm;
1912 struct mfi_dcmd_frame *dcmd = NULL;
1913 struct mfi_pd_info *pd_info = NULL;
1914 struct mfi_system_pending *syspd_pend;
1917 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
/* Pending entry guards against duplicate attaches from re-probes. */
1919 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1920 if (syspd_pend != NULL) {
1921 syspd_pend->pd_id = id;
1922 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1925 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1926 (void **)&pd_info, sizeof(*pd_info));
1928 device_printf(sc->mfi_dev,
1929 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1932 free(pd_info, M_MFIBUF);
1935 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1936 dcmd = &cm->cm_frame->dcmd;
1938 dcmd->header.scsi_status = 0;
1939 dcmd->header.pad0 = 0;
1940 if (mfi_mapcmd(sc, cm) != 0) {
1941 device_printf(sc->mfi_dev,
1942 "Failed to get physical drive info %d\n", id);
1943 free(pd_info, M_MFIBUF);
1946 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1947 BUS_DMASYNC_POSTREAD);
1948 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1949 mfi_add_sys_pd_complete(cm);
/*
 * Second half of system-PD attach: verify the DCMD succeeded and the PD
 * really is in SYSTEM state, then create an "mfisyspd" child carrying
 * pd_info as ivars (mfi_io_lock dropped around the sleepable newbus
 * calls).
 * NOTE(review): interior lines (returns, closing braces) are elided in
 * this extract.
 */
1954 mfi_add_sys_pd_complete(struct mfi_command *cm)
1956 struct mfi_frame_header *hdr;
1957 struct mfi_pd_info *pd_info;
1958 struct mfi_softc *sc;
1962 hdr = &cm->cm_frame->header;
1963 pd_info = cm->cm_private;
1965 if (hdr->cmd_status != MFI_STAT_OK) {
1966 free(pd_info, M_MFIBUF);
1967 mfi_release_command(cm);
/* Only PDs exposed as SYSTEM (JBOD) get a child device. */
1970 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1971 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1972 pd_info->ref.v.device_id);
1973 free(pd_info, M_MFIBUF);
1974 mfi_release_command(cm);
1977 mfi_release_command(cm);
1979 mtx_unlock(&sc->mfi_io_lock);
1981 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1982 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1983 free(pd_info, M_MFIBUF);
1985 mtx_lock(&sc->mfi_io_lock);
/* pd_info ownership transfers to the child via ivars. */
1989 device_set_ivars(child, pd_info);
1990 device_set_desc(child, "MFI System PD");
1991 bus_generic_attach(sc->mfi_dev);
1993 mtx_lock(&sc->mfi_io_lock);
/*
 * Pull the next bio off the queue and build an MFI command for it,
 * dispatching on bio_driver2 (LD I/O vs system-PD passthrough).  Keeps
 * two commands in reserve for ioctls; on build failure the bio is pushed
 * back onto the queue.  Returns NULL if no work or no resources.
 * NOTE(review): interior lines (returns, failure branch) are elided in
 * this extract.
 */
1996 static struct mfi_command *
1997 mfi_bio_command(struct mfi_softc *sc)
2000 struct mfi_command *cm = NULL;
2002 /*reserving two commands to avoid starvation for IOCTL*/
2003 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2006 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2009 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2010 cm = mfi_build_ldio(sc, bio);
2011 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2012 cm = mfi_build_syspdio(sc, bio);
/* Command build failed: requeue the bio for a later attempt. */
2015 mfi_enqueue_bio(sc, bio);
/*
 * Build a SCSI READ/WRITE CDB for the given LBA and block count, choosing
 * the smallest CDB (6/10/12/16 bytes) that can encode both.  Returns the
 * CDB length written into 'cdb' (caller provides at least 16 bytes).
 */
2020 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2024 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
/* 6-byte CDB: 21-bit LBA, 8-bit count (0 means 256 per SBC — note the
 * count check excludes only values that don't fit in 8 bits). */
2028 if (((lba & 0x1fffff) == lba)
2029 && ((block_count & 0xff) == block_count)
2031 /* We can fit in a 6 byte cdb */
2032 struct scsi_rw_6 *scsi_cmd;
2034 scsi_cmd = (struct scsi_rw_6 *)cdb;
2035 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2036 scsi_ulto3b(lba, scsi_cmd->addr);
2037 scsi_cmd->length = block_count & 0xff;
2038 scsi_cmd->control = 0;
2039 cdb_len = sizeof(*scsi_cmd);
/* 10-byte CDB: 32-bit LBA, 16-bit count. */
2040 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2041 /* Need a 10 byte CDB */
2042 struct scsi_rw_10 *scsi_cmd;
2044 scsi_cmd = (struct scsi_rw_10 *)cdb;
2045 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2046 scsi_cmd->byte2 = byte2;
2047 scsi_ulto4b(lba, scsi_cmd->addr);
2048 scsi_cmd->reserved = 0;
2049 scsi_ulto2b(block_count, scsi_cmd->length);
2050 scsi_cmd->control = 0;
2051 cdb_len = sizeof(*scsi_cmd);
/* 12-byte CDB: 32-bit LBA, 32-bit count. */
2052 } else if (((block_count & 0xffffffff) == block_count) &&
2053 ((lba & 0xffffffff) == lba)) {
2054 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2055 struct scsi_rw_12 *scsi_cmd;
2057 scsi_cmd = (struct scsi_rw_12 *)cdb;
2058 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2059 scsi_cmd->byte2 = byte2;
2060 scsi_ulto4b(lba, scsi_cmd->addr);
2061 scsi_cmd->reserved = 0;
2062 scsi_ulto4b(block_count, scsi_cmd->length);
2063 scsi_cmd->control = 0;
2064 cdb_len = sizeof(*scsi_cmd);
/* 16-byte CDB: full 64-bit LBA. */
2067 * 16 byte CDB. We'll only get here if the LBA is larger
2070 struct scsi_rw_16 *scsi_cmd;
2072 scsi_cmd = (struct scsi_rw_16 *)cdb;
2073 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2074 scsi_cmd->byte2 = byte2;
2075 scsi_u64to8b(lba, scsi_cmd->addr);
2076 scsi_cmd->reserved = 0;
2077 scsi_ulto4b(block_count, scsi_cmd->length);
2078 scsi_cmd->control = 0;
2079 cdb_len = sizeof(*scsi_cmd);
/*
 * Build a SCSI passthrough (MFI_CMD_PD_SCSI_IO) command for a bio aimed
 * at a system PD: zero the frame (preserving the context cookie), build
 * a READ/WRITE CDB for the bio's block range, and wire up sense buffer,
 * data pointers, and completion callback.  Returns NULL if no free
 * command is available.
 * NOTE(review): interior lines (case labels, readop assignments, return)
 * are elided in this extract.
 */
2085 static struct mfi_command *
2086 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2088 struct mfi_command *cm;
2089 struct mfi_pass_frame *pass;
2090 uint32_t context = 0;
2091 int flags = 0, blkcount = 0, readop;
2094 if ((cm = mfi_dequeue_free(sc)) == NULL)
2097 /* Zero out the MFI frame */
/* The context field is the firmware's handle for this slot — keep it. */
2098 context = cm->cm_frame->header.context;
2099 bzero(cm->cm_frame, sizeof(union mfi_frame));
2100 cm->cm_frame->header.context = context;
2101 pass = &cm->cm_frame->pass;
2102 bzero(pass->cdb, 16);
2103 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2104 switch (bio->bio_cmd & 0x03) {
2106 flags = MFI_CMD_DATAIN;
2110 flags = MFI_CMD_DATAOUT;
2114 /* TODO: what about BIO_DELETE??? */
2115 panic("Unsupported bio command %x\n", bio->bio_cmd);
2118 /* Cheat with the sector length to avoid a non-constant division */
2119 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2120 /* Fill the LBA and Transfer length in CDB */
2121 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
/* bio_driver1 carries the target id (set by the disk layer). */
2123 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2124 pass->header.lun_id = 0;
2125 pass->header.timeout = 0;
2126 pass->header.flags = 0;
2127 pass->header.scsi_status = 0;
2128 pass->header.sense_len = MFI_SENSE_LEN;
2129 pass->header.data_len = bio->bio_bcount;
2130 pass->header.cdb_len = cdb_len;
/* Sense buffer bus address split across the lo/hi 32-bit fields. */
2131 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2132 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2133 cm->cm_complete = mfi_bio_complete;
2134 cm->cm_private = bio;
2135 cm->cm_data = bio->bio_data;
2136 cm->cm_len = bio->bio_bcount;
2137 cm->cm_sg = &pass->sgl;
2138 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2139 cm->cm_flags = flags;
/*
 * Build a logical-drive I/O (MFI_CMD_LD_READ/WRITE) command for a bio:
 * zero the frame (preserving the context cookie), fill in LBA/length in
 * the native io frame, and wire up sense buffer, data pointers, and
 * completion callback.  Returns NULL if no free command is available.
 * NOTE(review): interior lines (case labels, return) are elided in this
 * extract.
 */
2143 static struct mfi_command *
2144 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2146 struct mfi_io_frame *io;
2147 struct mfi_command *cm;
2150 uint32_t context = 0;
2152 if ((cm = mfi_dequeue_free(sc)) == NULL)
2155 /* Zero out the MFI frame */
/* Preserve the firmware's context cookie across the bzero. */
2156 context = cm->cm_frame->header.context;
2157 bzero(cm->cm_frame, sizeof(union mfi_frame));
2158 cm->cm_frame->header.context = context;
2159 io = &cm->cm_frame->io;
2160 switch (bio->bio_cmd & 0x03) {
2162 io->header.cmd = MFI_CMD_LD_READ;
2163 flags = MFI_CMD_DATAIN;
2166 io->header.cmd = MFI_CMD_LD_WRITE;
2167 flags = MFI_CMD_DATAOUT;
2170 /* TODO: what about BIO_DELETE??? */
2171 panic("Unsupported bio command %x\n", bio->bio_cmd);
2174 /* Cheat with the sector length to avoid a non-constant division */
2175 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2176 io->header.target_id = (uintptr_t)bio->bio_driver1;
2177 io->header.timeout = 0;
2178 io->header.flags = 0;
2179 io->header.scsi_status = 0;
2180 io->header.sense_len = MFI_SENSE_LEN;
/* For LD I/O, data_len is in BLOCKS (cf. bytes for the pass frame). */
2181 io->header.data_len = blkcount;
2182 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2183 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
/* 64-bit LBA split across lba_hi/lba_lo. */
2184 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2185 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2186 cm->cm_complete = mfi_bio_complete;
2187 cm->cm_private = bio;
2188 cm->cm_data = bio->bio_data;
2189 cm->cm_len = bio->bio_bcount;
2190 cm->cm_sg = &io->sgl;
2191 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2192 cm->cm_flags = flags;
/*
 * Completion callback for bio-based commands: translate MFI/SCSI status
 * into bio error state, release the command, and finish the bio via the
 * disk layer.
 * NOTE(review): a few interior lines are elided in this extract.
 */
2197 mfi_bio_complete(struct mfi_command *cm)
2200 struct mfi_frame_header *hdr;
2201 struct mfi_softc *sc;
2203 bio = cm->cm_private;
2204 hdr = &cm->cm_frame->header;
/* Either a firmware error or a SCSI check condition fails the bio. */
2207 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2208 bio->bio_flags |= BIO_ERROR;
2209 bio->bio_error = EIO;
2210 device_printf(sc->mfi_dev, "I/O error, status= %d "
2211 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2212 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2213 } else if (cm->cm_error != 0) {
2214 bio->bio_flags |= BIO_ERROR;
/* Release before biodone so the slot is reusable immediately. */
2217 mfi_release_command(cm);
2218 mfi_disk_complete(bio);
/*
 * Pump the I/O queues: repeatedly pick work — ready-queued commands
 * first, then CAM CCBs, then bios — and submit each via mfi_mapcmd().
 * Stops when the queue is frozen (resource shortage), requeueing the
 * command that could not be mapped.
 * NOTE(review): loop construct and break/exit lines are elided in this
 * extract.
 */
2222 mfi_startio(struct mfi_softc *sc)
2224 struct mfi_command *cm;
2225 struct ccb_hdr *ccbh;
2228 /* Don't bother if we're short on resources */
2229 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2232 /* Try a command that has already been prepared */
2233 cm = mfi_dequeue_ready(sc);
/* Next preference: CAM passthrough work, if the CAM shim is attached. */
2236 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2237 cm = sc->mfi_cam_start(ccbh);
2240 /* Nope, so look for work on the bioq */
2242 cm = mfi_bio_command(sc);
2244 /* No work available, so exit */
2248 /* Send the command to the controller */
2249 if (mfi_mapcmd(sc, cm) != 0) {
2250 mfi_requeue_ready(cm);
/*
 * Map a command's data buffer for DMA (the callback builds the SGL and
 * sends the frame); commands without data, or STP frames, are sent
 * directly.  EINPROGRESS from a deferred (non-polled) load freezes the
 * queue until resources free up.  Called with mfi_io_lock held.
 * NOTE(review): interior lines (returns, else-branch braces) are elided
 * in this extract.
 */
2257 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2261 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2263 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
/* Polled commands cannot tolerate a deferred load: force NOWAIT. */
2264 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2265 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2266 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2267 if (error == EINPROGRESS) {
2268 sc->mfi_flags |= MFI_FLAGS_QFRZN;
/* No data phase: dispatch the frame directly (ThunderBolt or legacy). */
2272 if (sc->MFA_enabled)
2273 error = mfi_tbolt_send_frame(sc, cm);
2275 error = mfi_send_frame(sc, cm);
/*
 * bus_dmamap_load() callback: build the scatter/gather list from the DMA
 * segments (IEEE "skinny" SGL for I/O on SKINNY controllers, otherwise
 * 32- or 64-bit SGL; STP frames fold cm_stp_len into the first segment),
 * set transfer-direction flags, sync the map, compute the extra-frame
 * count, and dispatch the frame.
 * NOTE(review): interior lines (sgl assignment, some braces/else lines)
 * are elided in this extract.
 */
2282 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2284 struct mfi_frame_header *hdr;
2285 struct mfi_command *cm;
2287 struct mfi_softc *sc;
2288 int i, j, first, dir;
2291 cm = (struct mfi_command *)arg;
2293 hdr = &cm->cm_frame->header;
/* Propagate a load failure into the command and complete it as error. */
2297 printf("error %d in callback\n", error);
2298 cm->cm_error = error;
2299 mfi_complete(sc, cm);
2302 /* Use IEEE sgl only for IO's on a SKINNY controller
2303 * For other commands on a SKINNY controller use either
2304 * sg32 or sg64 based on the sizeof(bus_addr_t).
2305 * Also calculate the total frame size based on the type
2308 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2309 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2310 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2311 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2312 for (i = 0; i < nsegs; i++) {
2313 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2314 sgl->sg_skinny[i].len = segs[i].ds_len;
2315 sgl->sg_skinny[i].flag = 0;
2317 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2318 sge_size = sizeof(struct mfi_sg_skinny);
2319 hdr->sg_count = nsegs;
/* STP: the first cm_stp_len bytes form their own leading SG entry. */
2322 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2323 first = cm->cm_stp_len;
2324 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2325 sgl->sg32[j].addr = segs[0].ds_addr;
2326 sgl->sg32[j++].len = first;
2328 sgl->sg64[j].addr = segs[0].ds_addr;
2329 sgl->sg64[j++].len = first;
2333 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2334 for (i = 0; i < nsegs; i++) {
2335 sgl->sg32[j].addr = segs[i].ds_addr + first;
2336 sgl->sg32[j++].len = segs[i].ds_len - first;
2340 for (i = 0; i < nsegs; i++) {
2341 sgl->sg64[j].addr = segs[i].ds_addr + first;
2342 sgl->sg64[j++].len = segs[i].ds_len - first;
2345 hdr->flags |= MFI_FRAME_SGL64;
2348 sge_size = sc->mfi_sge_size;
/* Direction flags drive both the DMA sync and the frame header bits. */
2352 if (cm->cm_flags & MFI_CMD_DATAIN) {
2353 dir |= BUS_DMASYNC_PREREAD;
2354 hdr->flags |= MFI_FRAME_DIR_READ;
2356 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2357 dir |= BUS_DMASYNC_PREWRITE;
2358 hdr->flags |= MFI_FRAME_DIR_WRITE;
2360 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2361 cm->cm_flags |= MFI_CMD_MAPPED;
2364 * Instead of calculating the total number of frames in the
2365 * compound frame, it's already assumed that there will be at
2366 * least 1 frame, so don't compensate for the modulo of the
2367 * following division.
2369 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2370 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2372 if (sc->MFA_enabled)
2373 mfi_tbolt_send_frame(sc, cm);
2375 mfi_send_frame(sc, cm);
/*
 * Hand a frame to legacy (non-ThunderBolt) firmware via mfi_issue_cmd.
 * Non-polled commands are timestamped and placed on the busy queue for
 * the interrupt path; polled commands are busy-waited here up to
 * MFI_POLL_TIMEOUT_SECS.
 * NOTE(review): interior lines (poll-loop delay, returns) are elided in
 * this extract.
 */
2381 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2383 struct mfi_frame_header *hdr;
2384 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2386 hdr = &cm->cm_frame->header;
2388 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2389 cm->cm_timestamp = time_uptime;
2390 mfi_enqueue_busy(cm);
/* Polled path: sentinel status + suppress the reply-queue posting. */
2392 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2393 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2397 * The bus address of the command is aligned on a 64 byte boundary,
2398 * leaving the least 6 bits as zero. For whatever reason, the
2399 * hardware wants the address shifted right by three, leaving just
2400 * 3 zero bits. These three bits are then used as a prefetching
2401 * hint for the hardware to predict how many frames need to be
2402 * fetched across the bus. If a command has more than 8 frames
2403 * then the 3 bits are set to 0x7 and the firmware uses other
2404 * information in the command to determine the total amount to fetch.
2405 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2406 * is enough for both 32bit and 64bit systems.
2408 if (cm->cm_extra_frames > 7)
2409 cm->cm_extra_frames = 7;
2411 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2413 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2416 /* This is a polled command, so busy-wait for it to complete. */
2417 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
/* Still at the sentinel after the timeout: the command never completed. */
2424 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2425 device_printf(sc->mfi_dev, "Frame %p timed out "
2426 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Finish a command: sync and unload its data DMA map (direction chosen
 * from the command's data flags) and invoke the completion callback,
 * if one was installed.
 */
2435 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2439 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP commands are treated as reads for the post-DMA sync as well. */
2441 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2442 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2443 			dir |= BUS_DMASYNC_POSTREAD;
2444 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2445 			dir |= BUS_DMASYNC_POSTWRITE;
2447 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2448 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2449 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2452 	cm->cm_flags |= MFI_CMD_COMPLETED;
2454 	if (cm->cm_complete != NULL)
2455 		cm->cm_complete(cm);
/*
 * Issue an MFI_CMD_ABORT frame targeting *cm_abort, then wait (up to 5
 * iterations) for the aborted command's pointer to be cleared.  If the
 * firmware never aborts it, force the completion callback as a last resort.
 */
2461 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2463 	struct mfi_command *cm;
2464 	struct mfi_abort_frame *abort;
2466 	uint32_t context = 0;
2468 	mtx_lock(&sc->mfi_io_lock);
2469 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2473 	/* Zero out the MFI frame */
/* Preserve the per-command context across the bzero of the frame. */
2474 	context = cm->cm_frame->header.context;
2475 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2476 	cm->cm_frame->header.context = context;
2478 	abort = &cm->cm_frame->abort;
2479 	abort->header.cmd = MFI_CMD_ABORT;
2480 	abort->header.flags = 0;
2481 	abort->header.scsi_status = 0;
/* Identify the victim by its context and frame bus address (lo/hi split). */
2482 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2483 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2484 	abort->abort_mfi_addr_hi =
2485 	    (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2487 	cm->cm_flags = MFI_CMD_POLLED;
2490 	mfi_release_command(cm);
2492 	mtx_unlock(&sc->mfi_io_lock);
/* Sleep-poll until the caller's pointer is cleared or we give up. */
2493 	while (i < 5 && *cm_abort != NULL) {
2494 		tsleep(cm_abort, 0, "mfiabort",
2498 	if (*cm_abort != NULL) {
2499 		/* Force a complete if command didn't abort */
2500 		mtx_lock(&sc->mfi_io_lock);
2501 		(*cm_abort)->cm_complete(*cm_abort);
2502 		mtx_unlock(&sc->mfi_io_lock);
/*
 * Write `len` bytes at `virt` to logical drive `id` starting at `lba`,
 * using a polled MFI_CMD_LD_WRITE frame.  Used for crash-dump style
 * writes where the normal I/O path is unavailable.
 */
2509 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2512 	struct mfi_command *cm;
2513 	struct mfi_io_frame *io;
2515 	uint32_t context = 0;
2517 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2520 	/* Zero out the MFI frame */
2521 	context = cm->cm_frame->header.context;
2522 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2523 	cm->cm_frame->header.context = context;
2525 	io = &cm->cm_frame->io;
2526 	io->header.cmd = MFI_CMD_LD_WRITE;
2527 	io->header.target_id = id;
2528 	io->header.timeout = 0;
2529 	io->header.flags = 0;
2530 	io->header.scsi_status = 0;
2531 	io->header.sense_len = MFI_SENSE_LEN;
/* data_len is in sectors: round the byte count up to whole sectors. */
2532 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2533 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2534 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
/* 64-bit LBA split into the frame's hi/lo 32-bit fields. */
2535 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2536 	io->lba_lo = lba & 0xffffffff;
2539 	cm->cm_sg = &io->sgl;
2540 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2541 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2543 	error = mfi_mapcmd(sc, cm);
2544 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2545 	    BUS_DMASYNC_POSTWRITE);
2546 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2547 	mfi_release_command(cm);
/*
 * System-PD analogue of mfi_dump_blocks(): write `len` bytes at `virt`
 * to physical drive `id` at `lba` via a polled pass-through SCSI frame
 * built with mfi_build_cdb().
 */
2553 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2556 	struct mfi_command *cm;
2557 	struct mfi_pass_frame *pass;
2558 	int error, readop, cdb_len;
2561 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2564 	pass = &cm->cm_frame->pass;
2565 	bzero(pass->cdb, 16);
2566 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* Convert byte count to a whole number of sectors for the CDB. */
2569 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2570 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2571 	pass->header.target_id = id;
2572 	pass->header.timeout = 0;
2573 	pass->header.flags = 0;
2574 	pass->header.scsi_status = 0;
2575 	pass->header.sense_len = MFI_SENSE_LEN;
2576 	pass->header.data_len = len;
2577 	pass->header.cdb_len = cdb_len;
2578 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2579 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2582 	cm->cm_sg = &pass->sgl;
2583 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2584 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2586 	error = mfi_mapcmd(sc, cm);
2587 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2588 	    BUS_DMASYNC_POSTWRITE);
2589 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2590 	mfi_release_command(cm);
/*
 * cdev open entry point: refuse new opens while the driver is detaching,
 * otherwise mark the device node open.
 */
2596 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2598 	struct mfi_softc *sc;
2603 	mtx_lock(&sc->mfi_io_lock);
2604 	if (sc->mfi_detaching)
2607 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2610 	mtx_unlock(&sc->mfi_io_lock);
/*
 * cdev close entry point: clear the open flag and remove any AEN
 * registrations made by the closing process.
 */
2616 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2618 	struct mfi_softc *sc;
2619 	struct mfi_aen *mfi_aen_entry, *tmp;
2623 	mtx_lock(&sc->mfi_io_lock);
2624 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* SAFE variant is required because entries are removed while iterating. */
2626 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2627 		if (mfi_aen_entry->p == curproc) {
2628 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2630 			free(mfi_aen_entry, M_MFIBUF);
2633 	mtx_unlock(&sc->mfi_io_lock);
/*
 * Take the exclusive config lock for DCMD opcodes that change the array
 * configuration; other opcodes need no lock.  The return value is passed
 * to mfi_config_unlock() so only actually-taken locks get released.
 */
2638 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2642 	case MFI_DCMD_LD_DELETE:
2643 	case MFI_DCMD_CFG_ADD:
2644 	case MFI_DCMD_CFG_CLEAR:
2645 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2646 		sx_xlock(&sc->mfi_config_lock);
/*
 * Release the config lock if (and only if) mfi_config_lock() reported
 * that it was taken.
 */
2654 mfi_config_unlock(struct mfi_softc *sc, int locked)
2658 		sx_xunlock(&sc->mfi_config_lock);
2662 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * Pre-issue hook for userland commands: before a destructive DCMD is sent
 * to the firmware, disable the affected mfid/mfisyspd disk(s) so no new
 * I/O reaches a volume that is about to disappear.  A non-zero return
 * vetoes the command.
 */
2666 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2668 	struct mfi_disk *ld, *ld2;
2670 	struct mfi_system_pd *syspd = NULL;
2674 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2676 	switch (cm->cm_frame->dcmd.opcode) {
2677 	case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE. */
2678 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2679 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2685 		error = mfi_disk_disable(ld);
2687 	case MFI_DCMD_CFG_CLEAR:
/* CFG_CLEAR wipes everything: try to disable every LD first. */
2688 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2689 			error = mfi_disk_disable(ld);
/* On failure, roll back: re-enable the LDs already disabled. */
2694 		TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2697 				mfi_disk_enable(ld2);
2701 	case MFI_DCMD_PD_STATE_SET:
2702 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
/* Moving a PD to UNCONFIGURED_GOOD removes it as a syspd; disable it. */
2704 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2705 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2706 				if (syspd->pd_id == syspd_id)
2713 			error = mfi_syspd_disable(syspd);
2721 /* Perform post-issue checks on commands from userland. */
/*
 * Post-issue hook for userland commands: on success, detach the child
 * device(s) the firmware just removed (or rescan after additions); on
 * failure, re-enable the disk(s) mfi_check_command_pre() disabled.
 */
2723 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2725 	struct mfi_disk *ld, *ldn;
2726 	struct mfi_system_pd *syspd = NULL;
2730 	switch (cm->cm_frame->dcmd.opcode) {
2731 	case MFI_DCMD_LD_DELETE:
2732 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2733 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2736 		KASSERT(ld != NULL, ("volume dissappeared"));
2737 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* device_delete_child() may sleep; must drop the io lock around it. */
2738 			mtx_unlock(&sc->mfi_io_lock);
2740 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2742 			mtx_lock(&sc->mfi_io_lock);
/* Delete failed in firmware: undo the pre-issue disable. */
2744 			mfi_disk_enable(ld);
2746 	case MFI_DCMD_CFG_CLEAR:
2747 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2748 			mtx_unlock(&sc->mfi_io_lock);
2750 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2751 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2754 			mtx_lock(&sc->mfi_io_lock);
2756 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2757 				mfi_disk_enable(ld);
2760 	case MFI_DCMD_CFG_ADD:
2763 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2766 	case MFI_DCMD_PD_STATE_SET:
2767 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2769 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2770 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2771 				if (syspd->pd_id == syspd_id)
2777 			/* If the transition fails then enable the syspd again */
2778 			if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2779 				mfi_syspd_enable(syspd);
/*
 * Determine whether the command targets a CacheCade / SSC drive (isSSCD).
 * For CFG_ADD the flag is read from the LD config appended to the user's
 * config data; for LD_DELETE an MFI_DCMD_LD_GET_INFO query is issued to
 * fetch the flag from the firmware.  The result tells the ioctl path to
 * skip the normal pre/post disk enable/disable checks.
 */
2785 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2787 	struct mfi_config_data *conf_data;
2788 	struct mfi_command *ld_cm = NULL;
2789 	struct mfi_ld_info *ld_info = NULL;
2790 	struct mfi_ld_config *ld;
2794 	conf_data = (struct mfi_config_data *)cm->cm_data;
2796 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
/* The LD config records follow the array records in the buffer. */
2797 		p = (char *)conf_data->array;
2798 		p += conf_data->array_size * conf_data->array_count;
2799 		ld = (struct mfi_ld_config *)p;
2800 		if (ld->params.isSSCD == 1)
2802 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2803 		error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2804 		    (void **)&ld_info, sizeof(*ld_info));
2806 			device_printf(sc->mfi_dev, "Failed to allocate"
2807 			    "MFI_DCMD_LD_GET_INFO %d", error);
2809 			free(ld_info, M_MFIBUF);
2812 		ld_cm->cm_flags = MFI_CMD_DATAIN;
/* Query the same LD id the delete targets (dcmd.mbox[0]). */
2813 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2814 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2815 		if (mfi_wait_command(sc, ld_cm) != 0) {
2816 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2817 			mfi_release_command(ld_cm);
2818 			free(ld_info, M_MFIBUF);
2822 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2823 			free(ld_info, M_MFIBUF);
2824 			mfi_release_command(ld_cm);
2828 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2830 			if (ld_info->ld_config.params.isSSCD == 1)
2833 			mfi_release_command(ld_cm);
2834 			free(ld_info, M_MFIBUF);
/*
 * Build kernel bounce buffers for a userland STP (SATA pass-through)
 * command.  For each user iovec, a dedicated DMA tag/map/buffer is
 * created, its bus address is recorded both in the megasas_sge array
 * inside the frame and in the frame's sg32/sg64 list, and the user data
 * is copied in.  The buffers are torn down by the ioctl path after the
 * command completes.
 */
2841 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2844 	struct mfi_ioc_packet *ioc;
2845 	ioc = (struct mfi_ioc_packet *)arg;
2846 	int sge_size, error;
2847 	struct megasas_sge *kern_sge;
2849 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
/* SG list lives inside the frame at the user-supplied offset. */
2850 	kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2851 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2853 	if (sizeof(bus_addr_t) == 8) {
2854 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2855 		cm->cm_extra_frames = 2;
2856 		sge_size = sizeof(struct mfi_sg64);
2858 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2859 		sge_size = sizeof(struct mfi_sg32);
2862 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2863 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2864 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
2865 		    1, 0,			/* algnmnt, boundary */
2866 		    BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2867 		    BUS_SPACE_MAXADDR,	/* highaddr */
2868 		    NULL, NULL,		/* filter, filterarg */
2869 		    ioc->mfi_sgl[i].iov_len,/* maxsize */
2871 		    ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2872 		    BUS_DMA_ALLOCNOW,	/* flags */
2873 		    NULL, NULL,		/* lockfunc, lockarg */
2874 		    &sc->mfi_kbuff_arr_dmat[i])) {
2875 			device_printf(sc->mfi_dev,
2876 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2880 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2881 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2882 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2883 			device_printf(sc->mfi_dev,
2884 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2888 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2889 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2890 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2891 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2893 		if (!sc->kbuff_arr[i]) {
2894 			device_printf(sc->mfi_dev,
2895 			    "Could not allocate memory for kbuff_arr info\n");
2898 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2899 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2901 		if (sizeof(bus_addr_t) == 8) {
2902 			cm->cm_frame->stp.sgl.sg64[i].addr =
2903 			    kern_sge[i].phys_addr;
2904 			cm->cm_frame->stp.sgl.sg64[i].len =
2905 			    ioc->mfi_sgl[i].iov_len;
/*
 * BUG FIX: the 32-bit SG entry must receive the bus address in its
 * .addr field.  The original code assigned it to .len and then
 * immediately overwrote .len with the length, leaving .addr unset.
 */
2907 			cm->cm_frame->stp.sgl.sg32[i].addr =
2908 			    kern_sge[i].phys_addr;
2909 			cm->cm_frame->stp.sgl.sg32[i].len =
2910 			    ioc->mfi_sgl[i].iov_len;
/* Copy the user's payload into the freshly mapped kernel buffer. */
2913 		error = copyin(ioc->mfi_sgl[i].iov_base,
2915 		    ioc->mfi_sgl[i].iov_len);
2917 			device_printf(sc->mfi_dev, "Copy in failed\n");
2922 	cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * Execute a userland MFIIO_PASSTHRU DCMD: copy in the data buffer (capped
 * at 1MB), run the frame through the normal pre/post checks, and copy the
 * resulting frame and data back out to userland.
 */
2927 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2929 	struct mfi_command *cm;
2930 	struct mfi_dcmd_frame *dcmd;
2931 	void *ioc_buf = NULL;
2933 	int error = 0, locked;
2936 	if (ioc->buf_size > 0) {
/* Reject absurd user buffer sizes before allocating. */
2937 		if (ioc->buf_size > 1024 * 1024)
2939 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2940 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2942 			device_printf(sc->mfi_dev, "failed to copyin\n");
2943 			free(ioc_buf, M_MFIBUF);
2948 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2950 	mtx_lock(&sc->mfi_io_lock);
/* Sleep until a free command slot is available. */
2951 	while ((cm = mfi_dequeue_free(sc)) == NULL)
2952 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2954 	/* Save context for later */
2955 	context = cm->cm_frame->header.context;
2957 	dcmd = &cm->cm_frame->dcmd;
2958 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2960 	cm->cm_sg = &dcmd->sgl;
2961 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2962 	cm->cm_data = ioc_buf;
2963 	cm->cm_len = ioc->buf_size;
2965 	/* restore context */
2966 	cm->cm_frame->header.context = context;
2968 	/* Cheat since we don't know if we're writing or reading */
2969 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2971 	error = mfi_check_command_pre(sc, cm);
2975 	error = mfi_wait_command(sc, cm);
2977 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the completed frame (status etc.) to the caller's struct. */
2980 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2981 	mfi_check_command_post(sc, cm);
2983 	mfi_release_command(cm);
2984 	mtx_unlock(&sc->mfi_io_lock);
2985 	mfi_config_unlock(sc, locked);
2986 	if (ioc->buf_size > 0)
2987 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2989 	free(ioc_buf, M_MFIBUF);
2993 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * Main ioctl dispatcher for the mfi control device.  Handles statistics
 * queries, disk queries, the native MFI_CMD frame pass-through (including
 * the 32-bit compat variant and STP commands), AEN registration, the
 * Linux ioctl shims, and MFIIO_PASSTHRU.  Frame pass-through copies the
 * user frame in, bounces data through a kernel buffer, runs the pre/post
 * config checks, and copies data/sense/status back out.
 */
2996 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2998 	struct mfi_softc *sc;
2999 	union mfi_statrequest *ms;
3000 	struct mfi_ioc_packet *ioc;
3001 #ifdef COMPAT_FREEBSD32
3002 	struct mfi_ioc_packet32 *ioc32;
3004 	struct mfi_ioc_aen *aen;
3005 	struct mfi_command *cm = NULL;
3006 	uint32_t context = 0;
3007 	union mfi_sense_ptr sense_ptr;
3008 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3011 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3012 #ifdef COMPAT_FREEBSD32
3013 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3014 	struct mfi_ioc_passthru iop_swab;
/* Refuse all ioctls once the adapter has hit a hard error. */
3024 	if (sc->hw_crit_error)
3027 	if (sc->issuepend_done == 0)
3032 		ms = (union mfi_statrequest *)arg;
3033 		switch (ms->ms_item) {
3038 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3039 			    sizeof(struct mfi_qstat));
3046 	case MFIIO_QUERY_DISK:
3048 		struct mfi_query_disk *qd;
3049 		struct mfi_disk *ld;
3051 		qd = (struct mfi_query_disk *)arg;
3052 		mtx_lock(&sc->mfi_io_lock);
3053 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3054 			if (ld->ld_id == qd->array_id)
3059 			mtx_unlock(&sc->mfi_io_lock);
3063 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3065 		bzero(qd->devname, SPECNAMELEN + 1);
3066 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3067 		mtx_unlock(&sc->mfi_io_lock);
3071 #ifdef COMPAT_FREEBSD32
3075 		devclass_t devclass;
3076 		ioc = (struct mfi_ioc_packet *)arg;
/* The ioctl may target a different adapter than the one opened. */
3079 		adapter = ioc->mfi_adapter_no;
3080 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3081 			devclass = devclass_find("mfi");
3082 			sc = devclass_get_softc(devclass, adapter);
3084 		mtx_lock(&sc->mfi_io_lock);
3085 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3086 			mtx_unlock(&sc->mfi_io_lock);
3089 		mtx_unlock(&sc->mfi_io_lock);
3093 		 * save off original context since copying from user
3094 		 * will clobber some data
3096 		context = cm->cm_frame->header.context;
3097 		cm->cm_frame->header.context = cm->cm_index;
3099 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3100 		    2 * MEGAMFI_FRAME_SIZE);
3101 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3102 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3103 		cm->cm_frame->header.scsi_status = 0;
3104 		cm->cm_frame->header.pad0 = 0;
3105 		if (ioc->mfi_sge_count) {
3107 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3111 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3112 			cm->cm_flags |= MFI_CMD_DATAIN;
3113 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3114 			cm->cm_flags |= MFI_CMD_DATAOUT;
3115 		/* Legacy app shim */
3116 		if (cm->cm_flags == 0)
3117 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3118 		cm->cm_len = cm->cm_frame->header.data_len;
/* STP commands carry an extra first-iovec payload in cm_stp_len. */
3119 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3120 #ifdef COMPAT_FREEBSD32
3121 			if (cmd == MFI_CMD) {
3124 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3125 #ifdef COMPAT_FREEBSD32
3127 				/* 32bit on 64bit */
3128 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3129 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3132 			cm->cm_len += cm->cm_stp_len;
3135 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3136 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3138 			if (cm->cm_data == NULL) {
3139 				device_printf(sc->mfi_dev, "Malloc failed\n");
3146 		/* restore header context */
3147 		cm->cm_frame->header.context = context;
3149 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3150 			res = mfi_stp_cmd(sc, cm, arg);
3155 		if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3156 		    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
/* Gather the user's scatter list into the contiguous bounce buffer. */
3157 			for (i = 0; i < ioc->mfi_sge_count; i++) {
3158 #ifdef COMPAT_FREEBSD32
3159 				if (cmd == MFI_CMD) {
3162 					addr = ioc->mfi_sgl[i].iov_base;
3163 					len = ioc->mfi_sgl[i].iov_len;
3164 #ifdef COMPAT_FREEBSD32
3166 					/* 32bit on 64bit */
3167 					ioc32 = (struct mfi_ioc_packet32 *)ioc;
3168 					addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3169 					len = ioc32->mfi_sgl[i].iov_len;
3172 				error = copyin(addr, temp, len);
3174 					device_printf(sc->mfi_dev,
3175 					    "Copy in failed\n");
3183 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3184 			locked = mfi_config_lock(sc,
3185 			    cm->cm_frame->dcmd.opcode);
/* Point pass-through sense at this command's preallocated sense buffer. */
3187 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3188 			cm->cm_frame->pass.sense_addr_lo =
3189 			    (uint32_t)cm->cm_sense_busaddr;
3190 			cm->cm_frame->pass.sense_addr_hi =
3191 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3193 		mtx_lock(&sc->mfi_io_lock);
/* CacheCade targets skip the pre/post disk enable/disable dance. */
3194 		skip_pre_post = mfi_check_for_sscd (sc, cm);
3195 		if (!skip_pre_post) {
3196 			error = mfi_check_command_pre(sc, cm);
3198 				mtx_unlock(&sc->mfi_io_lock);
3202 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3203 			device_printf(sc->mfi_dev,
3204 			    "Controller polled failed\n");
3205 			mtx_unlock(&sc->mfi_io_lock);
3208 		if (!skip_pre_post) {
3209 			mfi_check_command_post(sc, cm);
3211 		mtx_unlock(&sc->mfi_io_lock);
3213 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3215 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3216 		    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
/* Scatter the bounce buffer back out to the user's iovecs. */
3217 			for (i = 0; i < ioc->mfi_sge_count; i++) {
3218 #ifdef COMPAT_FREEBSD32
3219 				if (cmd == MFI_CMD) {
3222 					addr = ioc->mfi_sgl[i].iov_base;
3223 					len = ioc->mfi_sgl[i].iov_len;
3224 #ifdef COMPAT_FREEBSD32
3226 					/* 32bit on 64bit */
3227 					ioc32 = (struct mfi_ioc_packet32 *)ioc;
3228 					addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3229 					len = ioc32->mfi_sgl[i].iov_len;
3232 				error = copyout(temp, addr, len);
3234 					device_printf(sc->mfi_dev,
3235 					    "Copy out failed\n");
3243 		if (ioc->mfi_sense_len) {
3244 			/* get user-space sense ptr then copy out sense */
3245 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3246 			    &sense_ptr.sense_ptr_data[0],
3247 			    sizeof(sense_ptr.sense_ptr_data));
3248 #ifdef COMPAT_FREEBSD32
3249 			if (cmd != MFI_CMD) {
3251 				 * not 64bit native so zero out any address
3253 				sense_ptr.addr.high = 0;
3256 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3257 			    ioc->mfi_sense_len);
3259 				device_printf(sc->mfi_dev,
3260 				    "Copy out failed\n");
3265 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3267 		mfi_config_unlock(sc, locked);
3269 		free(data, M_MFIBUF);
/* STP cleanup: tear down the per-iovec DMA resources mfi_stp_cmd made. */
3270 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3271 			for (i = 0; i < 2; i++) {
3272 				if (sc->kbuff_arr[i]) {
/*
 * NOTE(review): `sc->mfi_kbuff_arr_busaddr` is compared as a bare
 * identifier; if it is an array, this is always true and the intended
 * check was probably `sc->mfi_kbuff_arr_busaddr[i] != 0` — confirm
 * against the softc declaration.
 */
3273 					if (sc->mfi_kbuff_arr_busaddr != 0)
3275 						    sc->mfi_kbuff_arr_dmat[i],
3276 						    sc->mfi_kbuff_arr_dmamap[i]
3278 					if (sc->kbuff_arr[i] != NULL)
3280 						    sc->mfi_kbuff_arr_dmat[i],
3282 						    sc->mfi_kbuff_arr_dmamap[i]
3284 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3285 						bus_dma_tag_destroy(
3286 						    sc->mfi_kbuff_arr_dmat[i]);
3291 		mtx_lock(&sc->mfi_io_lock);
3292 		mfi_release_command(cm);
3293 		mtx_unlock(&sc->mfi_io_lock);
3299 		aen = (struct mfi_ioc_aen *)arg;
3300 		error = mfi_aen_register(sc, aen->aen_seq_num,
3301 		    aen->aen_class_locale);
3304 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3306 		devclass_t devclass;
3307 		struct mfi_linux_ioc_packet l_ioc;
3310 		devclass = devclass_find("mfi");
3311 		if (devclass == NULL)
3314 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3317 		adapter = l_ioc.lioc_adapter_no;
3318 		sc = devclass_get_softc(devclass, adapter);
/* Delegate to the Linux-compatibility handler below. */
3321 		return (mfi_linux_ioctl_int(sc->mfi_cdev,
3322 		    cmd, arg, flag, td));
3325 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3327 		devclass_t devclass;
3328 		struct mfi_linux_ioc_aen l_aen;
3331 		devclass = devclass_find("mfi");
3332 		if (devclass == NULL)
3335 		error = copyin(arg, &l_aen, sizeof(l_aen));
3338 		adapter = l_aen.laen_adapter_no;
3339 		sc = devclass_get_softc(devclass, adapter);
3342 		return (mfi_linux_ioctl_int(sc->mfi_cdev,
3343 		    cmd, arg, flag, td));
3346 #ifdef COMPAT_FREEBSD32
3347 	case MFIIO_PASSTHRU32:
3348 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
/* Widen the 32-bit passthru struct, then fall through to the native case. */
3352 		iop_swab.ioc_frame = iop32->ioc_frame;
3353 		iop_swab.buf_size = iop32->buf_size;
3354 		iop_swab.buf = PTRIN(iop32->buf);
3358 	case MFIIO_PASSTHRU:
3359 		error = mfi_user_command(sc, iop);
3360 #ifdef COMPAT_FREEBSD32
3361 		if (cmd == MFIIO_PASSTHRU32)
3362 			iop32->ioc_frame = iop_swab.ioc_frame;
3366 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Linux-compatibility ioctl handler (megaraid_sas shim).  Mirrors the
 * native MFI_CMD pass-through path: copy in the Linux-format frame and
 * scatter list, bounce data through a kernel buffer, run the pre/post
 * config checks, and copy data/sense/status back out.  All user pointers
 * are 32-bit (Linux emulation only supports 32-bit clients).
 */
3375 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3377 	struct mfi_softc *sc;
3378 	struct mfi_linux_ioc_packet l_ioc;
3379 	struct mfi_linux_ioc_aen l_aen;
3380 	struct mfi_command *cm = NULL;
3381 	struct mfi_aen *mfi_aen_entry;
3382 	union mfi_sense_ptr sense_ptr;
3383 	uint32_t context = 0;
3384 	uint8_t *data = NULL, *temp;
3391 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3392 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Bound the SG count before trusting it in the loops below. */
3396 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3400 		mtx_lock(&sc->mfi_io_lock);
3401 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3402 			mtx_unlock(&sc->mfi_io_lock);
3405 		mtx_unlock(&sc->mfi_io_lock);
3409 		 * save off original context since copying from user
3410 		 * will clobber some data
3412 		context = cm->cm_frame->header.context;
3414 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3415 		    2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3416 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3417 		    * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3418 		cm->cm_frame->header.scsi_status = 0;
3419 		cm->cm_frame->header.pad0 = 0;
3420 		if (l_ioc.lioc_sge_count)
3422 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3424 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3425 			cm->cm_flags |= MFI_CMD_DATAIN;
3426 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3427 			cm->cm_flags |= MFI_CMD_DATAOUT;
3428 		cm->cm_len = cm->cm_frame->header.data_len;
3430 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3431 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3433 			if (cm->cm_data == NULL) {
3434 				device_printf(sc->mfi_dev, "Malloc failed\n");
3441 		/* restore header context */
3442 		cm->cm_frame->header.context = context;
/* Gather user iovecs into the contiguous kernel buffer for DATAOUT. */
3445 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3446 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3447 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3449 				    l_ioc.lioc_sgl[i].iov_len);
3451 					device_printf(sc->mfi_dev,
3452 					    "Copy in failed\n");
3455 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3459 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3460 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3462 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3463 			cm->cm_frame->pass.sense_addr_lo =
3464 			    (uint32_t)cm->cm_sense_busaddr;
3465 			cm->cm_frame->pass.sense_addr_hi =
3466 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3469 		mtx_lock(&sc->mfi_io_lock);
3470 		error = mfi_check_command_pre(sc, cm);
3472 			mtx_unlock(&sc->mfi_io_lock);
3476 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3477 			device_printf(sc->mfi_dev,
3478 			    "Controller polled failed\n");
3479 			mtx_unlock(&sc->mfi_io_lock);
3483 		mfi_check_command_post(sc, cm);
3484 		mtx_unlock(&sc->mfi_io_lock);
/* Scatter the kernel buffer back to the user iovecs for DATAIN. */
3487 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3488 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3489 				error = copyout(temp,
3490 				    PTRIN(l_ioc.lioc_sgl[i].iov_base),
3491 				    l_ioc.lioc_sgl[i].iov_len);
3493 					device_printf(sc->mfi_dev,
3494 					    "Copy out failed\n");
3497 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3501 		if (l_ioc.lioc_sense_len) {
3502 			/* get user-space sense ptr then copy out sense */
3503 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3504 			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
3505 			    &sense_ptr.sense_ptr_data[0],
3506 			    sizeof(sense_ptr.sense_ptr_data));
3509 			 * only 32bit Linux support so zero out any
3510 			 * address over 32bit
3512 			sense_ptr.addr.high = 0;
3514 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3515 			    l_ioc.lioc_sense_len);
3517 				device_printf(sc->mfi_dev,
3518 				    "Copy out failed\n");
/* Write the completion status directly into the user's frame header. */
3523 		error = copyout(&cm->cm_frame->header.cmd_status,
3524 		    &((struct mfi_linux_ioc_packet*)arg)
3525 		    ->lioc_frame.hdr.cmd_status,
3528 			device_printf(sc->mfi_dev,
3529 			    "Copy out failed\n");
3534 		mfi_config_unlock(sc, locked);
3536 		free(data, M_MFIBUF);
3538 		mtx_lock(&sc->mfi_io_lock);
3539 		mfi_release_command(cm);
3540 		mtx_unlock(&sc->mfi_io_lock);
3544 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3545 		error = copyin(arg, &l_aen, sizeof(l_aen));
3548 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3549 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3551 		mtx_lock(&sc->mfi_io_lock);
3552 		if (mfi_aen_entry != NULL) {
3553 			mfi_aen_entry->p = curproc;
3554 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3557 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3558 		    l_aen.laen_class_locale);
/* Registration failed: remove and free the entry just inserted. */
3561 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3563 			free(mfi_aen_entry, M_MFIBUF);
3565 		mtx_unlock(&sc->mfi_io_lock);
3569 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * poll(2) entry point: report readable when an AEN has fired (consuming
 * the trigger), otherwise record the thread for selwakeup on the next AEN.
 */
3578 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3580 	struct mfi_softc *sc;
3585 	if (poll_events & (POLLIN | POLLRDNORM)) {
3586 		if (sc->mfi_aen_triggered != 0) {
3587 			revents |= poll_events & (POLLIN | POLLRDNORM);
3588 			sc->mfi_aen_triggered = 0;
/* No AEN command outstanding at all: nothing will ever arrive. */
3590 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3596 	if (poll_events & (POLLIN | POLLRDNORM)) {
3597 		sc->mfi_poll_waiting = 1;
3598 		selrecord(td, &sc->mfi_select);
/*
 * Debug helper body (function header elided above): walk every mfi
 * softc instance and print any busy command older than MFI_CMD_TIMEOUT.
 * NOTE(review): presumably mfi_dump_all() — confirm against full source.
 */
3608 	struct mfi_softc *sc;
3609 	struct mfi_command *cm;
3615 	dc = devclass_find("mfi");
3617 		printf("No mfi dev class\n");
3621 	for (i = 0; ; i++) {
3622 		sc = devclass_get_softc(dc, i);
3625 		device_printf(sc->mfi_dev, "Dumping\n\n");
/* Commands stamped before this uptime have exceeded the timeout. */
3627 		deadline = time_uptime - MFI_CMD_TIMEOUT;
3628 		mtx_lock(&sc->mfi_io_lock);
3629 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3630 			if (cm->cm_timestamp < deadline) {
3631 				device_printf(sc->mfi_dev,
3632 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3633 				    cm, (int)(time_uptime - cm->cm_timestamp));
3644 		mtx_unlock(&sc->mfi_io_lock);
3651 mfi_timeout(void *data)
3653 struct mfi_softc *sc = (struct mfi_softc *)data;
3654 struct mfi_command *cm;
3658 deadline = time_uptime - MFI_CMD_TIMEOUT;
3659 if (sc->adpreset == 0) {
3660 if (!mfi_tbolt_reset(sc)) {
3661 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3665 mtx_lock(&sc->mfi_io_lock);
3666 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3667 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3669 if (cm->cm_timestamp < deadline) {
3670 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3671 cm->cm_timestamp = time_uptime;
3673 device_printf(sc->mfi_dev,
3674 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3675 cm, (int)(time_uptime - cm->cm_timestamp)
3678 MFI_VALIDATE_CMD(sc, cm);
3689 mtx_unlock(&sc->mfi_io_lock);
3691 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,