2 * Copyright (c) 2006 IronPort Systems
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
56 #include "opt_compat.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/kernel.h>
65 #include <sys/selinfo.h>
68 #include <sys/eventhandler.h>
70 #include <sys/bus_dma.h>
72 #include <sys/ioccom.h>
75 #include <sys/signalvar.h>
76 #include <sys/taskqueue.h>
78 #include <machine/bus.h>
79 #include <machine/resource.h>
81 #include <dev/mfi/mfireg.h>
82 #include <dev/mfi/mfi_ioctl.h>
83 #include <dev/mfi/mfivar.h>
84 #include <sys/interrupt.h>
85 #include <sys/priority.h>
/*
 * Forward declarations for the driver's file-local helpers; everything
 * here is static and defined later in this file.
 * NOTE(review): this extraction carries the original line numbers as a
 * prefix on every line and is missing some lines (e.g. the
 * "uint32_t frame_cnt);" continuations of the mfi_issue_cmd_* prototypes).
 */
87 static int mfi_alloc_commands(struct mfi_softc *);
88 static int mfi_comms_init(struct mfi_softc *);
89 static int mfi_get_controller_info(struct mfi_softc *);
90 static int mfi_get_log_state(struct mfi_softc *,
91 struct mfi_evt_log_state **);
92 static int mfi_parse_entries(struct mfi_softc *, int, int);
93 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
94 uint32_t, void **, size_t);
95 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
96 static void mfi_startup(void *arg);
97 static void mfi_intr(void *arg);
98 static void mfi_ldprobe(struct mfi_softc *sc);
99 static void mfi_syspdprobe(struct mfi_softc *sc);
100 static void mfi_handle_evt(void *context, int pending);
101 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
102 static void mfi_aen_complete(struct mfi_command *);
103 static int mfi_add_ld(struct mfi_softc *sc, int);
104 static void mfi_add_ld_complete(struct mfi_command *);
105 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
106 static void mfi_add_sys_pd_complete(struct mfi_command *);
107 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
108 static void mfi_bio_complete(struct mfi_command *);
109 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
110 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
111 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int mfi_abort(struct mfi_softc *, struct mfi_command *);
113 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void mfi_timeout(void *);
115 static int mfi_user_command(struct mfi_softc *,
116 struct mfi_ioc_passthru *);
/* Per-chip-generation register access methods, selected in mfi_attach(). */
117 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
125 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
/* Loader tunables / sysctl knobs under hw.mfi. */
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
/* Locale filter applied when registering for controller events (AENs). */
134 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
137 0, "event message locale");
/* Minimum event class requested from the controller. */
139 static int mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
142 0, "event message class");
/*
 * Driver command pool size; mfi_alloc_commands() clamps this to the
 * firmware's own maximum (read-only once booted, hence CTLFLAG_RD).
 */
144 static int mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
/* Whether JBOD add/remove changes are acted upon (consumed elsewhere). */
149 static int mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
152 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
154 /* Management interface */
155 static d_open_t mfi_open;
156 static d_close_t mfi_close;
157 static d_ioctl_t mfi_ioctl;
158 static d_poll_t mfi_poll;
/*
 * Character-device switch for the /dev/mfi%d management node created in
 * mfi_attach().
 * NOTE(review): several initializer lines (e.g. .d_open, .d_poll,
 * .d_name) are missing from this extraction even though the handlers
 * are declared above — confirm against the full source.
 */
160 static struct cdevsw mfi_cdevsw = {
161 .d_version = D_VERSION,
164 .d_close = mfi_close,
165 .d_ioctl = mfi_ioctl,
/* malloc(9) type used for all of this driver's heap allocations. */
170 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
172 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
173 struct mfi_skinny_dma_info mfi_skinny;
/*
 * Enable controller interrupts on xscale (1064R) parts by writing the
 * outbound interrupt mask register.
 */
176 mfi_enable_intr_xscale(struct mfi_softc *sc)
178 MFI_WRITE4(sc, MFI_OMSK, 0x01);
/*
 * Enable interrupts on PPC-style controllers: clear any latched status
 * via the doorbell-clear register and program the interrupt mask
 * appropriate to the chip generation (1078 / GEN2 / SKINNY).
 */
182 mfi_enable_intr_ppc(struct mfi_softc *sc)
184 if (sc->mfi_flags & MFI_FLAGS_1078) {
185 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
186 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
188 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
189 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
190 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
192 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
/* Skinny parts only unmask bit 0; no ODCR0 write is done here. */
193 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
/* Return the raw firmware status word from outbound message register 0. */
198 mfi_read_fw_status_xscale(struct mfi_softc *sc)
200 return MFI_READ4(sc, MFI_OMSG0);
/* Return the raw firmware status word from the outbound scratchpad. */
204 mfi_read_fw_status_ppc(struct mfi_softc *sc)
206 return MFI_READ4(sc, MFI_OSP0);
/*
 * Check for a pending controller interrupt and acknowledge it by
 * writing the status bits back to MFI_OSTS.  The early-return taken
 * when no valid interrupt is pending is not visible in this extraction.
 */
210 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
214 status = MFI_READ4(sc, MFI_OSTS);
215 if ((status & MFI_OSTS_INTR_VALID) == 0)
218 MFI_WRITE4(sc, MFI_OSTS, status);
/*
 * PPC flavour of the interrupt check/ack.  The "reply message" status
 * bit differs per chip generation; skinny parts ack through MFI_OSTS
 * while the others ack through the doorbell-clear register.  The bodies
 * of the not-our-interrupt early returns are missing from this
 * extraction.
 */
223 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
227 status = MFI_READ4(sc, MFI_OSTS);
228 if (sc->mfi_flags & MFI_FLAGS_1078) {
229 if (!(status & MFI_1078_RM)) {
233 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
234 if (!(status & MFI_GEN2_RM)) {
238 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
239 if (!(status & MFI_SKINNY_RM)) {
/* Acknowledge: skinny via MFI_OSTS, everything else via MFI_ODCR0. */
243 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
244 MFI_WRITE4(sc, MFI_OSTS, status);
246 MFI_WRITE4(sc, MFI_ODCR0, status);
/*
 * Post a command to the xscale inbound queue port: the frame's bus
 * address is shifted down 3 bits and or'd with the extra-frame count.
 */
251 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
253 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
/*
 * Post a command on PPC-style controllers.  Skinny parts write the
 * 64-bit inbound queue port as low/high halves; the others use the
 * single 32-bit port.  Bit 0 is set in the posted word in both cases.
 */
257 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
259 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
260 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
261 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
263 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
/*
 * Drive the controller firmware to MFI_FWSTATE_READY: for each
 * intermediate state, perform the required doorbell/handshake write,
 * set a wait budget, and poll the status register until the state
 * changes.  The fault/stuck failure paths and per-poll delay are only
 * partially visible in this extraction.
 */
268 mfi_transition_firmware(struct mfi_softc *sc)
270 uint32_t fw_state, cur_state;
272 uint32_t cur_abs_reg_val = 0;
273 uint32_t prev_abs_reg_val = 0;
275 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
276 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
277 while (fw_state != MFI_FWSTATE_READY) {
279 device_printf(sc->mfi_dev, "Waiting for firmware to "
281 cur_state = fw_state;
/* Per-state handling: doorbell write (where needed) plus wait budget. */
283 case MFI_FWSTATE_FAULT:
284 device_printf(sc->mfi_dev, "Firmware fault\n");
286 case MFI_FWSTATE_WAIT_HANDSHAKE:
/* Skinny/ThunderBolt parts use the alternate doorbell register. */
287 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
288 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
290 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
291 max_wait = MFI_RESET_WAIT_TIME;
293 case MFI_FWSTATE_OPERATIONAL:
294 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
295 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
297 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
298 max_wait = MFI_RESET_WAIT_TIME;
300 case MFI_FWSTATE_UNDEFINED:
301 case MFI_FWSTATE_BB_INIT:
302 max_wait = MFI_RESET_WAIT_TIME;
304 case MFI_FWSTATE_FW_INIT_2:
305 max_wait = MFI_RESET_WAIT_TIME;
307 case MFI_FWSTATE_FW_INIT:
308 case MFI_FWSTATE_FLUSH_CACHE:
309 max_wait = MFI_RESET_WAIT_TIME;
311 case MFI_FWSTATE_DEVICE_SCAN:
312 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
/* Remember the full status word so scan progress can be detected below. */
313 prev_abs_reg_val = cur_abs_reg_val;
315 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
316 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
317 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
319 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
320 max_wait = MFI_RESET_WAIT_TIME;
323 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
/*
 * Poll up to max_wait*10 times for the state to move on; the delay
 * between polls is not visible in this extraction.
 */
327 for (i = 0; i < (max_wait * 10); i++) {
328 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
329 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
330 if (fw_state == cur_state)
335 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
336 /* Check the device scanning progress */
337 if (prev_abs_reg_val != cur_abs_reg_val) {
341 if (fw_state == cur_state) {
342 device_printf(sc->mfi_dev, "Firmware stuck in state "
/*
 * bus_dmamap_load(9) callback used throughout this file: store the
 * first segment's bus address into the caller-supplied bus_addr_t
 * (these loads are all single-segment mappings).
 */
351 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
356 *addr = segs[0].ds_addr;
/*
 * Bring the controller up: initialize locks and queues, select the
 * register-access methods for the chip flavour, wait for firmware
 * READY, create the DMA tags and pools (version buffer, ThunderBolt
 * areas, data buffers, comms queues, command frames, sense data),
 * allocate the command pool, initialize the firmware comms queues,
 * hook up the interrupt, and expose the management cdev, sysctls and
 * watchdog.  Error-unwind paths are only partially visible in this
 * extraction.
 */
361 mfi_attach(struct mfi_softc *sc)
364 int error, commsz, framessz, sensesz;
365 int frames, unit, max_fw_sge;
366 uint32_t tb_mem_size = 0;
371 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
/* Locks and the driver's various work queues. */
374 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
375 sx_init(&sc->mfi_config_lock, "MFI config");
376 TAILQ_INIT(&sc->mfi_ld_tqh);
377 TAILQ_INIT(&sc->mfi_syspd_tqh);
378 TAILQ_INIT(&sc->mfi_evt_queue);
379 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
380 TAILQ_INIT(&sc->mfi_aen_pids);
381 TAILQ_INIT(&sc->mfi_cam_ccbq);
389 sc->last_seq_num = 0;
/* Pessimistic default; cleared again after mfi_get_controller_info(). */
390 sc->disableOnlineCtrlReset = 1;
391 sc->issuepend_done = 1;
392 sc->hw_crit_error = 0;
/* Select register-access ops per controller family. */
394 if (sc->mfi_flags & MFI_FLAGS_1064R) {
395 sc->mfi_enable_intr = mfi_enable_intr_xscale;
396 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
397 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
398 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
399 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
400 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
401 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
402 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
403 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
404 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
405 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
407 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
409 sc->mfi_enable_intr = mfi_enable_intr_ppc;
410 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
411 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
412 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
416 /* Before we get too far, see if the firmware is working */
417 if ((error = mfi_transition_firmware(sc)) != 0) {
418 device_printf(sc->mfi_dev, "Firmware not in READY state, "
419 "error %d\n", error);
/* DMA-able scratch buffer used for the firmware version exchange. */
423 /* Start: LSIP200113393 */
424 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
425 1, 0, /* algnmnt, boundary */
426 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
427 BUS_SPACE_MAXADDR, /* highaddr */
428 NULL, NULL, /* filter, filterarg */
429 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
431 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
433 NULL, NULL, /* lockfunc, lockarg */
434 &sc->verbuf_h_dmat)) {
435 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
438 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
439 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
440 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
443 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
444 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
445 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
446 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
447 /* End: LSIP200113393 */
450 * Get information needed for sizing the contiguous memory for the
451 * frame pool. Size down the sgl parameter since we know that
452 * we will never need more than what's required for MAXPHYS.
453 * It would be nice if these constants were available at runtime
454 * instead of compile time.
456 status = sc->mfi_read_fw_status(sc);
457 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
458 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
459 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
461 /* ThunderBolt Support get the contiguous memory */
463 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
464 mfi_tbolt_init_globals(sc);
465 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
466 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
467 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
469 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
470 1, 0, /* algnmnt, boundary */
471 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
472 BUS_SPACE_MAXADDR, /* highaddr */
473 NULL, NULL, /* filter, filterarg */
474 tb_mem_size, /* maxsize */
476 tb_mem_size, /* maxsegsize */
478 NULL, NULL, /* lockfunc, lockarg */
480 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
483 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
484 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
485 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
488 bzero(sc->request_message_pool, tb_mem_size);
489 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
490 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
492 /* For ThunderBolt memory init */
493 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
494 0x100, 0, /* alignmnt, boundary */
495 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
496 BUS_SPACE_MAXADDR, /* highaddr */
497 NULL, NULL, /* filter, filterarg */
498 MFI_FRAME_SIZE, /* maxsize */
500 MFI_FRAME_SIZE, /* maxsegsize */
502 NULL, NULL, /* lockfunc, lockarg */
503 &sc->mfi_tb_init_dmat)) {
504 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
507 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
508 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
509 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
512 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
513 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
514 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
515 &sc->mfi_tb_init_busaddr, 0);
516 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
518 device_printf(sc->mfi_dev,
519 "Thunderbolt pool preparation error\n");
524 Allocate DMA memory mapping for MPI2 IOC Init descriptor,
525 we are taking it diffrent from what we have allocated for Request
526 and reply descriptors to avoid confusion later
528 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
529 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
530 1, 0, /* algnmnt, boundary */
531 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
532 BUS_SPACE_MAXADDR, /* highaddr */
533 NULL, NULL, /* filter, filterarg */
534 tb_mem_size, /* maxsize */
536 tb_mem_size, /* maxsegsize */
538 NULL, NULL, /* lockfunc, lockarg */
539 &sc->mfi_tb_ioc_init_dmat)) {
540 device_printf(sc->mfi_dev,
541 "Cannot allocate comms DMA tag\n");
544 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
545 (void **)&sc->mfi_tb_ioc_init_desc,
546 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
547 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
550 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
551 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
552 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
553 &sc->mfi_tb_ioc_init_busaddr, 0);
556 * Create the dma tag for data buffers. Used both for block I/O
557 * and for various internal data queries.
559 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
560 1, 0, /* algnmnt, boundary */
561 BUS_SPACE_MAXADDR, /* lowaddr */
562 BUS_SPACE_MAXADDR, /* highaddr */
563 NULL, NULL, /* filter, filterarg */
564 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
565 sc->mfi_max_sge, /* nsegments */
566 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
567 BUS_DMA_ALLOCNOW, /* flags */
568 busdma_lock_mutex, /* lockfunc */
569 &sc->mfi_io_lock, /* lockfuncarg */
570 &sc->mfi_buffer_dmat)) {
571 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
576 * Allocate DMA memory for the comms queues. Keep it under 4GB for
577 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
578 * entry, so the calculated size here will be will be 1 more than
579 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
581 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
582 sizeof(struct mfi_hwcomms);
583 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
584 1, 0, /* algnmnt, boundary */
585 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
586 BUS_SPACE_MAXADDR, /* highaddr */
587 NULL, NULL, /* filter, filterarg */
588 commsz, /* maxsize */
590 commsz, /* maxsegsize */
592 NULL, NULL, /* lockfunc, lockarg */
593 &sc->mfi_comms_dmat)) {
594 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
597 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
598 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
599 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
602 bzero(sc->mfi_comms, commsz);
603 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
604 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
606 * Allocate DMA memory for the command frames. Keep them in the
607 * lower 4GB for efficiency. Calculate the size of the commands at
608 * the same time; each command is one 64 byte frame plus a set of
609 * additional frames for holding sg lists or other data.
610 * The assumption here is that the SG list will start at the second
611 * frame and not use the unused bytes in the first frame. While this
612 * isn't technically correct, it simplifies the calculation and allows
613 * for command frames that might be larger than an mfi_io_frame.
615 if (sizeof(bus_addr_t) == 8) {
616 sc->mfi_sge_size = sizeof(struct mfi_sg64);
617 sc->mfi_flags |= MFI_FLAGS_SG64;
619 sc->mfi_sge_size = sizeof(struct mfi_sg32);
621 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
622 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
623 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
624 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
625 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
626 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
627 64, 0, /* algnmnt, boundary */
628 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
629 BUS_SPACE_MAXADDR, /* highaddr */
630 NULL, NULL, /* filter, filterarg */
631 framessz, /* maxsize */
633 framessz, /* maxsegsize */
635 NULL, NULL, /* lockfunc, lockarg */
636 &sc->mfi_frames_dmat)) {
637 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
640 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
641 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
642 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
645 bzero(sc->mfi_frames, framessz);
646 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
647 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
649 * Allocate DMA memory for the frame sense data. Keep them in the
650 * lower 4GB for efficiency
652 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
653 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
654 4, 0, /* algnmnt, boundary */
655 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
656 BUS_SPACE_MAXADDR, /* highaddr */
657 NULL, NULL, /* filter, filterarg */
658 sensesz, /* maxsize */
660 sensesz, /* maxsegsize */
662 NULL, NULL, /* lockfunc, lockarg */
663 &sc->mfi_sense_dmat)) {
664 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
667 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
668 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
669 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
672 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
673 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
674 if ((error = mfi_alloc_commands(sc)) != 0)
677 /* Before moving the FW to operational state, check whether
678 * hostmemory is required by the FW or not
681 /* ThunderBolt MFI_IOC2 INIT */
682 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
683 sc->mfi_disable_intr(sc);
684 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
685 device_printf(sc->mfi_dev,
686 "TB Init has failed with error %d\n",error);
690 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
692 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
693 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
695 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
698 sc->mfi_enable_intr(sc);
/* Non-ThunderBolt path: legacy comms init and interrupt handler. */
701 if ((error = mfi_comms_init(sc)) != 0)
704 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
705 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
706 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
709 sc->mfi_enable_intr(sc);
711 if ((error = mfi_get_controller_info(sc)) != 0)
713 sc->disableOnlineCtrlReset = 0;
715 /* Register a config hook to probe the bus for arrays */
716 sc->mfi_ich.ich_func = mfi_startup;
717 sc->mfi_ich.ich_arg = sc;
718 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
719 device_printf(sc->mfi_dev, "Cannot establish configuration "
/*
 * BUG(review): the comma operator discards mfi_aen_setup()'s return
 * value and compares the constant 0 instead, so this condition is
 * always false and AEN-setup failures are silently ignored.  It
 * should read "if ((error = mfi_aen_setup(sc, 0)) != 0) {".
 */
723 if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
724 mtx_unlock(&sc->mfi_io_lock);
729 * Register a shutdown handler.
731 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
732 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
733 device_printf(sc->mfi_dev, "Warning: shutdown event "
734 "registration failed\n");
738 * Create the control device for doing management
740 unit = device_get_unit(sc->mfi_dev);
741 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
742 0640, "mfi%d", unit);
744 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
745 if (sc->mfi_cdev != NULL)
746 sc->mfi_cdev->si_drv1 = sc;
747 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
748 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
749 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
750 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
751 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
752 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
753 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
754 &sc->mfi_keep_deleted_volumes, 0,
755 "Don't detach the mfid device for a busy volume that is deleted");
/* Probe for the mfip passthrough child and attach whatever is found. */
757 device_add_child(sc->mfi_dev, "mfip", -1);
758 bus_generic_attach(sc->mfi_dev);
760 /* Start the timeout watchdog */
761 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
762 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
/*
 * Allocate the driver's command array, sized min(hw.mfi.max_cmds,
 * firmware maximum).  Each mfi_command is pointed at its slice of the
 * preallocated frame and sense DMA regions, given a data-buffer DMA
 * map, and released onto the free queue.
 */
769 mfi_alloc_commands(struct mfi_softc *sc)
771 struct mfi_command *cm;
775 * XXX Should we allocate all the commands up front, or allocate on
776 * demand later like 'aac' does?
778 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
780 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
781 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
783 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
786 for (i = 0; i < ncmds; i++) {
787 cm = &sc->mfi_commands[i];
/* Virtual and bus addresses of this command's frame slice. */
788 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
789 sc->mfi_cmd_size * i);
790 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
791 sc->mfi_cmd_size * i;
/* The context word round-trips this command's index through firmware. */
792 cm->cm_frame->header.context = i;
793 cm->cm_sense = &sc->mfi_sense[i];
794 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
797 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
798 &cm->cm_dmamap) == 0) {
/* mfi_release_command() asserts the io lock is held. */
799 mtx_lock(&sc->mfi_io_lock);
800 mfi_release_command(cm);
801 mtx_unlock(&sc->mfi_io_lock);
805 sc->mfi_total_cmds++;
/*
 * Return a command to the free pool, scrubbing the reusable frame
 * header fields and per-command bookkeeping while preserving the
 * context (command index) word.  Caller must hold the io lock.
 */
812 mfi_release_command(struct mfi_command *cm)
814 struct mfi_frame_header *hdr;
817 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
820 * Zero out the important fields of the frame, but make sure the
821 * context field is preserved. For efficiency, handle the fields
822 * as 32 bit words. Clear out the first S/G entry too for safety.
824 hdr = &cm->cm_frame->header;
825 if (cm->cm_data != NULL && hdr->sg_count) {
826 cm->cm_sg->sg32[0].len = 0;
827 cm->cm_sg->sg32[0].addr = 0;
/* Words 2-3 are deliberately skipped so the context field survives. */
830 hdr_data = (uint32_t *)cm->cm_frame;
831 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
832 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
833 hdr_data[4] = 0; /* flags, timeout */
834 hdr_data[5] = 0; /* data_len */
836 cm->cm_extra_frames = 0;
838 cm->cm_complete = NULL;
839 cm->cm_private = NULL;
842 cm->cm_total_frame_size = 0;
843 cm->retry_for_fw_reset = 0;
845 mfi_enqueue_free(cm);
/*
 * Build (but do not send) a DCMD frame: dequeue a free command, zero
 * its frame while keeping the context word, obtain a data buffer of
 * bufsize bytes, and fill in the DCMD header for the given opcode.
 * On success the command is returned via *cmp and the buffer via
 * *bufp.  Caller must hold the io lock.
 */
849 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
850 uint32_t opcode, void **bufp, size_t bufsize)
852 struct mfi_command *cm;
853 struct mfi_dcmd_frame *dcmd;
855 uint32_t context = 0;
857 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
859 cm = mfi_dequeue_free(sc);
863 /* Zero out the MFI frame */
864 context = cm->cm_frame->header.context;
865 bzero(cm->cm_frame, sizeof(union mfi_frame));
866 cm->cm_frame->header.context = context;
/*
 * Allocate a fresh buffer; presumably an existing *bufp is reused
 * instead when non-NULL (that branch is not visible in this
 * extraction — see the *bufp == NULL check at the bottom).
 */
868 if ((bufsize > 0) && (bufp != NULL)) {
870 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
872 mfi_release_command(cm);
881 dcmd = &cm->cm_frame->dcmd;
882 bzero(dcmd->mbox, MFI_MBOX_SIZE);
883 dcmd->header.cmd = MFI_CMD_DCMD;
884 dcmd->header.timeout = 0;
885 dcmd->header.flags = 0;
886 dcmd->header.data_len = bufsize;
887 dcmd->header.scsi_status = 0;
888 dcmd->opcode = opcode;
889 cm->cm_sg = &dcmd->sgl;
890 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
893 cm->cm_private = buf;
894 cm->cm_len = bufsize;
/* Hand a newly-allocated buffer back to the caller. */
897 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
/*
 * Send the MFI INIT command telling the firmware where the reply queue
 * and the producer/consumer index words live (the mfi_hwcomms area
 * mapped in mfi_attach()).  Issued polled; returns 0 on success.
 */
903 mfi_comms_init(struct mfi_softc *sc)
905 struct mfi_command *cm;
906 struct mfi_init_frame *init;
907 struct mfi_init_qinfo *qinfo;
909 uint32_t context = 0;
911 mtx_lock(&sc->mfi_io_lock);
912 if ((cm = mfi_dequeue_free(sc)) == NULL)
915 /* Zero out the MFI frame */
916 context = cm->cm_frame->header.context;
917 bzero(cm->cm_frame, sizeof(union mfi_frame));
918 cm->cm_frame->header.context = context;
921 * Abuse the SG list area of the frame to hold the init_qinfo
924 init = &cm->cm_frame->init;
925 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
927 bzero(qinfo, sizeof(struct mfi_init_qinfo));
/* One more reply-queue entry than commands (hardware requirement noted
 * where commsz is computed in mfi_attach()). */
928 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
/* Bus addresses of the reply queue and PI/CI words inside mfi_hwcomms. */
929 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
930 offsetof(struct mfi_hwcomms, hw_reply_q);
931 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
932 offsetof(struct mfi_hwcomms, hw_pi);
933 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
934 offsetof(struct mfi_hwcomms, hw_ci);
936 init->header.cmd = MFI_CMD_INIT;
937 init->header.data_len = sizeof(struct mfi_init_qinfo);
/* qinfo lives in the frame right after the init frame itself. */
938 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
940 cm->cm_flags = MFI_CMD_POLLED;
942 if ((error = mfi_mapcmd(sc, cm)) != 0) {
943 device_printf(sc->mfi_dev, "failed to send init command\n");
944 mtx_unlock(&sc->mfi_io_lock);
947 mfi_release_command(cm);
948 mtx_unlock(&sc->mfi_io_lock);
/*
 * Issue MFI_DCMD_CTRL_GETINFO (polled) and derive sc->mfi_max_io from
 * the controller's stripe/request limits; on command failure fall back
 * to an estimate based on the SG list size.  Also latches the
 * controller's disableOnlineCtrlReset property into the softc.
 */
954 mfi_get_controller_info(struct mfi_softc *sc)
956 struct mfi_command *cm = NULL;
957 struct mfi_ctrl_info *ci = NULL;
958 uint32_t max_sectors_1, max_sectors_2;
961 mtx_lock(&sc->mfi_io_lock);
962 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
963 (void **)&ci, sizeof(*ci));
966 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
968 if ((error = mfi_mapcmd(sc, cm)) != 0) {
969 device_printf(sc->mfi_dev, "Failed to get controller info\n");
/* Fallback: size I/O by what the SG list can address. */
970 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
976 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
977 BUS_DMASYNC_POSTREAD);
978 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* Take the smaller of the stripe-based and request-size limits. */
980 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
981 max_sectors_2 = ci->max_request_size;
982 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
983 sc->disableOnlineCtrlReset =
984 ci->properties.OnOffProperties.disableOnlineCtrlReset;
990 mfi_release_command(cm);
991 mtx_unlock(&sc->mfi_io_lock);
/*
 * Fetch the controller's event-log state (boot/shutdown/newest
 * sequence numbers) via MFI_DCMD_CTRL_EVENT_GETINFO, polled.  The
 * result buffer is allocated by mfi_dcmd_command() and returned via
 * *log_state; the caller is responsible for freeing it (M_MFIBUF).
 */
996 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
998 struct mfi_command *cm = NULL;
1001 mtx_lock(&sc->mfi_io_lock);
1002 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1003 (void **)log_state, sizeof(**log_state));
1006 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1008 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1009 device_printf(sc->mfi_dev, "Failed to get log state\n");
1013 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1014 BUS_DMASYNC_POSTREAD);
1015 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1019 mfi_release_command(cm);
1020 mtx_unlock(&sc->mfi_io_lock);
/*
 * Arm asynchronous event notification: on the first call
 * (seq_start == 0) read the controller's log state, replay any events
 * recorded between the last shutdown and the newest entry, then
 * register for new events at the configured class/locale.
 */
1026 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1028 struct mfi_evt_log_state *log_state = NULL;
1029 union mfi_evt class_locale;
1033 class_locale.members.reserved = 0;
1034 class_locale.members.locale = mfi_event_locale;
1035 class_locale.members.evt_class = mfi_event_class;
1037 if (seq_start == 0) {
1038 error = mfi_get_log_state(sc, &log_state);
/*
 * NOTE(review): log_state is dereferenced immediately after the
 * call; if mfi_get_log_state() can fail leaving log_state NULL this
 * is a NULL dereference.  Any intervening error check is not
 * visible in this extraction — confirm against the full source.
 */
1039 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1042 free(log_state, M_MFIBUF);
1047 * Walk through any events that fired since the last
1050 mfi_parse_entries(sc, log_state->shutdown_seq_num,
1051 log_state->newest_seq_num);
1052 seq = log_state->newest_seq_num;
1055 mfi_aen_register(sc, seq, class_locale.word);
1056 free(log_state, M_MFIBUF);
/*
 * Queue a command and sleep until the completion path marks it
 * MFI_CMD_COMPLETED and wakes us.  Must be called with the io lock
 * held.  A DCMD opcode of 0 (as issued by MegaCli) is completed
 * immediately with MFI_STAT_OK and never sent to the controller.
 */
1062 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1065 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1066 cm->cm_complete = NULL;
1070 * MegaCli can issue a DCMD of 0. In this case do nothing
1071 * and return 0 to it as status
1073 if (cm->cm_frame->dcmd.opcode == 0) {
1074 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1076 return (cm->cm_error);
1078 mfi_enqueue_ready(cm);
/* msleep drops and reacquires the io lock while waiting. */
1080 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1081 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1082 return (cm->cm_error);
/*
 * Release every resource the softc owns: watchdog callout, /dev node,
 * per-command DMA maps, interrupt, and each DMA-allocated region
 * (sense, frames, comms, plus the ThunderBolt-only pools), finishing
 * with the bus_dma tags and the io/config locks.  Each teardown is
 * guarded so this is safe to call from a partially-failed attach.
 */
1086 mfi_free(struct mfi_softc *sc)
1088 struct mfi_command *cm;
1091 callout_drain(&sc->mfi_watchdog_callout);
1093 if (sc->mfi_cdev != NULL)
1094 destroy_dev(sc->mfi_cdev);
1096 if (sc->mfi_total_cmds != 0) {
1097 for (i = 0; i < sc->mfi_total_cmds; i++) {
1098 cm = &sc->mfi_commands[i];
1099 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1101 free(sc->mfi_commands, M_MFIBUF);
1105 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1106 if (sc->mfi_irq != NULL)
1107 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
/* Sense buffer DMA region: unload map, free memory, destroy tag. */
1110 if (sc->mfi_sense_busaddr != 0)
1111 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1112 if (sc->mfi_sense != NULL)
1113 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1114 sc->mfi_sense_dmamap);
1115 if (sc->mfi_sense_dmat != NULL)
1116 bus_dma_tag_destroy(sc->mfi_sense_dmat);
/* Command frame DMA region. */
1118 if (sc->mfi_frames_busaddr != 0)
1119 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1120 if (sc->mfi_frames != NULL)
1121 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1122 sc->mfi_frames_dmamap);
1123 if (sc->mfi_frames_dmat != NULL)
1124 bus_dma_tag_destroy(sc->mfi_frames_dmat);
/* Host/firmware communication (reply queue) DMA region. */
1126 if (sc->mfi_comms_busaddr != 0)
1127 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1128 if (sc->mfi_comms != NULL)
1129 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1130 sc->mfi_comms_dmamap);
1131 if (sc->mfi_comms_dmat != NULL)
1132 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1134 /* ThunderBolt contiguous memory free here */
1135 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1136 if (sc->mfi_tb_busaddr != 0)
1137 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1138 if (sc->request_message_pool != NULL)
1139 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1141 if (sc->mfi_tb_dmat != NULL)
1142 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1144 /* Version buffer memory free */
1145 /* Start LSIP200113393 */
1146 if (sc->verbuf_h_busaddr != 0)
1147 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1148 if (sc->verbuf != NULL)
1149 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1150 sc->verbuf_h_dmamap);
1151 if (sc->verbuf_h_dmat != NULL)
1152 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1154 /* End LSIP200113393 */
1155 /* ThunderBolt INIT packet memory Free */
1156 if (sc->mfi_tb_init_busaddr != 0)
1157 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1158 if (sc->mfi_tb_init != NULL)
1159 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1160 sc->mfi_tb_init_dmamap);
1161 if (sc->mfi_tb_init_dmat != NULL)
1162 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1164 /* ThunderBolt IOC Init Desc memory free here */
1165 if (sc->mfi_tb_ioc_init_busaddr != 0)
1166 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1167 sc->mfi_tb_ioc_init_dmamap);
1168 if (sc->mfi_tb_ioc_init_desc != NULL)
1169 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1170 sc->mfi_tb_ioc_init_desc,
1171 sc->mfi_tb_ioc_init_dmamap);
1172 if (sc->mfi_tb_ioc_init_dmat != NULL)
1173 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
/* Free each per-command ThunderBolt pool entry, then the pool array. */
1174 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1175 if (sc->mfi_cmd_pool_tbolt != NULL) {
1176 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1177 free(sc->mfi_cmd_pool_tbolt[i],
1179 sc->mfi_cmd_pool_tbolt[i] = NULL;
1183 if (sc->mfi_cmd_pool_tbolt != NULL) {
1184 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1185 sc->mfi_cmd_pool_tbolt = NULL;
1187 if (sc->request_desc_pool != NULL) {
1188 free(sc->request_desc_pool, M_MFIBUF);
1189 sc->request_desc_pool = NULL;
1192 if (sc->mfi_buffer_dmat != NULL)
1193 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1194 if (sc->mfi_parent_dmat != NULL)
1195 bus_dma_tag_destroy(sc->mfi_parent_dmat);
/* Locks are destroyed last; guard against a never-initialized mutex. */
1197 if (mtx_initialized(&sc->mfi_io_lock)) {
1198 mtx_destroy(&sc->mfi_io_lock);
1199 sx_destroy(&sc->mfi_config_lock);
/*
 * Deferred-attach hook: remove the config intrhook, enable controller
 * interrupts, and probe drives under the config (sx) and io (mtx) locks.
 */
1206 mfi_startup(void *arg)
1208 struct mfi_softc *sc;
1210 sc = (struct mfi_softc *)arg;
1212 config_intrhook_disestablish(&sc->mfi_ich);
1214 sc->mfi_enable_intr(sc);
1215 sx_xlock(&sc->mfi_config_lock);
1216 mtx_lock(&sc->mfi_io_lock);
/* SKINNY controllers take a different probe path here. */
1218 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1220 mtx_unlock(&sc->mfi_io_lock);
1221 sx_xunlock(&sc->mfi_config_lock);
/*
 * Interrupt handler body (NOTE(review): the function signature is not
 * visible in this extract — presumably mfi_intr).  Drains the reply
 * queue between the hardware producer (hw_pi) and consumer (hw_ci)
 * indexes, completing each finished command, then re-checks for new
 * completions after a flushing status-register read.
 */
1227 struct mfi_softc *sc;
1228 struct mfi_command *cm;
1229 uint32_t pi, ci, context;
1231 sc = (struct mfi_softc *)arg;
/* Not our interrupt (or nothing pending): bail out early. */
1233 if (sc->mfi_check_clear_intr(sc))
1237 pi = sc->mfi_comms->hw_pi;
1238 ci = sc->mfi_comms->hw_ci;
1239 mtx_lock(&sc->mfi_io_lock);
1241 context = sc->mfi_comms->hw_reply_q[ci];
1242 if (context < sc->mfi_max_fw_cmds) {
1243 cm = &sc->mfi_commands[context];
1244 mfi_remove_busy(cm);
1246 mfi_complete(sc, cm);
/* Reply queue is circular: wrap the consumer index. */
1248 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1253 sc->mfi_comms->hw_ci = ci;
1255 /* Give defered I/O a chance to run */
1256 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1257 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1259 mtx_unlock(&sc->mfi_io_lock);
1262 * Dummy read to flush the bus; this ensures that the indexes are up
1263 * to date. Restart processing if more commands have come it.
1265 (void)sc->mfi_read_fw_status(sc);
1266 if (pi != sc->mfi_comms->hw_pi)
/*
 * Send MFI_DCMD_CTRL_SHUTDOWN to the controller (polled), first
 * aborting any outstanding AEN or LD-map-update command so nothing is
 * in flight when the firmware shuts down.
 */
1273 mfi_shutdown(struct mfi_softc *sc)
1275 struct mfi_dcmd_frame *dcmd;
1276 struct mfi_command *cm;
1279 mtx_lock(&sc->mfi_io_lock);
1280 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1282 mtx_unlock(&sc->mfi_io_lock);
/* Cancel long-lived firmware commands before shutting down. */
1286 if (sc->mfi_aen_cm != NULL)
1287 mfi_abort(sc, sc->mfi_aen_cm);
1289 if (sc->map_update_cmd != NULL)
1290 mfi_abort(sc, sc->map_update_cmd);
1292 dcmd = &cm->cm_frame->dcmd;
1293 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1294 cm->cm_flags = MFI_CMD_POLLED;
1297 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1298 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1301 mfi_release_command(cm);
1302 mtx_unlock(&sc->mfi_io_lock);
/*
 * Query the firmware for the list of SYSTEM (JBOD) physical drives
 * exposed to the host, attach children for any new ones, and detach
 * children whose drives are no longer in the list.  Requires both the
 * config sx lock and the io mutex (asserted below).
 */
1307 mfi_syspdprobe(struct mfi_softc *sc)
1309 struct mfi_frame_header *hdr;
1310 struct mfi_command *cm = NULL;
1311 struct mfi_pd_list *pdlist = NULL;
1312 struct mfi_system_pd *syspd, *tmp;
1313 int error, i, found;
1315 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1316 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1317 /* Add SYSTEM PD's */
1318 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1319 (void **)&pdlist, sizeof(*pdlist));
1321 device_printf(sc->mfi_dev,
1322 "Error while forming SYSTEM PD list\n");
1326 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
/* mbox[0] selects the query type: PDs exposed to the host. */
1327 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1328 cm->cm_frame->dcmd.mbox[1] = 0;
1329 if (mfi_mapcmd(sc, cm) != 0) {
1330 device_printf(sc->mfi_dev,
1331 "Failed to get syspd device listing\n");
1334 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1335 BUS_DMASYNC_POSTREAD);
1336 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1337 hdr = &cm->cm_frame->header;
1338 if (hdr->cmd_status != MFI_STAT_OK) {
1339 device_printf(sc->mfi_dev,
1340 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1343 /* Get each PD and add it to the system */
1344 for (i = 0; i < pdlist->count; i++) {
/* Skip entries that are really enclosures, not drives. */
1345 if (pdlist->addr[i].device_id ==
1346 pdlist->addr[i].encl_device_id)
1349 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1350 if (syspd->pd_id == pdlist->addr[i].device_id)
1354 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1356 /* Delete SYSPD's whose state has been changed */
1357 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1359 for (i = 0; i < pdlist->count; i++) {
1360 if (syspd->pd_id == pdlist->addr[i].device_id)
/* io lock must be dropped around newbus child deletion. */
1365 mtx_unlock(&sc->mfi_io_lock);
1367 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1369 mtx_lock(&sc->mfi_io_lock);
1374 free(pdlist, M_MFIBUF);
1376 mfi_release_command(cm);
/*
 * Fetch the logical-drive list from the firmware and attach an mfid
 * child for every target not already present on sc->mfi_ld_tqh.
 * Requires the config sx lock and the io mutex (asserted below).
 */
1382 mfi_ldprobe(struct mfi_softc *sc)
1384 struct mfi_frame_header *hdr;
1385 struct mfi_command *cm = NULL;
1386 struct mfi_ld_list *list = NULL;
1387 struct mfi_disk *ld;
1390 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1391 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1393 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1394 (void **)&list, sizeof(*list));
1398 cm->cm_flags = MFI_CMD_DATAIN;
1399 if (mfi_wait_command(sc, cm) != 0) {
1400 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1404 hdr = &cm->cm_frame->header;
1405 if (hdr->cmd_status != MFI_STAT_OK) {
1406 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
/* Add only targets we do not already track. */
1411 for (i = 0; i < list->ld_count; i++) {
1412 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1413 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1416 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1421 free(list, M_MFIBUF);
1423 mfi_release_command(cm);
1429 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1430 * the bits in 24-31 are all set, then it is the number of seconds since
/*
 * Render a firmware event timestamp as text.  Returns a pointer into a
 * single static buffer that is reused on every call, so the result is
 * only valid until the next call and the function is not reentrant.
 */
1434 format_timestamp(uint32_t timestamp)
1436 static char buffer[32];
1438 if ((timestamp & 0xff000000) == 0xff000000)
1439 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1442 snprintf(buffer, sizeof(buffer), "%us", timestamp);
/*
 * Map an MFI event class code to a short human-readable string; unknown
 * codes are formatted numerically into a static buffer (reused across
 * calls, so not reentrant).
 */
1447 format_class(int8_t class)
1449 static char buffer[6];
1452 case MFI_EVT_CLASS_DEBUG:
1454 case MFI_EVT_CLASS_PROGRESS:
1455 return ("progress");
1456 case MFI_EVT_CLASS_INFO:
1458 case MFI_EVT_CLASS_WARNING:
1460 case MFI_EVT_CLASS_CRITICAL:
1462 case MFI_EVT_CLASS_FATAL:
1464 case MFI_EVT_CLASS_DEAD:
/* Unknown class: fall back to the raw numeric value. */
1467 snprintf(buffer, sizeof(buffer), "%d", class);
/*
 * Log a firmware event and react to the hot-plug related ones: rescan
 * for JBOD changes on host-bus-scan requests and PD insert/remove, and
 * detach the mfid child when a logical drive goes offline.  Events
 * older than the boot sequence number are ignored, as is everything
 * while the driver is detaching.
 */
1473 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1475 struct mfi_system_pd *syspd = NULL;
1477 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1478 format_timestamp(detail->time), detail->evt_class.members.locale,
1479 format_class(detail->evt_class.members.evt_class),
1480 detail->description);
1482 /* Don't act on old AEN's or while shutting down */
1483 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1486 switch (detail->arg_type) {
1487 case MR_EVT_ARGS_NONE:
1488 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1489 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1490 if (mfi_detect_jbod_change) {
1492 * Probe for new SYSPD's and Delete
1495 sx_xlock(&sc->mfi_config_lock);
1496 mtx_lock(&sc->mfi_io_lock);
1498 mtx_unlock(&sc->mfi_io_lock);
1499 sx_xunlock(&sc->mfi_config_lock);
1503 case MR_EVT_ARGS_LD_STATE:
1504 /* During load time driver reads all the events starting
1505 * from the one that has been logged after shutdown. Avoid
1508 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1510 struct mfi_disk *ld;
/* Find the mfid child matching the offlined target. */
1511 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1513 detail->args.ld_state.ld.target_id)
1517 Fix: for kernel panics when SSCD is removed
1518 KASSERT(ld != NULL, ("volume dissappeared"));
1522 device_delete_child(sc->mfi_dev, ld->ld_dev);
1527 case MR_EVT_ARGS_PD:
1528 if (detail->code == MR_EVT_PD_REMOVED) {
1529 if (mfi_detect_jbod_change) {
1531 * If the removed device is a SYSPD then
1534 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1537 detail->args.pd.device_id) {
1539 device_delete_child(
1548 if (detail->code == MR_EVT_PD_INSERTED) {
1549 if (mfi_detect_jbod_change) {
1550 /* Probe for new SYSPD's */
1551 sx_xlock(&sc->mfi_config_lock);
1552 mtx_lock(&sc->mfi_io_lock);
1554 mtx_unlock(&sc->mfi_io_lock);
1555 sx_xunlock(&sc->mfi_config_lock);
/*
 * Copy an event detail onto sc->mfi_evt_queue and schedule the
 * taskqueue handler to process it outside interrupt context.  Uses
 * M_NOWAIT since this can run with the io lock held.
 */
1563 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1565 struct mfi_evt_queue_elm *elm;
1567 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1568 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1571 memcpy(&elm->detail, detail, sizeof(*detail));
1572 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1573 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
/*
 * Taskqueue handler: move the pending event queue onto a local list
 * under the io lock, then decode and free each entry without holding
 * the lock (mfi_decode_evt may sleep when detaching children).
 */
1577 mfi_handle_evt(void *context, int pending)
1579 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1580 struct mfi_softc *sc;
1581 struct mfi_evt_queue_elm *elm;
1585 mtx_lock(&sc->mfi_io_lock);
1586 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1587 mtx_unlock(&sc->mfi_io_lock);
1588 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1589 TAILQ_REMOVE(&queue, elm, link);
1590 mfi_decode_evt(sc, &elm->detail);
1591 free(elm, M_MFIBUF);
/*
 * Register an asynchronous MFI_DCMD_CTRL_EVENT_WAIT command with the
 * firmware starting at 'seq' with the given class/locale word.  If an
 * AEN command is already outstanding and its class/locale already
 * covers the request, do nothing; otherwise widen the locale mask,
 * keep the lower (more verbose) class, and abort the old command so a
 * new one can be issued.
 */
1596 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1598 struct mfi_command *cm;
1599 struct mfi_dcmd_frame *dcmd;
1600 union mfi_evt current_aen, prior_aen;
1601 struct mfi_evt_detail *ed = NULL;
1604 current_aen.word = locale;
1605 if (sc->mfi_aen_cm != NULL) {
/* mbox[1] of the outstanding command holds its class/locale word. */
1607 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1608 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1609 !((prior_aen.members.locale & current_aen.members.locale)
1610 ^current_aen.members.locale)) {
1613 prior_aen.members.locale |= current_aen.members.locale;
1614 if (prior_aen.members.evt_class
1615 < current_aen.members.evt_class)
1616 current_aen.members.evt_class =
1617 prior_aen.members.evt_class;
1618 mtx_lock(&sc->mfi_io_lock);
1619 mfi_abort(sc, sc->mfi_aen_cm);
1620 mtx_unlock(&sc->mfi_io_lock);
1624 mtx_lock(&sc->mfi_io_lock);
1625 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1626 (void **)&ed, sizeof(*ed));
1627 mtx_unlock(&sc->mfi_io_lock);
/* mbox[0] = starting sequence, mbox[1] = class/locale filter. */
1632 dcmd = &cm->cm_frame->dcmd;
1633 ((uint32_t *)&dcmd->mbox)[0] = seq;
1634 ((uint32_t *)&dcmd->mbox)[1] = locale;
1635 cm->cm_flags = MFI_CMD_DATAIN;
1636 cm->cm_complete = mfi_aen_complete;
1638 sc->last_seq_num = seq;
1639 sc->mfi_aen_cm = cm;
1641 mtx_lock(&sc->mfi_io_lock);
1642 mfi_enqueue_ready(cm);
1644 mtx_unlock(&sc->mfi_io_lock);
/*
 * Completion handler for the AEN event-wait command: unless the
 * command was aborted, queue the event detail for processing, signal
 * any processes waiting on AENs (poll/select and SIGIO), release the
 * command, and re-register at the next sequence number so event
 * delivery continues.
 */
1651 mfi_aen_complete(struct mfi_command *cm)
1653 struct mfi_frame_header *hdr;
1654 struct mfi_softc *sc;
1655 struct mfi_evt_detail *detail;
1656 struct mfi_aen *mfi_aen_entry, *tmp;
1657 int seq = 0, aborted = 0;
1660 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1662 hdr = &cm->cm_frame->header;
1664 if (sc->mfi_aen_cm == NULL)
1667 if (sc->mfi_aen_cm->cm_aen_abort ||
1668 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1669 sc->mfi_aen_cm->cm_aen_abort = 0;
1672 sc->mfi_aen_triggered = 1;
/* Wake up anyone blocked in poll/select on the control device. */
1673 if (sc->mfi_poll_waiting) {
1674 sc->mfi_poll_waiting = 0;
1675 selwakeup(&sc->mfi_select);
1677 detail = cm->cm_data;
1678 mfi_queue_evt(sc, detail);
1679 seq = detail->seq + 1;
/* Deliver SIGIO to each registered listener, consuming the entry. */
1680 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1682 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1684 PROC_LOCK(mfi_aen_entry->p);
1685 kern_psignal(mfi_aen_entry->p, SIGIO);
1686 PROC_UNLOCK(mfi_aen_entry->p);
1687 free(mfi_aen_entry, M_MFIBUF);
1691 free(cm->cm_data, M_MFIBUF);
1692 sc->mfi_aen_cm = NULL;
1693 wakeup(&sc->mfi_aen_cm);
1694 mfi_release_command(cm);
1696 /* set it up again so the driver can catch more events */
/* mfi_aen_setup() re-takes the lock internally, so drop it here. */
1698 mtx_unlock(&sc->mfi_io_lock);
1699 mfi_aen_setup(sc, seq);
1700 mtx_lock(&sc->mfi_io_lock);
/* Number of event-log entries fetched per MFI_DCMD_CTRL_EVENT_GET. */
1704 #define MAX_EVENTS 15
/*
 * Replay firmware event-log entries in [start_seq, stop_seq), fetching
 * batches of up to MAX_EVENTS with polled DCMDs and queueing each one
 * via mfi_queue_evt().  The log is a circular buffer, so the stop
 * point may be numerically earlier than the start point.
 */
1707 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1709 struct mfi_command *cm;
1710 struct mfi_dcmd_frame *dcmd;
1711 struct mfi_evt_list *el;
1712 union mfi_evt class_locale;
1713 int error, i, seq, size;
1715 class_locale.members.reserved = 0;
1716 class_locale.members.locale = mfi_event_locale;
1717 class_locale.members.evt_class = mfi_event_class;
1719 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1721 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1725 for (seq = start_seq;;) {
1726 mtx_lock(&sc->mfi_io_lock);
1727 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1729 mtx_unlock(&sc->mfi_io_lock);
1732 mtx_unlock(&sc->mfi_io_lock);
/* Build the EVENT_GET DCMD by hand: mbox[0]=seq, mbox[1]=filter. */
1734 dcmd = &cm->cm_frame->dcmd;
1735 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1736 dcmd->header.cmd = MFI_CMD_DCMD;
1737 dcmd->header.timeout = 0;
1738 dcmd->header.data_len = size;
1739 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1740 ((uint32_t *)&dcmd->mbox)[0] = seq;
1741 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1742 cm->cm_sg = &dcmd->sgl;
1743 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1744 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1748 mtx_lock(&sc->mfi_io_lock);
1749 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1750 device_printf(sc->mfi_dev,
1751 "Failed to get controller entries\n");
1752 mfi_release_command(cm);
1753 mtx_unlock(&sc->mfi_io_lock);
1757 mtx_unlock(&sc->mfi_io_lock);
1758 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1759 BUS_DMASYNC_POSTREAD);
1760 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
/* NOT_FOUND means we ran past the end of the log: we are done. */
1762 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1763 mtx_lock(&sc->mfi_io_lock);
1764 mfi_release_command(cm);
1765 mtx_unlock(&sc->mfi_io_lock);
1768 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1769 device_printf(sc->mfi_dev,
1770 "Error %d fetching controller entries\n",
1771 dcmd->header.cmd_status);
1772 mtx_lock(&sc->mfi_io_lock);
1773 mfi_release_command(cm);
1774 mtx_unlock(&sc->mfi_io_lock);
1777 mtx_lock(&sc->mfi_io_lock);
1778 mfi_release_command(cm);
1779 mtx_unlock(&sc->mfi_io_lock);
1781 for (i = 0; i < el->count; i++) {
1783 * If this event is newer than 'stop_seq' then
1784 * break out of the loop. Note that the log
1785 * is a circular buffer so we have to handle
1786 * the case that our stop point is earlier in
1787 * the buffer than our start point.
1789 if (el->event[i].seq >= stop_seq) {
1790 if (start_seq <= stop_seq)
1792 else if (el->event[i].seq < start_seq)
1795 mtx_lock(&sc->mfi_io_lock);
1796 mfi_queue_evt(sc, &el->event[i]);
1797 mtx_unlock(&sc->mfi_io_lock);
/* Continue the next batch just after the last entry we saw. */
1799 seq = el->event[el->count - 1].seq + 1;
/*
 * Fetch MFI_DCMD_LD_GET_INFO for logical drive 'id' and, unless the
 * drive is an SSCD (CacheCade) volume, hand the completed command to
 * mfi_add_ld_complete() to attach the mfid child.  Caller holds the
 * io lock (asserted).
 */
1807 mfi_add_ld(struct mfi_softc *sc, int id)
1809 struct mfi_command *cm;
1810 struct mfi_dcmd_frame *dcmd = NULL;
1811 struct mfi_ld_info *ld_info = NULL;
1814 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1816 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1817 (void **)&ld_info, sizeof(*ld_info));
1819 device_printf(sc->mfi_dev,
1820 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1822 free(ld_info, M_MFIBUF);
1825 cm->cm_flags = MFI_CMD_DATAIN;
1826 dcmd = &cm->cm_frame->dcmd;
1828 if (mfi_wait_command(sc, cm) != 0) {
1829 device_printf(sc->mfi_dev,
1830 "Failed to get logical drive: %d\n", id);
1831 free(ld_info, M_MFIBUF);
/* SSCD volumes are not exposed as disks; only attach regular LDs. */
1834 if (ld_info->ld_config.params.isSSCD != 1)
1835 mfi_add_ld_complete(cm);
1837 mfi_release_command(cm);
1838 if (ld_info) /* SSCD drives ld_info free here */
1839 free(ld_info, M_MFIBUF);
/*
 * Finish adding a logical drive: on DCMD success, release the command,
 * drop the io lock, and attach an "mfid" child whose ivars point at
 * the ld_info buffer (ownership passes to the child on success).
 */
1845 mfi_add_ld_complete(struct mfi_command *cm)
1847 struct mfi_frame_header *hdr;
1848 struct mfi_ld_info *ld_info;
1849 struct mfi_softc *sc;
1853 hdr = &cm->cm_frame->header;
1854 ld_info = cm->cm_private;
1856 if (hdr->cmd_status != MFI_STAT_OK) {
1857 free(ld_info, M_MFIBUF);
1858 mfi_release_command(cm);
1861 mfi_release_command(cm);
/* newbus attach must run without the io lock held. */
1863 mtx_unlock(&sc->mfi_io_lock);
1865 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1866 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1867 free(ld_info, M_MFIBUF);
1869 mtx_lock(&sc->mfi_io_lock);
1873 device_set_ivars(child, ld_info);
1874 device_set_desc(child, "MFI Logical Disk");
1875 bus_generic_attach(sc->mfi_dev);
1877 mtx_lock(&sc->mfi_io_lock);
/*
 * Fetch MFI_DCMD_PD_GET_INFO for physical drive 'id' with a polled
 * command and hand it to mfi_add_sys_pd_complete() to attach the
 * mfisyspd child.  Caller holds the io lock (asserted).
 */
1880 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1882 struct mfi_command *cm;
1883 struct mfi_dcmd_frame *dcmd = NULL;
1884 struct mfi_pd_info *pd_info = NULL;
1887 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1889 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1890 (void **)&pd_info, sizeof(*pd_info));
1892 device_printf(sc->mfi_dev,
1893 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1896 free(pd_info, M_MFIBUF);
1899 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1900 dcmd = &cm->cm_frame->dcmd;
1902 dcmd->header.scsi_status = 0;
1903 dcmd->header.pad0 = 0;
1904 if (mfi_mapcmd(sc, cm) != 0) {
1905 device_printf(sc->mfi_dev,
1906 "Failed to get physical drive info %d\n", id);
1907 free(pd_info, M_MFIBUF);
/* Sync and unload the data map before reading pd_info. */
1910 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1911 BUS_DMASYNC_POSTREAD);
1912 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1913 mfi_add_sys_pd_complete(cm);
/*
 * Finish adding a system PD: verify the DCMD succeeded and that the
 * drive is actually in SYSTEM (JBOD) state, then attach an "mfisyspd"
 * child whose ivars point at the pd_info buffer.
 */
1918 mfi_add_sys_pd_complete(struct mfi_command *cm)
1920 struct mfi_frame_header *hdr;
1921 struct mfi_pd_info *pd_info;
1922 struct mfi_softc *sc;
1926 hdr = &cm->cm_frame->header;
1927 pd_info = cm->cm_private;
1929 if (hdr->cmd_status != MFI_STAT_OK) {
1930 free(pd_info, M_MFIBUF);
1931 mfi_release_command(cm);
/* Only drives in SYSTEM state are exposed as mfisyspd devices. */
1934 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1935 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1936 pd_info->ref.v.device_id);
1937 free(pd_info, M_MFIBUF);
1938 mfi_release_command(cm);
1941 mfi_release_command(cm);
/* newbus attach must run without the io lock held. */
1943 mtx_unlock(&sc->mfi_io_lock);
1945 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1946 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1947 free(pd_info, M_MFIBUF);
1949 mtx_lock(&sc->mfi_io_lock);
1953 device_set_ivars(child, pd_info);
1954 device_set_desc(child, "MFI System PD");
1955 bus_generic_attach(sc->mfi_dev);
1957 mtx_lock(&sc->mfi_io_lock);
/*
 * Pull the next bio off the queue and build a command for it: LD I/O
 * vs. system-PD passthrough is selected by the tag stashed in
 * bio_driver2.  If command resources are low (fewer than two free,
 * reserved for ioctls) the bio is left queued.
 */
1959 static struct mfi_command *
1960 mfi_bio_command(struct mfi_softc *sc)
1963 struct mfi_command *cm = NULL;
1965 /*reserving two commands to avoid starvation for IOCTL*/
1966 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2){
1969 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1972 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
1973 cm = mfi_build_ldio(sc, bio);
1974 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
1975 cm = mfi_build_syspdio(sc, bio);
/* Build failed (no free command): requeue the bio for later. */
1978 mfi_enqueue_bio(sc, bio);
/*
 * Build a SCSI passthrough (MFI_CMD_PD_SCSI_IO) command for a bio
 * aimed at a system PD, using a 10-byte READ(10)/WRITE(10) CDB.
 * Returns NULL when no free command is available.
 * NOTE(review): only the low 32 bits of bio_pblkno are encoded in the
 * 10-byte CDB — confirm system PDs here cannot exceed 2TB addressing.
 */
1981 static struct mfi_command *
1982 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1984 struct mfi_command *cm;
1985 struct mfi_pass_frame *pass;
1986 int flags = 0, blkcount = 0;
1987 uint32_t context = 0;
1989 if ((cm = mfi_dequeue_free(sc)) == NULL)
1992 /* Zero out the MFI frame */
1993 context = cm->cm_frame->header.context;
1994 bzero(cm->cm_frame, sizeof(union mfi_frame));
1995 cm->cm_frame->header.context = context;
1996 pass = &cm->cm_frame->pass;
1997 bzero(pass->cdb, 16);
1998 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
1999 switch (bio->bio_cmd & 0x03) {
2001 #define SCSI_READ 0x28
2002 pass->cdb[0] = SCSI_READ;
2003 flags = MFI_CMD_DATAIN;
2006 #define SCSI_WRITE 0x2a
2007 pass->cdb[0] = SCSI_WRITE;
2008 flags = MFI_CMD_DATAOUT;
2011 panic("Invalid bio command");
2014 /* Cheat with the sector length to avoid a non-constant division */
2015 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2016 /* Fill the LBA and Transfer length in CDB */
2017 pass->cdb[2] = (bio->bio_pblkno & 0xff000000) >> 24;
2018 pass->cdb[3] = (bio->bio_pblkno & 0x00ff0000) >> 16;
2019 pass->cdb[4] = (bio->bio_pblkno & 0x0000ff00) >> 8;
2020 pass->cdb[5] = bio->bio_pblkno & 0x000000ff;
2021 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2022 pass->cdb[8] = (blkcount & 0x00ff);
/* Target id was stashed in bio_driver1 by the disk layer. */
2023 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2024 pass->header.timeout = 0;
2025 pass->header.flags = 0;
2026 pass->header.scsi_status = 0;
2027 pass->header.sense_len = MFI_SENSE_LEN;
2028 pass->header.data_len = bio->bio_bcount;
2029 pass->header.cdb_len = 10;
2030 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2031 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2032 cm->cm_complete = mfi_bio_complete;
2033 cm->cm_private = bio;
2034 cm->cm_data = bio->bio_data;
2035 cm->cm_len = bio->bio_bcount;
2036 cm->cm_sg = &pass->sgl;
2037 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2038 cm->cm_flags = flags;
/*
 * Build a native LD read/write (MFI_CMD_LD_READ/WRITE) command for a
 * bio; the full 64-bit LBA goes into lba_hi/lba_lo and data_len is in
 * sectors.  Returns NULL when no free command is available.
 */
2042 static struct mfi_command *
2043 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2045 struct mfi_io_frame *io;
2046 struct mfi_command *cm;
2047 int flags, blkcount;
2048 uint32_t context = 0;
2050 if ((cm = mfi_dequeue_free(sc)) == NULL)
2053 /* Zero out the MFI frame */
2054 context = cm->cm_frame->header.context;
2055 bzero(cm->cm_frame, sizeof(union mfi_frame));
2056 cm->cm_frame->header.context = context;
2057 io = &cm->cm_frame->io;
2058 switch (bio->bio_cmd & 0x03) {
2060 io->header.cmd = MFI_CMD_LD_READ;
2061 flags = MFI_CMD_DATAIN;
2064 io->header.cmd = MFI_CMD_LD_WRITE;
2065 flags = MFI_CMD_DATAOUT;
2068 panic("Invalid bio command");
2071 /* Cheat with the sector length to avoid a non-constant division */
2072 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
/* Target id was stashed in bio_driver1 by the disk layer. */
2073 io->header.target_id = (uintptr_t)bio->bio_driver1;
2074 io->header.timeout = 0;
2075 io->header.flags = 0;
2076 io->header.scsi_status = 0;
2077 io->header.sense_len = MFI_SENSE_LEN;
2078 io->header.data_len = blkcount;
2079 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2080 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2081 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2082 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2083 cm->cm_complete = mfi_bio_complete;
2084 cm->cm_private = bio;
2085 cm->cm_data = bio->bio_data;
2086 cm->cm_len = bio->bio_bcount;
2087 cm->cm_sg = &io->sgl;
2088 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2089 cm->cm_flags = flags;
/*
 * Completion handler for bio-based commands: propagate firmware or
 * mapping errors into the bio as EIO, print sense data on firmware
 * failure, then release the command and finish the bio.
 */
2094 mfi_bio_complete(struct mfi_command *cm)
2097 struct mfi_frame_header *hdr;
2098 struct mfi_softc *sc;
2100 bio = cm->cm_private;
2101 hdr = &cm->cm_frame->header;
2104 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2105 bio->bio_flags |= BIO_ERROR;
2106 bio->bio_error = EIO;
2107 device_printf(sc->mfi_dev, "I/O error, status= %d "
2108 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2109 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2110 } else if (cm->cm_error != 0) {
/* DMA-mapping or driver-level error with no firmware status. */
2111 bio->bio_flags |= BIO_ERROR;
2114 mfi_release_command(cm);
2115 mfi_disk_complete(bio);
/*
 * I/O dispatch loop: while the queue is not frozen, take work in
 * priority order — already-prepared ready commands, then CAM ccbs,
 * then bios — and map/send each to the controller.  A mapping failure
 * requeues the command and stops dispatch.
 */
2119 mfi_startio(struct mfi_softc *sc)
2121 struct mfi_command *cm;
2122 struct ccb_hdr *ccbh;
2125 /* Don't bother if we're short on resources */
2126 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2129 /* Try a command that has already been prepared */
2130 cm = mfi_dequeue_ready(sc);
/* Next, CAM passthrough work queued by the mfip sim. */
2133 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2134 cm = sc->mfi_cam_start(ccbh);
2137 /* Nope, so look for work on the bioq */
2139 cm = mfi_bio_command(sc);
2141 /* No work available, so exit */
2145 /* Send the command to the controller */
2146 if (mfi_mapcmd(sc, cm) != 0) {
2147 mfi_requeue_ready(cm);
/*
 * DMA-map a command's data buffer (the SG list is filled in by the
 * mfi_data_cb callback, which also sends the frame) or, for commands
 * with no data, send the frame directly via the ThunderBolt or legacy
 * path.  Caller holds the io lock.  EINPROGRESS from a deferred map
 * freezes the queue until the callback fires.
 */
2154 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2158 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2160 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
/* Polled commands cannot wait for deferred mapping. */
2161 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2162 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2163 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2164 if (error == EINPROGRESS) {
2165 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2169 if (sc->MFA_enabled)
2170 error = mfi_tbolt_send_frame(sc, cm);
2172 error = mfi_send_frame(sc, cm);
/*
 * bus_dmamap_load callback: build the scatter/gather list in the
 * command frame (IEEE "skinny" SGs for I/O on SKINNY controllers,
 * otherwise 32- or 64-bit SGs; STP commands carry an inline first
 * chunk of cm_stp_len bytes), sync the data map for the transfer
 * direction, account the extra frames, and send the command.
 */
2179 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2181 struct mfi_frame_header *hdr;
2182 struct mfi_command *cm;
2184 struct mfi_softc *sc;
2185 int i, j, first, dir;
2188 cm = (struct mfi_command *)arg;
2190 hdr = &cm->cm_frame->header;
/* Mapping failed: record the error and complete the command. */
2194 printf("error %d in callback\n", error);
2195 cm->cm_error = error;
2196 mfi_complete(sc, cm);
2199 /* Use IEEE sgl only for IO's on a SKINNY controller
2200 * For other commands on a SKINNY controller use either
2201 * sg32 or sg64 based on the sizeof(bus_addr_t).
2202 * Also calculate the total frame size based on the type
2205 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2206 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2207 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2208 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2209 for (i = 0; i < nsegs; i++) {
2210 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2211 sgl->sg_skinny[i].len = segs[i].ds_len;
2212 sgl->sg_skinny[i].flag = 0;
2214 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2215 sge_size = sizeof(struct mfi_sg_skinny);
2216 hdr->sg_count = nsegs;
/* STP frames embed the first cm_stp_len bytes as their own SG entry. */
2219 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2220 first = cm->cm_stp_len;
2221 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2222 sgl->sg32[j].addr = segs[0].ds_addr;
2223 sgl->sg32[j++].len = first;
2225 sgl->sg64[j].addr = segs[0].ds_addr;
2226 sgl->sg64[j++].len = first;
2230 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2231 for (i = 0; i < nsegs; i++) {
2232 sgl->sg32[j].addr = segs[i].ds_addr + first;
2233 sgl->sg32[j++].len = segs[i].ds_len - first;
2237 for (i = 0; i < nsegs; i++) {
2238 sgl->sg64[j].addr = segs[i].ds_addr + first;
2239 sgl->sg64[j++].len = segs[i].ds_len - first;
2242 hdr->flags |= MFI_FRAME_SGL64;
2245 sge_size = sc->mfi_sge_size;
/* Sync the buffer for the direction(s) this command transfers. */
2249 if (cm->cm_flags & MFI_CMD_DATAIN) {
2250 dir |= BUS_DMASYNC_PREREAD;
2251 hdr->flags |= MFI_FRAME_DIR_READ;
2253 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2254 dir |= BUS_DMASYNC_PREWRITE;
2255 hdr->flags |= MFI_FRAME_DIR_WRITE;
2257 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2258 cm->cm_flags |= MFI_CMD_MAPPED;
2261 * Instead of calculating the total number of frames in the
2262 * compound frame, it's already assumed that there will be at
2263 * least 1 frame, so don't compensate for the modulo of the
2264 * following division.
2266 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2267 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2269 if (sc->MFA_enabled)
2270 mfi_tbolt_send_frame(sc, cm);
2272 mfi_send_frame(sc, cm);
/*
 * Hand a command frame to legacy (non-ThunderBolt) hardware.  Async
 * commands are timestamped and put on the busy queue; polled commands
 * are busy-waited up to MFI_POLL_TIMEOUT_SECS for the firmware to
 * overwrite the INVALID_STATUS sentinel.
 */
2278 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2280 struct mfi_frame_header *hdr;
2281 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2283 hdr = &cm->cm_frame->header;
2285 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2286 cm->cm_timestamp = time_uptime;
2287 mfi_enqueue_busy(cm);
/* Polled: sentinel status lets us detect completion by polling. */
2289 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2290 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2294 * The bus address of the command is aligned on a 64 byte boundary,
2295 * leaving the least 6 bits as zero. For whatever reason, the
2296 * hardware wants the address shifted right by three, leaving just
2297 * 3 zero bits. These three bits are then used as a prefetching
2298 * hint for the hardware to predict how many frames need to be
2299 * fetched across the bus. If a command has more than 8 frames
2300 * then the 3 bits are set to 0x7 and the firmware uses other
2301 * information in the command to determine the total amount to fetch.
2302 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2303 * is enough for both 32bit and 64bit systems.
2305 if (cm->cm_extra_frames > 7)
2306 cm->cm_extra_frames = 7;
2308 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2310 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2313 /* This is a polled command, so busy-wait for it to complete. */
2314 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2321 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2322 device_printf(sc->mfi_dev, "Frame %p timed out "
2323 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
/*
 * Common command completion: sync and unload the data DMA map if the
 * command was mapped, mark it completed, and either run its completion
 * callback or (implicitly, for waiters) let mfi_wait_command observe
 * MFI_CMD_COMPLETED.
 */
2332 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2336 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
/* STP commands are synced for read regardless of DATAIN. */
2338 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2339 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2340 dir |= BUS_DMASYNC_POSTREAD;
2341 if (cm->cm_flags & MFI_CMD_DATAOUT)
2342 dir |= BUS_DMASYNC_POSTWRITE;
2344 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2345 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2346 cm->cm_flags &= ~MFI_CMD_MAPPED;
2349 cm->cm_flags |= MFI_CMD_COMPLETED;
2351 if (cm->cm_complete != NULL)
2352 cm->cm_complete(cm);
/*
 * Issue a polled MFI_CMD_ABORT for cm_abort, identified to the
 * firmware by its frame context and bus address, then wait (up to 5
 * sleep periods) for an aborted AEN command to drain.  Caller holds
 * the io lock (asserted).
 */
2358 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2360 struct mfi_command *cm;
2361 struct mfi_abort_frame *abort;
2363 uint32_t context = 0;
2365 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2367 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2371 /* Zero out the MFI frame */
2372 context = cm->cm_frame->header.context;
2373 bzero(cm->cm_frame, sizeof(union mfi_frame));
2374 cm->cm_frame->header.context = context;
2376 abort = &cm->cm_frame->abort;
2377 abort->header.cmd = MFI_CMD_ABORT;
2378 abort->header.flags = 0;
2379 abort->header.scsi_status = 0;
2380 abort->abort_context = cm_abort->cm_frame->header.context;
2381 abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2382 abort->abort_mfi_addr_hi =
2383 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2385 cm->cm_flags = MFI_CMD_POLLED;
/* Flag the AEN command so its completion knows it was aborted. */
2388 sc->mfi_aen_cm->cm_aen_abort = 1;
2390 mfi_release_command(cm);
2392 while (i < 5 && sc->mfi_aen_cm != NULL) {
2393 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
/*
 * Write 'len' bytes at 'virt' to LBA 'lba' of logical drive 'id'
 * using a polled MFI_CMD_LD_WRITE — used for crash-dump I/O where
 * normal queued I/O is unavailable.  Returns the mfi_mapcmd error.
 */
2402 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2405 struct mfi_command *cm;
2406 struct mfi_io_frame *io;
2408 uint32_t context = 0;
2410 if ((cm = mfi_dequeue_free(sc)) == NULL)
2413 /* Zero out the MFI frame */
2414 context = cm->cm_frame->header.context;
2415 bzero(cm->cm_frame, sizeof(union mfi_frame));
2416 cm->cm_frame->header.context = context;
2418 io = &cm->cm_frame->io;
2419 io->header.cmd = MFI_CMD_LD_WRITE;
2420 io->header.target_id = id;
2421 io->header.timeout = 0;
2422 io->header.flags = 0;
2423 io->header.scsi_status = 0;
2424 io->header.sense_len = MFI_SENSE_LEN;
/* data_len is in sectors, rounded up. */
2425 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2426 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2427 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2428 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2429 io->lba_lo = lba & 0xffffffff;
2432 cm->cm_sg = &io->sgl;
2433 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2434 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2436 error = mfi_mapcmd(sc, cm);
2437 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2438 BUS_DMASYNC_POSTWRITE);
2439 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2440 mfi_release_command(cm);
/*
 * Write 'len' bytes from 'virt' to system physical disk 'id' at 'lba'
 * via a polled MFI_CMD_PD_SCSI_IO pass-through frame carrying a
 * hand-built 10-byte SCSI WRITE CDB (crash-dump path).
 * NOTE(review): a 10-byte CDB limits lba to 32 bits here; interior lines
 * are elided from this view; code kept as-is.
 */
2446 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2449 struct mfi_command *cm;
2450 struct mfi_pass_frame *pass;
2454 if ((cm = mfi_dequeue_free(sc)) == NULL)
2457 pass = &cm->cm_frame->pass;
2458 bzero(pass->cdb, 16);
2459 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
/* WRITE(10): LBA in cdb[2..5] (big-endian), block count in cdb[7..8] */
2460 pass->cdb[0] = SCSI_WRITE;
2461 pass->cdb[2] = (lba & 0xff000000) >> 24;
2462 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2463 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2464 pass->cdb[5] = (lba & 0x000000ff);
2465 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2466 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2467 pass->cdb[8] = (blkcount & 0x00ff);
2468 pass->header.target_id = id;
2469 pass->header.timeout = 0;
2470 pass->header.flags = 0;
2471 pass->header.scsi_status = 0;
2472 pass->header.sense_len = MFI_SENSE_LEN;
/* Pass-through frames carry a byte count, unlike I/O frames */
2473 pass->header.data_len = len;
2474 pass->header.cdb_len = 10;
2475 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2476 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2479 cm->cm_sg = &pass->sgl;
2480 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2481 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2483 error = mfi_mapcmd(sc, cm);
2484 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2485 BUS_DMASYNC_POSTWRITE);
2486 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2487 mfi_release_command(cm);
/*
 * Character-device open: refuse while detaching, otherwise mark the
 * control device open.  NOTE(review): interior lines elided from view.
 */
2493 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2495 struct mfi_softc *sc;
2500 mtx_lock(&sc->mfi_io_lock);
2501 if (sc->mfi_detaching)
2504 sc->mfi_flags |= MFI_FLAGS_OPEN;
2507 mtx_unlock(&sc->mfi_io_lock);
/*
 * Character-device close: clear the open flag and drop any AEN
 * registrations belonging to the closing process.
 * NOTE(review): interior lines elided from view; code kept as-is.
 */
2513 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2515 struct mfi_softc *sc;
2516 struct mfi_aen *mfi_aen_entry, *tmp;
2520 mtx_lock(&sc->mfi_io_lock);
2521 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
/* Safe traversal: entries owned by curproc are removed and freed */
2523 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2524 if (mfi_aen_entry->p == curproc) {
2525 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2527 free(mfi_aen_entry, M_MFIBUF);
2530 mtx_unlock(&sc->mfi_io_lock);
/*
 * Take the exclusive configuration lock for DCMD opcodes that change the
 * array configuration; returns whether the lock was taken so the caller
 * can pass it to mfi_config_unlock().
 * NOTE(review): interior lines elided from view; code kept as-is.
 */
2535 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2539 case MFI_DCMD_LD_DELETE:
2540 case MFI_DCMD_CFG_ADD:
2541 case MFI_DCMD_CFG_CLEAR:
2542 sx_xlock(&sc->mfi_config_lock);
/* Release the configuration lock if mfi_config_lock() reported it taken. */
2550 mfi_config_unlock(struct mfi_softc *sc, int locked)
2554 sx_xunlock(&sc->mfi_config_lock);
2558 * Perform pre-issue checks on commands from userland and possibly veto
/*
 * Before issuing a config-changing DCMD from userland, disable the
 * affected mfid/syspd children so GEOM cannot race the change.
 * Caller holds mfi_io_lock (asserted).
 * NOTE(review): interior lines elided from view; code kept as-is.
 */
2562 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2564 struct mfi_disk *ld, *ld2;
2566 struct mfi_system_pd *syspd = NULL;
2570 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2572 switch (cm->cm_frame->dcmd.opcode) {
2573 case MFI_DCMD_LD_DELETE:
/* mbox[0] carries the target LD id for LD_DELETE */
2574 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2575 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2581 error = mfi_disk_disable(ld);
2583 case MFI_DCMD_CFG_CLEAR:
/* CFG_CLEAR affects every LD; disable all, re-enable on failure */
2584 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2585 error = mfi_disk_disable(ld);
2590 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2593 mfi_disk_enable(ld2);
2597 case MFI_DCMD_PD_STATE_SET:
2598 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
/* mbox[2] holds the requested PD state; only UNCONFIGURED_GOOD matters */
2600 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2601 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2602 if (syspd->pd_id == syspd_id)
2609 error = mfi_syspd_disable(syspd);
2617 /* Perform post-issue checks on commands from userland. */
/*
 * After a config-changing DCMD completes: on success, detach the device
 * nodes the change removed; on failure, re-enable what the pre-check
 * disabled.  Temporarily drops mfi_io_lock around device_delete_child().
 * NOTE(review): interior lines elided from view; code kept as-is.
 */
2619 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2621 struct mfi_disk *ld, *ldn;
2622 struct mfi_system_pd *syspd = NULL;
2626 switch (cm->cm_frame->dcmd.opcode) {
2627 case MFI_DCMD_LD_DELETE:
2628 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2629 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2632 KASSERT(ld != NULL, ("volume dissappeared"));
2633 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
/* Newbus operations may sleep; cannot hold the io mutex */
2634 mtx_unlock(&sc->mfi_io_lock);
2636 device_delete_child(sc->mfi_dev, ld->ld_dev);
2638 mtx_lock(&sc->mfi_io_lock);
2640 mfi_disk_enable(ld);
2642 case MFI_DCMD_CFG_CLEAR:
2643 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2644 mtx_unlock(&sc->mfi_io_lock);
2646 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2647 device_delete_child(sc->mfi_dev, ld->ld_dev);
2650 mtx_lock(&sc->mfi_io_lock);
/* Command failed: undo the pre-check's blanket disable */
2652 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2653 mfi_disk_enable(ld);
2656 case MFI_DCMD_CFG_ADD:
2659 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2662 case MFI_DCMD_PD_STATE_SET:
2663 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2665 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2666 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2667 if (syspd->pd_id == syspd_id)
2673 /* If the transition fails then enable the syspd again */
2674 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2675 mfi_syspd_enable(syspd);
/*
 * Detect whether the userland command targets an SSCD (CacheCade) volume:
 * for CFG_ADD, inspect the supplied config data directly; for LD_DELETE,
 * issue an internal LD_GET_INFO and check the volume's isSSCD flag.
 * The result lets the ioctl path skip the pre/post disable dance.
 * NOTE(review): interior lines elided from view; code kept as-is.
 */
2680 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2682 struct mfi_config_data *conf_data=(struct mfi_config_data *)cm->cm_data;
2683 struct mfi_command *ld_cm = NULL;
2684 struct mfi_ld_info *ld_info = NULL;
2687 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2688 (conf_data->ld[0].params.isSSCD == 1)){
2690 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2691 error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2692 (void **)&ld_info, sizeof(*ld_info));
2694 device_printf(sc->mfi_dev, "Failed to allocate"
2695 "MFI_DCMD_LD_GET_INFO %d", error)
2697 free(ld_info, M_MFIBUF);
/* Query the same LD id the delete request names in mbox[0] */
2700 ld_cm->cm_flags = MFI_CMD_DATAIN;
2701 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2702 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2703 if (mfi_wait_command(sc, ld_cm) != 0){
2704 device_printf(sc->mfi_dev, "failed to get log drv\n");
2705 mfi_release_command(ld_cm);
2706 free(ld_info, M_MFIBUF);
2710 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2711 free(ld_info, M_MFIBUF);
2712 mfi_release_command(ld_cm);
/* Completed OK: read the DATAIN result from the command's buffer */
2716 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2718 if (ld_info->ld_config.params.isSSCD == 1)
2721 mfi_release_command(ld_cm);
2722 free(ld_info, M_MFIBUF);
/*
 * Prepare an MFI_CMD_STP (SATA pass-through) command from userland: for
 * each user iovec, create a per-segment DMA tag and map, allocate a
 * kernel bounce buffer, record its bus address in both the kernel SGE
 * array inside the frame and the frame's sg32/sg64 SGL, and copy the
 * user data into the bounce buffer.
 * NOTE(review): interior lines are elided from this view; code kept
 * as-is except for the sg32 address fix flagged below.
 */
2729 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2732 struct mfi_ioc_packet *ioc;
2733 ioc = (struct mfi_ioc_packet *)arg;
2734 int sge_size, error;
2735 struct megasas_sge *kern_sge;
2737 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2738 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2739 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
/* 64-bit bus addresses need 64-bit SGEs and two extra frames */
2741 if (sizeof(bus_addr_t) == 8) {
2742 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2743 cm->cm_extra_frames = 2;
2744 sge_size = sizeof(struct mfi_sg64);
2746 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2747 sge_size = sizeof(struct mfi_sg32);
2750 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2751 for (i = 0; i < ioc->mfi_sge_count; i++) {
2752 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2753 1, 0, /* algnmnt, boundary */
2754 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2755 BUS_SPACE_MAXADDR, /* highaddr */
2756 NULL, NULL, /* filter, filterarg */
2757 ioc->mfi_sgl[i].iov_len,/* maxsize */
2759 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2760 BUS_DMA_ALLOCNOW, /* flags */
2761 NULL, NULL, /* lockfunc, lockarg */
2762 &sc->mfi_kbuff_arr_dmat[i])) {
2763 device_printf(sc->mfi_dev,
2764 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2768 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2769 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2770 &sc->mfi_kbuff_arr_dmamap[i])) {
2771 device_printf(sc->mfi_dev,
2772 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
/* mfi_addr_cb stores the segment bus address into mfi_kbuff_arr_busaddr[i] */
2776 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2777 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2778 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2779 &sc->mfi_kbuff_arr_busaddr[i], 0);
2781 if (!sc->kbuff_arr[i]) {
2782 device_printf(sc->mfi_dev,
2783 "Could not allocate memory for kbuff_arr info\n");
2786 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2787 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2789 if (sizeof(bus_addr_t) == 8) {
2790 cm->cm_frame->stp.sgl.sg64[i].addr =
2791 kern_sge[i].phys_addr;
2792 cm->cm_frame->stp.sgl.sg64[i].len =
2793 ioc->mfi_sgl[i].iov_len;
/*
 * BUG FIX: the first assignment below previously targeted
 * sg32[i].len, clobbering the length with the bus address and
 * leaving sg32[i].addr unset, so 32-bit STP SGLs pointed at
 * garbage.  It must set the address field.
 */
2795 cm->cm_frame->stp.sgl.sg32[i].addr =
2796 kern_sge[i].phys_addr;
2797 cm->cm_frame->stp.sgl.sg32[i].len =
2798 ioc->mfi_sgl[i].iov_len;
/* Copy the caller's data into the kernel bounce buffer */
2801 error = copyin(ioc->mfi_sgl[i].iov_base,
2803 ioc->mfi_sgl[i].iov_len);
2805 device_printf(sc->mfi_dev, "Copy in failed\n");
2810 cm->cm_flags |=MFI_CMD_MAPPED;
/*
 * Execute a userland MFIIO_PASSTHRU DCMD: copy in the user buffer (if
 * any), build a DCMD frame from the ioctl request, run it with the usual
 * pre/post config checks, then copy the frame header and data back out.
 * NOTE(review): interior lines elided from view; code kept as-is.
 */
2815 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2817 struct mfi_command *cm;
2818 struct mfi_dcmd_frame *dcmd;
2819 void *ioc_buf = NULL;
2821 int error = 0, locked;
2824 if (ioc->buf_size > 0) {
/* NOTE(review): malloc(9) with M_WAITOK never returns NULL, so the
 * following NULL check looks like dead code — confirm before removing. */
2825 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2826 if (ioc_buf == NULL) {
2829 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2831 device_printf(sc->mfi_dev, "failed to copyin\n");
2832 free(ioc_buf, M_MFIBUF);
2837 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2839 mtx_lock(&sc->mfi_io_lock);
/* Block until a free command is available; wakeups retry the dequeue */
2840 while ((cm = mfi_dequeue_free(sc)) == NULL)
2841 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2843 /* Save context for later */
2844 context = cm->cm_frame->header.context;
2846 dcmd = &cm->cm_frame->dcmd;
2847 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2849 cm->cm_sg = &dcmd->sgl;
2850 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2851 cm->cm_data = ioc_buf;
2852 cm->cm_len = ioc->buf_size;
2854 /* restore context */
2855 cm->cm_frame->header.context = context;
2857 /* Cheat since we don't know if we're writing or reading */
2858 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2860 error = mfi_check_command_pre(sc, cm);
2864 error = mfi_wait_command(sc, cm);
2866 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
/* Return the completed frame (status etc.) to the caller's request */
2869 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2870 mfi_check_command_post(sc, cm);
2872 mfi_release_command(cm);
2873 mtx_unlock(&sc->mfi_io_lock);
2874 mfi_config_unlock(sc, locked);
2875 if (ioc->buf_size > 0)
2876 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2878 free(ioc_buf, M_MFIBUF);
2882 #define PTRIN(p) ((void *)(uintptr_t)(p))
/*
 * Main ioctl dispatcher for the mfi control device.  Handles statistics
 * queries, disk-name queries, raw MFI frame submission (MFI_CMD, plus the
 * 32-bit compat variant), AEN registration, Linux-compat shims, and the
 * MFIIO_PASSTHRU path.  The raw-frame case copies in the user frame and
 * scatter/gather data, runs the command with pre/post config checks, and
 * copies results, sense data, and status back out.
 * NOTE(review): many interior lines are elided from this view; code is
 * kept byte-for-byte as-is.
 */
2885 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2887 struct mfi_softc *sc;
2888 union mfi_statrequest *ms;
2889 struct mfi_ioc_packet *ioc;
2890 #ifdef COMPAT_FREEBSD32
2891 struct mfi_ioc_packet32 *ioc32;
2893 struct mfi_ioc_aen *aen;
2894 struct mfi_command *cm = NULL;
2895 uint32_t context = 0;
2896 union mfi_sense_ptr sense_ptr;
2897 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2900 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2901 #ifdef COMPAT_FREEBSD32
2902 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2903 struct mfi_ioc_passthru iop_swab;
/* Refuse all ioctls while the adapter is in a critical error state */
2913 if (sc->hw_crit_error)
2916 if (sc->issuepend_done == 0)
2921 ms = (union mfi_statrequest *)arg;
2922 switch (ms->ms_item) {
2927 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2928 sizeof(struct mfi_qstat));
2935 case MFIIO_QUERY_DISK:
2937 struct mfi_query_disk *qd;
2938 struct mfi_disk *ld;
2940 qd = (struct mfi_query_disk *)arg;
2941 mtx_lock(&sc->mfi_io_lock);
2942 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2943 if (ld->ld_id == qd->array_id)
2948 mtx_unlock(&sc->mfi_io_lock);
2952 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2954 bzero(qd->devname, SPECNAMELEN + 1);
2955 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2956 mtx_unlock(&sc->mfi_io_lock);
2960 #ifdef COMPAT_FREEBSD32
2964 devclass_t devclass;
2965 ioc = (struct mfi_ioc_packet *)arg;
/* Requests may target another adapter; re-resolve the softc by unit */
2968 adapter = ioc->mfi_adapter_no;
2969 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2970 devclass = devclass_find("mfi");
2971 sc = devclass_get_softc(devclass, adapter);
2973 mtx_lock(&sc->mfi_io_lock);
2974 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2975 mtx_unlock(&sc->mfi_io_lock);
2978 mtx_unlock(&sc->mfi_io_lock);
2982 * save off original context since copying from user
2983 * will clobber some data
2985 context = cm->cm_frame->header.context;
2986 cm->cm_frame->header.context = cm->cm_index;
2988 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2989 2 * MEGAMFI_FRAME_SIZE);
2990 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2991 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2992 cm->cm_frame->header.scsi_status = 0;
2993 cm->cm_frame->header.pad0 = 0;
2994 if (ioc->mfi_sge_count) {
2996 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
/* Translate frame flags into driver data-direction flags */
3000 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3001 cm->cm_flags |= MFI_CMD_DATAIN;
3002 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3003 cm->cm_flags |= MFI_CMD_DATAOUT;
3004 /* Legacy app shim */
3005 if (cm->cm_flags == 0)
3006 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3007 cm->cm_len = cm->cm_frame->header.data_len;
/* STP commands carry extra payload described by the first iovec */
3008 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3009 #ifdef COMPAT_FREEBSD32
3010 if (cmd == MFI_CMD) {
3013 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3014 #ifdef COMPAT_FREEBSD32
3016 /* 32bit on 64bit */
3017 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3018 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3021 cm->cm_len += cm->cm_stp_len;
3024 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3025 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3027 if (cm->cm_data == NULL) {
3028 device_printf(sc->mfi_dev, "Malloc failed\n");
3035 /* restore header context */
3036 cm->cm_frame->header.context = context;
3038 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3039 res = mfi_stp_cmd(sc, cm, arg);
/* Gather user data into the kernel buffer for outbound transfers */
3044 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3045 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3046 for (i = 0; i < ioc->mfi_sge_count; i++) {
3047 #ifdef COMPAT_FREEBSD32
3048 if (cmd == MFI_CMD) {
3051 addr = ioc->mfi_sgl[i].iov_base;
3052 len = ioc->mfi_sgl[i].iov_len;
3053 #ifdef COMPAT_FREEBSD32
3055 /* 32bit on 64bit */
3056 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3057 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3058 len = ioc32->mfi_sgl[i].iov_len;
3061 error = copyin(addr, temp, len);
3063 device_printf(sc->mfi_dev,
3064 "Copy in failed\n");
3072 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3073 locked = mfi_config_lock(sc,
3074 cm->cm_frame->dcmd.opcode);
/* Point pass-through sense at the command's preallocated sense buffer */
3076 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3077 cm->cm_frame->pass.sense_addr_lo =
3078 (uint32_t)cm->cm_sense_busaddr;
3079 cm->cm_frame->pass.sense_addr_hi =
3080 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3082 mtx_lock(&sc->mfi_io_lock);
/* SSCD (CacheCade) targets skip the pre/post disable/enable dance */
3083 skip_pre_post = mfi_check_for_sscd (sc, cm);
3084 if (!skip_pre_post) {
3085 error = mfi_check_command_pre(sc, cm);
3087 mtx_unlock(&sc->mfi_io_lock);
3091 if ((error = mfi_wait_command(sc, cm)) != 0) {
3092 device_printf(sc->mfi_dev,
3093 "Controller polled failed\n");
3094 mtx_unlock(&sc->mfi_io_lock);
3097 if (!skip_pre_post) {
3098 mfi_check_command_post(sc, cm);
3100 mtx_unlock(&sc->mfi_io_lock);
3102 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
/* Scatter results back to the user's iovecs for inbound transfers */
3104 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3105 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3106 for (i = 0; i < ioc->mfi_sge_count; i++) {
3107 #ifdef COMPAT_FREEBSD32
3108 if (cmd == MFI_CMD) {
3111 addr = ioc->mfi_sgl[i].iov_base;
3112 len = ioc->mfi_sgl[i].iov_len;
3113 #ifdef COMPAT_FREEBSD32
3115 /* 32bit on 64bit */
3116 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3117 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3118 len = ioc32->mfi_sgl[i].iov_len;
3121 error = copyout(temp, addr, len);
3123 device_printf(sc->mfi_dev,
3124 "Copy out failed\n");
3132 if (ioc->mfi_sense_len) {
3133 /* get user-space sense ptr then copy out sense */
3134 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3135 &sense_ptr.sense_ptr_data[0],
3136 sizeof(sense_ptr.sense_ptr_data));
3137 #ifdef COMPAT_FREEBSD32
3138 if (cmd != MFI_CMD) {
3140 * not 64bit native so zero out any address
3142 sense_ptr.addr.high = 0;
3145 error = copyout(cm->cm_sense, sense_ptr.user_space,
3146 ioc->mfi_sense_len);
3148 device_printf(sc->mfi_dev,
3149 "Copy out failed\n");
3154 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3156 mfi_config_unlock(sc, locked);
3158 free(data, M_MFIBUF);
/* Tear down the per-segment STP bounce buffers built by mfi_stp_cmd() */
3159 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3160 for (i = 0; i < 2; i++) {
3161 if (sc->kbuff_arr[i]) {
/* NOTE(review): mfi_kbuff_arr_busaddr is indexed with [i] elsewhere;
 * comparing the bare name to 0 looks always-true — verify whether
 * `sc->mfi_kbuff_arr_busaddr[i]` was intended here. */
3162 if (sc->mfi_kbuff_arr_busaddr != 0)
3164 sc->mfi_kbuff_arr_dmat[i],
3165 sc->mfi_kbuff_arr_dmamap[i]
3167 if (sc->kbuff_arr[i] != NULL)
3169 sc->mfi_kbuff_arr_dmat[i],
3171 sc->mfi_kbuff_arr_dmamap[i]
3173 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3174 bus_dma_tag_destroy(
3175 sc->mfi_kbuff_arr_dmat[i]);
3180 mtx_lock(&sc->mfi_io_lock);
3181 mfi_release_command(cm);
3182 mtx_unlock(&sc->mfi_io_lock);
3188 aen = (struct mfi_ioc_aen *)arg;
3189 error = mfi_aen_register(sc, aen->aen_seq_num,
3190 aen->aen_class_locale);
3193 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3195 devclass_t devclass;
3196 struct mfi_linux_ioc_packet l_ioc;
3199 devclass = devclass_find("mfi");
3200 if (devclass == NULL)
3203 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3206 adapter = l_ioc.lioc_adapter_no;
3207 sc = devclass_get_softc(devclass, adapter);
/* Forward to the Linux-compat handler on the resolved adapter */
3210 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3211 cmd, arg, flag, td));
3214 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3216 devclass_t devclass;
3217 struct mfi_linux_ioc_aen l_aen;
3220 devclass = devclass_find("mfi");
3221 if (devclass == NULL)
3224 error = copyin(arg, &l_aen, sizeof(l_aen));
3227 adapter = l_aen.laen_adapter_no;
3228 sc = devclass_get_softc(devclass, adapter);
3231 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3232 cmd, arg, flag, td));
3235 #ifdef COMPAT_FREEBSD32
3236 case MFIIO_PASSTHRU32:
/* Widen the 32-bit request then fall through to the native handler */
3237 iop_swab.ioc_frame = iop32->ioc_frame;
3238 iop_swab.buf_size = iop32->buf_size;
3239 iop_swab.buf = PTRIN(iop32->buf);
3243 case MFIIO_PASSTHRU:
3244 error = mfi_user_command(sc, iop);
3245 #ifdef COMPAT_FREEBSD32
3246 if (cmd == MFIIO_PASSTHRU32)
3247 iop32->ioc_frame = iop_swab.ioc_frame;
3251 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * Linux-compat ioctl backend: accept a Linux-layout MFI request, copy in
 * its frame and scatter/gather data, execute it with pre/post config
 * checks, then copy data, sense, and command status back to the Linux
 * request structure.  Also handles Linux AEN registration.
 * NOTE(review): many interior lines are elided from this view; code is
 * kept byte-for-byte as-is.
 */
3260 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3262 struct mfi_softc *sc;
3263 struct mfi_linux_ioc_packet l_ioc;
3264 struct mfi_linux_ioc_aen l_aen;
3265 struct mfi_command *cm = NULL;
3266 struct mfi_aen *mfi_aen_entry;
3267 union mfi_sense_ptr sense_ptr;
3268 uint32_t context = 0;
3269 uint8_t *data = NULL, *temp;
3276 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3277 error = copyin(arg, &l_ioc, sizeof(l_ioc));
/* Reject malformed requests before allocating anything */
3281 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3285 mtx_lock(&sc->mfi_io_lock);
3286 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3287 mtx_unlock(&sc->mfi_io_lock);
3290 mtx_unlock(&sc->mfi_io_lock);
3294 * save off original context since copying from user
3295 * will clobber some data
3297 context = cm->cm_frame->header.context;
3299 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3300 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3301 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3302 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3303 cm->cm_frame->header.scsi_status = 0;
3304 cm->cm_frame->header.pad0 = 0;
3305 if (l_ioc.lioc_sge_count)
3307 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
/* Translate frame flags into driver data-direction flags */
3309 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3310 cm->cm_flags |= MFI_CMD_DATAIN;
3311 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3312 cm->cm_flags |= MFI_CMD_DATAOUT;
3313 cm->cm_len = cm->cm_frame->header.data_len;
3315 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3316 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3318 if (cm->cm_data == NULL) {
3319 device_printf(sc->mfi_dev, "Malloc failed\n");
3326 /* restore header context */
3327 cm->cm_frame->header.context = context;
/* Gather user data into the kernel buffer for outbound transfers */
3330 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3331 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3332 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3334 l_ioc.lioc_sgl[i].iov_len);
3336 device_printf(sc->mfi_dev,
3337 "Copy in failed\n");
3340 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3344 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3345 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
/* Point pass-through sense at the command's preallocated sense buffer */
3347 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3348 cm->cm_frame->pass.sense_addr_lo =
3349 (uint32_t)cm->cm_sense_busaddr;
3350 cm->cm_frame->pass.sense_addr_hi =
3351 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3354 mtx_lock(&sc->mfi_io_lock);
3355 error = mfi_check_command_pre(sc, cm);
3357 mtx_unlock(&sc->mfi_io_lock);
3361 if ((error = mfi_wait_command(sc, cm)) != 0) {
3362 device_printf(sc->mfi_dev,
3363 "Controller polled failed\n");
3364 mtx_unlock(&sc->mfi_io_lock);
3368 mfi_check_command_post(sc, cm);
3369 mtx_unlock(&sc->mfi_io_lock);
/* Scatter results back to the user's iovecs for inbound transfers */
3372 if (cm->cm_flags & MFI_CMD_DATAIN) {
3373 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3374 error = copyout(temp,
3375 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3376 l_ioc.lioc_sgl[i].iov_len);
3378 device_printf(sc->mfi_dev,
3379 "Copy out failed\n");
3382 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3386 if (l_ioc.lioc_sense_len) {
3387 /* get user-space sense ptr then copy out sense */
3388 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3389 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3390 &sense_ptr.sense_ptr_data[0],
3391 sizeof(sense_ptr.sense_ptr_data));
3394 * only 32bit Linux support so zero out any
3395 * address over 32bit
3397 sense_ptr.addr.high = 0;
3399 error = copyout(cm->cm_sense, sense_ptr.user_space,
3400 l_ioc.lioc_sense_len);
3402 device_printf(sc->mfi_dev,
3403 "Copy out failed\n");
/* Write the final command status back into the user's frame header */
3408 error = copyout(&cm->cm_frame->header.cmd_status,
3409 &((struct mfi_linux_ioc_packet*)arg)
3410 ->lioc_frame.hdr.cmd_status,
3413 device_printf(sc->mfi_dev,
3414 "Copy out failed\n");
3419 mfi_config_unlock(sc, locked);
3421 free(data, M_MFIBUF);
3423 mtx_lock(&sc->mfi_io_lock);
3424 mfi_release_command(cm);
3425 mtx_unlock(&sc->mfi_io_lock);
3429 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3430 error = copyin(arg, &l_aen, sizeof(l_aen));
3433 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3434 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3436 mtx_lock(&sc->mfi_io_lock);
3437 if (mfi_aen_entry != NULL) {
/* Track the registering process so close() can clean up after it */
3438 mfi_aen_entry->p = curproc;
3439 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3442 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3443 l_aen.laen_class_locale);
/* Registration failed: unwind the pid-list entry */
3446 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3448 free(mfi_aen_entry, M_MFIBUF);
3450 mtx_unlock(&sc->mfi_io_lock);
3454 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
/*
 * poll(2) handler for the control device: report readable when an AEN
 * has fired (clearing the trigger), otherwise record the thread for
 * selwakeup when one arrives.
 * NOTE(review): interior lines elided from view; code kept as-is.
 */
3463 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3465 struct mfi_softc *sc;
3470 if (poll_events & (POLLIN | POLLRDNORM)) {
3471 if (sc->mfi_aen_triggered != 0) {
3472 revents |= poll_events & (POLLIN | POLLRDNORM);
/* One-shot: consume the trigger so the next poll blocks again */
3473 sc->mfi_aen_triggered = 0;
3475 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3481 if (poll_events & (POLLIN | POLLRDNORM)) {
3482 sc->mfi_poll_waiting = 1;
3483 selrecord(td, &sc->mfi_select);
/*
 * Debug walker: iterate every mfi softc in the devclass and report busy
 * commands older than MFI_CMD_TIMEOUT as timed out.
 * NOTE(review): the function's signature line is elided from this view
 * (upstream this appears to be mfi_dump_all); code kept as-is.
 */
3493 struct mfi_softc *sc;
3494 struct mfi_command *cm;
3500 dc = devclass_find("mfi");
3502 printf("No mfi dev class\n");
3506 for (i = 0; ; i++) {
3507 sc = devclass_get_softc(dc, i);
3510 device_printf(sc->mfi_dev, "Dumping\n\n");
/* Any command timestamped before this deadline has exceeded the timeout */
3512 deadline = time_uptime - MFI_CMD_TIMEOUT;
3513 mtx_lock(&sc->mfi_io_lock);
3514 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3515 if (cm->cm_timestamp < deadline) {
3516 device_printf(sc->mfi_dev,
3517 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3518 cm, (int)(time_uptime - cm->cm_timestamp));
3529 mtx_unlock(&sc->mfi_io_lock);
3536 mfi_timeout(void *data)
3538 struct mfi_softc *sc = (struct mfi_softc *)data;
3539 struct mfi_command *cm;
3543 deadline = time_uptime - MFI_CMD_TIMEOUT;
3544 if (sc->adpreset == 0) {
3545 if (!mfi_tbolt_reset(sc)) {
3546 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3550 mtx_lock(&sc->mfi_io_lock);
3551 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3552 if (sc->mfi_aen_cm == cm)
3554 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3555 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3556 cm->cm_timestamp = time_uptime;
3558 device_printf(sc->mfi_dev,
3559 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3560 cm, (int)(time_uptime - cm->cm_timestamp)
3563 MFI_VALIDATE_CMD(sc, cm);
3574 mtx_unlock(&sc->mfi_io_lock);
3576 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,