2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies,either expressed or implied, of the FreeBSD Project.
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
47 #include <cam/cam_ccb.h>
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92 struct mrsas_mfi_cmd *cmd_to_abort);
93 static struct mrsas_softc *
94 mrsas_get_softc_instance(struct cdev *dev,
95 u_long cmd, caddr_t arg);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99 struct mrsas_mfi_cmd *mfi_cmd);
100 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int mrsas_init_adapter(struct mrsas_softc *sc);
103 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int mrsas_ioc_init(struct mrsas_softc *sc);
107 int mrsas_bus_scan(struct mrsas_softc *sc);
108 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
114 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
115 struct mrsas_mfi_cmd *cmd);
117 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
119 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
120 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
121 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void mrsas_disable_intr(struct mrsas_softc *sc);
124 void mrsas_enable_intr(struct mrsas_softc *sc);
125 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
126 void mrsas_free_mem(struct mrsas_softc *sc);
127 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
128 void mrsas_isr(void *arg);
129 void mrsas_teardown_intr(struct mrsas_softc *sc);
130 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
131 void mrsas_kill_hba(struct mrsas_softc *sc);
132 void mrsas_aen_handler(struct mrsas_softc *sc);
134 mrsas_write_reg(struct mrsas_softc *sc, int offset,
137 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
138 u_int32_t req_desc_hi);
139 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
141 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
142 struct mrsas_mfi_cmd *cmd, u_int8_t status);
144 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
146 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
148 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
149 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
151 extern int mrsas_cam_attach(struct mrsas_softc *sc);
152 extern void mrsas_cam_detach(struct mrsas_softc *sc);
153 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
154 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
155 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
156 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163 extern void mrsas_xpt_release(struct mrsas_softc *sc);
164 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165 mrsas_get_request_desc(struct mrsas_softc *sc,
167 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
171 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
174 * PCI device struct and table
177 typedef struct mrsas_ident {
185 MRSAS_CTLR_ID device_table[] = {
186 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
187 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
188 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
189 {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
190 {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
191 {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
192 {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
197 * Character device entry points
200 static struct cdevsw mrsas_cdevsw = {
201 .d_version = D_VERSION,
202 .d_open = mrsas_open,
203 .d_close = mrsas_close,
204 .d_read = mrsas_read,
205 .d_write = mrsas_write,
206 .d_ioctl = mrsas_ioctl,
207 .d_poll = mrsas_poll,
211 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
214 * In the cdevsw routines, we find our softc by using the si_drv1 member of
215 * struct cdev. We set this variable to point to our softc in our attach
216 * routine when we create the /dev entry.
219 mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
221 struct mrsas_softc *sc;
228 mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
230 struct mrsas_softc *sc;
237 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
239 struct mrsas_softc *sc;
245 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
247 struct mrsas_softc *sc;
254 * Register Read/Write Functions
258 mrsas_write_reg(struct mrsas_softc *sc, int offset,
261 bus_space_tag_t bus_tag = sc->bus_tag;
262 bus_space_handle_t bus_handle = sc->bus_handle;
264 bus_space_write_4(bus_tag, bus_handle, offset, value);
268 mrsas_read_reg(struct mrsas_softc *sc, int offset)
270 bus_space_tag_t bus_tag = sc->bus_tag;
271 bus_space_handle_t bus_handle = sc->bus_handle;
273 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
278 * Interrupt Disable/Enable/Clear Functions
282 mrsas_disable_intr(struct mrsas_softc *sc)
284 u_int32_t mask = 0xFFFFFFFF;
287 sc->mask_interrupts = 1;
288 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
289 /* Dummy read to force pci flush */
290 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
294 mrsas_enable_intr(struct mrsas_softc *sc)
296 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
299 sc->mask_interrupts = 0;
300 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
301 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
303 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
304 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
308 mrsas_clear_intr(struct mrsas_softc *sc)
310 u_int32_t status, fw_status, fw_state;
312 /* Read received interrupt */
313 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
316 * If FW state change interrupt is received, write to it again to
319 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
320 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
321 outbound_scratch_pad));
322 fw_state = fw_status & MFI_STATE_MASK;
323 if (fw_state == MFI_STATE_FAULT) {
324 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
325 if (sc->ocr_thread_active)
326 wakeup(&sc->ocr_chan);
328 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
329 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
332 /* Not our interrupt, so just return */
333 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
336 /* We got a reply interrupt */
341 * PCI Support Functions
344 static struct mrsas_ident *
345 mrsas_find_ident(device_t dev)
347 struct mrsas_ident *pci_device;
349 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
350 if ((pci_device->vendor == pci_get_vendor(dev)) &&
351 (pci_device->device == pci_get_device(dev)) &&
352 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
353 (pci_device->subvendor == 0xffff)) &&
354 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
355 (pci_device->subdevice == 0xffff)))
362 mrsas_probe(device_t dev)
364 static u_int8_t first_ctrl = 1;
365 struct mrsas_ident *id;
367 if ((id = mrsas_find_ident(dev)) != NULL) {
369 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
373 device_set_desc(dev, id->desc);
374 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
381 * mrsas_setup_sysctl: setup sysctl values for mrsas
382 * input: Adapter instance soft state
384 * Setup sysctl entries for mrsas driver.
387 mrsas_setup_sysctl(struct mrsas_softc *sc)
389 struct sysctl_ctx_list *sysctl_ctx = NULL;
390 struct sysctl_oid *sysctl_tree = NULL;
391 char tmpstr[80], tmpstr2[80];
394 * Setup the sysctl variable so the user can change the debug level
397 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
398 device_get_unit(sc->mrsas_dev));
399 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
401 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
402 if (sysctl_ctx != NULL)
403 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
405 if (sysctl_tree == NULL) {
406 sysctl_ctx_init(&sc->sysctl_ctx);
407 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
408 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
409 CTLFLAG_RD, 0, tmpstr);
410 if (sc->sysctl_tree == NULL)
412 sysctl_ctx = &sc->sysctl_ctx;
413 sysctl_tree = sc->sysctl_tree;
415 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
416 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
417 "Disable the use of OCR");
419 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
420 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
421 strlen(MRSAS_VERSION), "driver version");
423 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
424 OID_AUTO, "reset_count", CTLFLAG_RD,
425 &sc->reset_count, 0, "number of ocr from start of the day");
427 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
428 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
429 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
431 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
432 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
433 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
435 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
436 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
437 "Driver debug level");
439 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
440 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
441 0, "Driver IO timeout value in mili-second.");
443 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
444 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
445 &sc->mrsas_fw_fault_check_delay,
446 0, "FW fault check thread delay in seconds. <default is 1 sec>");
448 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
449 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
450 &sc->reset_in_progress, 0, "ocr in progress status");
455 * mrsas_get_tunables: get tunable parameters.
456 * input: Adapter instance soft state
458 * Get tunable parameters. This will help to debug driver at boot time.
461 mrsas_get_tunables(struct mrsas_softc *sc)
465 /* XXX default to some debugging for now */
466 sc->mrsas_debug = MRSAS_FAULT;
467 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
468 sc->mrsas_fw_fault_check_delay = 1;
470 sc->reset_in_progress = 0;
473 * Grab the global variables.
475 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
478 * Grab the global variables.
480 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
482 /* Grab the unit-instance variables */
483 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
484 device_get_unit(sc->mrsas_dev));
485 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
489 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
490 * Used to get sequence number at driver load time.
491 * input: Adapter soft state
493 * Allocates DMAable memory for the event log info internal command.
496 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
500 /* Allocate get event log info command */
501 el_info_size = sizeof(struct mrsas_evt_log_info);
502 if (bus_dma_tag_create(sc->mrsas_parent_tag,
504 BUS_SPACE_MAXADDR_32BIT,
513 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
516 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
517 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
518 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
521 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
522 sc->el_info_mem, el_info_size, mrsas_addr_cb,
523 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
524 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
527 memset(sc->el_info_mem, 0, el_info_size);
532 * mrsas_free_evt_info_cmd: Free memory for Event log info command
533 * input: Adapter soft state
535 * Deallocates memory for the event log info internal command.
538 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
540 if (sc->el_info_phys_addr)
541 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
542 if (sc->el_info_mem != NULL)
543 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
544 if (sc->el_info_tag != NULL)
545 bus_dma_tag_destroy(sc->el_info_tag);
549 * mrsas_get_seq_num: Get latest event sequence number
550 * @sc: Adapter soft state
551 * @eli: Firmware event log sequence number information.
553 * Firmware maintains a log of all events in a non-volatile area.
554 * Driver get the sequence number using DCMD
555 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
559 mrsas_get_seq_num(struct mrsas_softc *sc,
560 struct mrsas_evt_log_info *eli)
562 struct mrsas_mfi_cmd *cmd;
563 struct mrsas_dcmd_frame *dcmd;
564 u_int8_t do_ocr = 1, retcode = 0;
566 cmd = mrsas_get_mfi_cmd(sc);
569 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
572 dcmd = &cmd->frame->dcmd;
574 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
575 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
576 mrsas_release_mfi_cmd(cmd);
579 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
581 dcmd->cmd = MFI_CMD_DCMD;
582 dcmd->cmd_status = 0x0;
584 dcmd->flags = MFI_FRAME_DIR_READ;
587 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
588 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
589 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
590 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
592 retcode = mrsas_issue_blocked_cmd(sc, cmd);
593 if (retcode == ETIMEDOUT)
598 * Copy the data back into callers buffer
600 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
601 mrsas_free_evt_log_info_cmd(sc);
605 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
607 mrsas_release_mfi_cmd(cmd);
614 * mrsas_register_aen: Register for asynchronous event notification
615 * @sc: Adapter soft state
616 * @seq_num: Starting sequence number
617 * @class_locale: Class of the event
619 * This function subscribes for events beyond the @seq_num
620 * and type @class_locale.
624 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
625 u_int32_t class_locale_word)
628 struct mrsas_mfi_cmd *cmd;
629 struct mrsas_dcmd_frame *dcmd;
630 union mrsas_evt_class_locale curr_aen;
631 union mrsas_evt_class_locale prev_aen;
634 * If there an AEN pending already (aen_cmd), check if the
635 * class_locale of that pending AEN is inclusive of the new AEN
636 * request we currently have. If it is, then we don't have to do
637 * anything. In other words, whichever events the current AEN request
638 * is subscribing to, have already been subscribed to. If the old_cmd
639 * is _not_ inclusive, then we have to abort that command, form a
640 * class_locale that is superset of both old and current and re-issue
644 curr_aen.word = class_locale_word;
648 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
651 * A class whose enum value is smaller is inclusive of all
652 * higher values. If a PROGRESS (= -1) was previously
653 * registered, then a new registration requests for higher
654 * classes need not be sent to FW. They are automatically
655 * included. Locale numbers don't have such hierarchy. They
658 if ((prev_aen.members.class <= curr_aen.members.class) &&
659 !((prev_aen.members.locale & curr_aen.members.locale) ^
660 curr_aen.members.locale)) {
662 * Previously issued event registration includes
663 * current request. Nothing to do.
667 curr_aen.members.locale |= prev_aen.members.locale;
669 if (prev_aen.members.class < curr_aen.members.class)
670 curr_aen.members.class = prev_aen.members.class;
672 sc->aen_cmd->abort_aen = 1;
673 ret_val = mrsas_issue_blocked_abort_cmd(sc,
677 printf("mrsas: Failed to abort "
678 "previous AEN command\n");
683 cmd = mrsas_get_mfi_cmd(sc);
688 dcmd = &cmd->frame->dcmd;
690 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
693 * Prepare DCMD for aen registration
695 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
697 dcmd->cmd = MFI_CMD_DCMD;
698 dcmd->cmd_status = 0x0;
700 dcmd->flags = MFI_FRAME_DIR_READ;
703 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
704 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
705 dcmd->mbox.w[0] = seq_num;
706 sc->last_seq_num = seq_num;
707 dcmd->mbox.w[1] = curr_aen.word;
708 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
709 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
711 if (sc->aen_cmd != NULL) {
712 mrsas_release_mfi_cmd(cmd);
716 * Store reference to the cmd used to register for AEN. When an
717 * application wants us to register for AEN, we have to abort this
718 * cmd and re-register with a new EVENT LOCALE supplied by that app
723 * Issue the aen registration frame
725 if (mrsas_issue_dcmd(sc, cmd)) {
726 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
733 * mrsas_start_aen: Subscribes to AEN during driver load time
734 * @instance: Adapter soft state
737 mrsas_start_aen(struct mrsas_softc *sc)
739 struct mrsas_evt_log_info eli;
740 union mrsas_evt_class_locale class_locale;
743 /* Get the latest sequence number from FW */
745 memset(&eli, 0, sizeof(eli));
747 if (mrsas_get_seq_num(sc, &eli))
750 /* Register AEN with FW for latest sequence number plus 1 */
751 class_locale.members.reserved = 0;
752 class_locale.members.locale = MR_EVT_LOCALE_ALL;
753 class_locale.members.class = MR_EVT_CLASS_DEBUG;
755 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
761 * mrsas_setup_msix: Allocate MSI-x vectors
762 * @sc: adapter soft state
765 mrsas_setup_msix(struct mrsas_softc *sc)
769 for (i = 0; i < sc->msix_vectors; i++) {
770 sc->irq_context[i].sc = sc;
771 sc->irq_context[i].MSIxIndex = i;
772 sc->irq_id[i] = i + 1;
773 sc->mrsas_irq[i] = bus_alloc_resource_any
774 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
776 if (sc->mrsas_irq[i] == NULL) {
777 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
778 goto irq_alloc_failed;
780 if (bus_setup_intr(sc->mrsas_dev,
782 INTR_MPSAFE | INTR_TYPE_CAM,
783 NULL, mrsas_isr, &sc->irq_context[i],
784 &sc->intr_handle[i])) {
785 device_printf(sc->mrsas_dev,
786 "Cannot set up MSI-x interrupt handler\n");
787 goto irq_alloc_failed;
793 mrsas_teardown_intr(sc);
798 * mrsas_allocate_msix: Setup MSI-x vectors
799 * @sc: adapter soft state
802 mrsas_allocate_msix(struct mrsas_softc *sc)
804 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
805 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
806 " of vectors\n", sc->msix_vectors);
808 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
809 goto irq_alloc_failed;
814 mrsas_teardown_intr(sc);
819 * mrsas_attach: PCI entry point
820 * input: pointer to device struct
822 * Performs setup of PCI and registers, initializes mutexes and linked lists,
823 * registers interrupts and CAM, and initializes the adapter/controller to
827 mrsas_attach(device_t dev)
829 struct mrsas_softc *sc = device_get_softc(dev);
830 uint32_t cmd, bar, error;
832 memset(sc, 0, sizeof(struct mrsas_softc));
834 /* Look up our softc and initialize its fields. */
836 sc->device_id = pci_get_device(dev);
838 mrsas_get_tunables(sc);
841 * Set up PCI and registers
843 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
844 if ((cmd & PCIM_CMD_PORTEN) == 0) {
847 /* Force the busmaster enable bit on. */
848 cmd |= PCIM_CMD_BUSMASTEREN;
849 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
851 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
853 sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
854 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
855 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
857 device_printf(dev, "Cannot allocate PCI registers\n");
860 sc->bus_tag = rman_get_bustag(sc->reg_res);
861 sc->bus_handle = rman_get_bushandle(sc->reg_res);
863 /* Intialize mutexes */
864 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
865 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
866 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
867 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
868 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
869 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
870 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
871 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
873 /* Intialize linked list */
874 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
875 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
877 mrsas_atomic_set(&sc->fw_outstanding, 0);
879 sc->io_cmds_highwater = 0;
881 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
882 sc->UnevenSpanSupport = 0;
886 /* Initialize Firmware */
887 if (mrsas_init_fw(sc) != SUCCESS) {
890 /* Register mrsas to CAM layer */
891 if ((mrsas_cam_attach(sc) != SUCCESS)) {
892 goto attach_fail_cam;
895 if (mrsas_setup_irq(sc) != SUCCESS) {
896 goto attach_fail_irq;
898 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
899 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
900 device_get_unit(sc->mrsas_dev));
902 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
903 goto attach_fail_ocr_thread;
906 * After FW initialization and OCR thread creation
907 * we will defer the cdev creation, AEN setup on ICH callback
909 sc->mrsas_ich.ich_func = mrsas_ich_startup;
910 sc->mrsas_ich.ich_arg = sc;
911 if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
912 device_printf(sc->mrsas_dev, "Config hook is already established\n");
914 mrsas_setup_sysctl(sc);
917 attach_fail_ocr_thread:
918 if (sc->ocr_thread_active)
919 wakeup(&sc->ocr_chan);
921 mrsas_teardown_intr(sc);
923 mrsas_cam_detach(sc);
925 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
926 if (sc->msix_enable == 1)
927 pci_release_msi(sc->mrsas_dev);
929 mtx_destroy(&sc->sim_lock);
930 mtx_destroy(&sc->aen_lock);
931 mtx_destroy(&sc->pci_lock);
932 mtx_destroy(&sc->io_lock);
933 mtx_destroy(&sc->ioctl_lock);
934 mtx_destroy(&sc->mpt_cmd_pool_lock);
935 mtx_destroy(&sc->mfi_cmd_pool_lock);
936 mtx_destroy(&sc->raidmap_lock);
939 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
940 sc->reg_res_id, sc->reg_res);
946 * Interrupt config hook
949 mrsas_ich_startup(void *arg)
951 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
954 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
956 sema_init(&sc->ioctl_count_sema,
957 MRSAS_MAX_MFI_CMDS - 5,
958 IOCTL_SEMA_DESCRIPTION);
960 /* Create a /dev entry for mrsas controller. */
961 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
962 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
963 device_get_unit(sc->mrsas_dev));
965 if (device_get_unit(sc->mrsas_dev) == 0) {
966 make_dev_alias_p(MAKEDEV_CHECKNAME,
967 &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
968 "megaraid_sas_ioctl_node");
971 sc->mrsas_cdev->si_drv1 = sc;
974 * Add this controller to mrsas_mgmt_info structure so that it can be
975 * exported to management applications
977 if (device_get_unit(sc->mrsas_dev) == 0)
978 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
980 mrsas_mgmt_info.count++;
981 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
982 mrsas_mgmt_info.max_index++;
984 /* Enable Interrupts */
985 mrsas_enable_intr(sc);
987 /* Initiate AEN (Asynchronous Event Notification) */
988 if (mrsas_start_aen(sc)) {
989 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
990 "Further events from the controller will not be communicated.\n"
991 "Either there is some problem in the controller"
992 "or the controller does not support AEN.\n"
993 "Please contact to the SUPPORT TEAM if the problem persists\n");
995 if (sc->mrsas_ich.ich_arg != NULL) {
996 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
997 config_intrhook_disestablish(&sc->mrsas_ich);
998 sc->mrsas_ich.ich_arg = NULL;
1003 * mrsas_detach: De-allocates and teardown resources
1004 * input: pointer to device struct
1006 * This function is the entry point for device disconnect and detach.
1007 * It performs memory de-allocations, shutdown of the controller and various
1008 * teardown and destroy resource functions.
1011 mrsas_detach(device_t dev)
1013 struct mrsas_softc *sc;
1016 sc = device_get_softc(dev);
1017 sc->remove_in_progress = 1;
1019 /* Destroy the character device so no other IOCTL will be handled */
1020 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1021 destroy_dev(sc->mrsas_linux_emulator_cdev);
1022 destroy_dev(sc->mrsas_cdev);
1025 * Take the instance off the instance array. Note that we will not
1026 * decrement the max_index. We let this array be sparse array
1028 for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1029 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1030 mrsas_mgmt_info.count--;
1031 mrsas_mgmt_info.sc_ptr[i] = NULL;
1036 if (sc->ocr_thread_active)
1037 wakeup(&sc->ocr_chan);
1038 while (sc->reset_in_progress) {
1040 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1041 mrsas_dprint(sc, MRSAS_INFO,
1042 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1044 pause("mr_shutdown", hz);
1047 while (sc->ocr_thread_active) {
1049 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1050 mrsas_dprint(sc, MRSAS_INFO,
1052 "mrsas_ocr thread to quit ocr %d\n", i,
1053 sc->ocr_thread_active);
1055 pause("mr_shutdown", hz);
1057 mrsas_flush_cache(sc);
1058 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1059 mrsas_disable_intr(sc);
1060 mrsas_cam_detach(sc);
1061 mrsas_teardown_intr(sc);
1063 mtx_destroy(&sc->sim_lock);
1064 mtx_destroy(&sc->aen_lock);
1065 mtx_destroy(&sc->pci_lock);
1066 mtx_destroy(&sc->io_lock);
1067 mtx_destroy(&sc->ioctl_lock);
1068 mtx_destroy(&sc->mpt_cmd_pool_lock);
1069 mtx_destroy(&sc->mfi_cmd_pool_lock);
1070 mtx_destroy(&sc->raidmap_lock);
1072 /* Wait for all the semaphores to be released */
1073 while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
1074 pause("mr_shutdown", hz);
1076 /* Destroy the counting semaphore created for Ioctl */
1077 sema_destroy(&sc->ioctl_count_sema);
1080 bus_release_resource(sc->mrsas_dev,
1081 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1083 if (sc->sysctl_tree != NULL)
1084 sysctl_ctx_free(&sc->sysctl_ctx);
1090 * mrsas_free_mem: Frees allocated memory
1091 * input: Adapter instance soft state
1093 * This function is called from mrsas_detach() to free previously allocated
1097 mrsas_free_mem(struct mrsas_softc *sc)
1101 struct mrsas_mfi_cmd *mfi_cmd;
1102 struct mrsas_mpt_cmd *mpt_cmd;
1105 * Free RAID map memory
1107 for (i = 0; i < 2; i++) {
1108 if (sc->raidmap_phys_addr[i])
1109 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1110 if (sc->raidmap_mem[i] != NULL)
1111 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1112 if (sc->raidmap_tag[i] != NULL)
1113 bus_dma_tag_destroy(sc->raidmap_tag[i]);
1115 if (sc->ld_drv_map[i] != NULL)
1116 free(sc->ld_drv_map[i], M_MRSAS);
1118 for (i = 0; i < 2; i++) {
1119 if (sc->jbodmap_phys_addr[i])
1120 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1121 if (sc->jbodmap_mem[i] != NULL)
1122 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1123 if (sc->jbodmap_tag[i] != NULL)
1124 bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1127 * Free version buffer memroy
1129 if (sc->verbuf_phys_addr)
1130 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1131 if (sc->verbuf_mem != NULL)
1132 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1133 if (sc->verbuf_tag != NULL)
1134 bus_dma_tag_destroy(sc->verbuf_tag);
1138 * Free sense buffer memory
1140 if (sc->sense_phys_addr)
1141 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1142 if (sc->sense_mem != NULL)
1143 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1144 if (sc->sense_tag != NULL)
1145 bus_dma_tag_destroy(sc->sense_tag);
1148 * Free chain frame memory
1150 if (sc->chain_frame_phys_addr)
1151 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1152 if (sc->chain_frame_mem != NULL)
1153 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1154 if (sc->chain_frame_tag != NULL)
1155 bus_dma_tag_destroy(sc->chain_frame_tag);
1158 * Free IO Request memory
1160 if (sc->io_request_phys_addr)
1161 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1162 if (sc->io_request_mem != NULL)
1163 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1164 if (sc->io_request_tag != NULL)
1165 bus_dma_tag_destroy(sc->io_request_tag);
1168 * Free Reply Descriptor memory
1170 if (sc->reply_desc_phys_addr)
1171 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1172 if (sc->reply_desc_mem != NULL)
1173 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1174 if (sc->reply_desc_tag != NULL)
1175 bus_dma_tag_destroy(sc->reply_desc_tag);
1178 * Free event detail memory
1180 if (sc->evt_detail_phys_addr)
1181 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1182 if (sc->evt_detail_mem != NULL)
1183 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1184 if (sc->evt_detail_tag != NULL)
1185 bus_dma_tag_destroy(sc->evt_detail_tag);
1190 if (sc->mfi_cmd_list) {
1191 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1192 mfi_cmd = sc->mfi_cmd_list[i];
1193 mrsas_free_frame(sc, mfi_cmd);
1196 if (sc->mficmd_frame_tag != NULL)
1197 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1200 * Free MPT internal command list
1202 max_cmd = sc->max_fw_cmds;
1203 if (sc->mpt_cmd_list) {
1204 for (i = 0; i < max_cmd; i++) {
1205 mpt_cmd = sc->mpt_cmd_list[i];
1206 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1207 free(sc->mpt_cmd_list[i], M_MRSAS);
1209 free(sc->mpt_cmd_list, M_MRSAS);
1210 sc->mpt_cmd_list = NULL;
1213 * Free MFI internal command list
1216 if (sc->mfi_cmd_list) {
1217 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1218 free(sc->mfi_cmd_list[i], M_MRSAS);
1220 free(sc->mfi_cmd_list, M_MRSAS);
1221 sc->mfi_cmd_list = NULL;
1224 * Free request descriptor memory
1226 free(sc->req_desc, M_MRSAS);
1227 sc->req_desc = NULL;
1230 * Destroy parent tag
1232 if (sc->mrsas_parent_tag != NULL)
1233 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1236 * Free ctrl_info memory
1238 if (sc->ctrl_info != NULL)
1239 free(sc->ctrl_info, M_MRSAS);
1243 * mrsas_teardown_intr: Teardown interrupt
1244 * input: Adapter instance soft state
1246 * This function is called from mrsas_detach() to teardown and release bus
1247 * interrupt resourse.
1250 mrsas_teardown_intr(struct mrsas_softc *sc)
1254 if (!sc->msix_enable) {
1255 if (sc->intr_handle[0])
1256 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1257 if (sc->mrsas_irq[0] != NULL)
1258 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1259 sc->irq_id[0], sc->mrsas_irq[0]);
1260 sc->intr_handle[0] = NULL;
1262 for (i = 0; i < sc->msix_vectors; i++) {
1263 if (sc->intr_handle[i])
1264 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1265 sc->intr_handle[i]);
1267 if (sc->mrsas_irq[i] != NULL)
1268 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1269 sc->irq_id[i], sc->mrsas_irq[i]);
1271 sc->intr_handle[i] = NULL;
1273 pci_release_msi(sc->mrsas_dev);
1279 * mrsas_suspend: Suspend entry point
1280 * input: Device struct pointer
1282 * This function is the entry point for system suspend from the OS.
1285 mrsas_suspend(device_t dev)
1287 /* This will be filled when the driver will have hibernation support */
1292 * mrsas_resume: Resume entry point
1293 * input: Device struct pointer
1295 * This function is the entry point for system resume from the OS.
1298 mrsas_resume(device_t dev)
1300 /* This will be filled when the driver will have hibernation support */
1305 * mrsas_get_softc_instance: Find softc instance based on cmd type
1307 * This function will return softc instance based on cmd type.
1308 * In some case, application fire ioctl on required management instance and
1309 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1310 * case, else get the softc instance from host_no provided by application in
1314 static struct mrsas_softc *
1315 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1317 struct mrsas_softc *sc = NULL;
1318 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1320 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1324 * get the Host number & the softc from data sent by the
1327 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1329 printf("There is no Controller number %d\n",
1331 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1332 mrsas_dprint(sc, MRSAS_FAULT,
1333 "Invalid Controller number %d\n", user_ioc->host_no);
1340 * mrsas_ioctl: IOCtl commands entry point.
1342 * This function is the entry point for IOCtls from the OS. It calls the
1343 * appropriate function for processing depending on the command received.
1346 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1348 struct mrsas_softc *sc;
1350 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1352 sc = mrsas_get_softc_instance(dev, cmd, arg);
1356 if (sc->remove_in_progress) {
1357 mrsas_dprint(sc, MRSAS_INFO,
1358 "Driver remove or shutdown called.\n");
1361 mtx_lock_spin(&sc->ioctl_lock);
1362 if (!sc->reset_in_progress) {
1363 mtx_unlock_spin(&sc->ioctl_lock);
1366 mtx_unlock_spin(&sc->ioctl_lock);
1367 while (sc->reset_in_progress) {
1369 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1370 mrsas_dprint(sc, MRSAS_INFO,
1371 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1373 pause("mr_ioctl", hz);
1378 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1379 #ifdef COMPAT_FREEBSD32
1380 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1383 * Decrement the Ioctl counting Semaphore before getting an
1386 sema_wait(&sc->ioctl_count_sema);
1388 ret = mrsas_passthru(sc, (void *)arg, cmd);
1390 /* Increment the Ioctl counting semaphore value */
1391 sema_post(&sc->ioctl_count_sema);
1394 case MRSAS_IOC_SCAN_BUS:
1395 ret = mrsas_bus_scan(sc);
1398 case MRSAS_IOC_GET_PCI_INFO:
1399 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1400 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1401 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1402 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1403 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1404 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1405 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1406 "pci device no: %d, pci function no: %d,"
1407 "pci domain ID: %d\n",
1408 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1409 pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1414 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1422 * mrsas_poll: poll entry point for mrsas driver fd
1424 * This function is the entry point for poll from the OS. It waits for some AEN
1425 * events to be triggered from the controller and notifies back.
1428 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1430 struct mrsas_softc *sc;
1435 if (poll_events & (POLLIN | POLLRDNORM)) {
1436 if (sc->mrsas_aen_triggered) {
1437 revents |= poll_events & (POLLIN | POLLRDNORM);
1441 if (poll_events & (POLLIN | POLLRDNORM)) {
1442 mtx_lock(&sc->aen_lock);
1443 sc->mrsas_poll_waiting = 1;
1444 selrecord(td, &sc->mrsas_select);
1445 mtx_unlock(&sc->aen_lock);
1452 * mrsas_setup_irq: Set up interrupt
1453 * input: Adapter instance soft state
1455 * This function sets up interrupts as a bus resource, with flags indicating
1456 * resource permitting contemporaneous sharing and for resource to activate
1460 mrsas_setup_irq(struct mrsas_softc *sc)
1462 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1463 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1466 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1467 sc->irq_context[0].sc = sc;
1468 sc->irq_context[0].MSIxIndex = 0;
1470 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1471 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1472 if (sc->mrsas_irq[0] == NULL) {
1473 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1477 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1478 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1479 &sc->irq_context[0], &sc->intr_handle[0])) {
1480 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1489 * mrsas_isr: ISR entry point
1490 * input: argument pointer
1492 * This function is the interrupt service routine entry point. There are two
1493 * types of interrupts, state change interrupt and response interrupt. If an
1494 * interrupt is not ours, we just return.
1497 mrsas_isr(void *arg)
1499 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1500 struct mrsas_softc *sc = irq_context->sc;
1503 if (sc->mask_interrupts)
1506 if (!sc->msix_vectors) {
1507 status = mrsas_clear_intr(sc);
1511 /* If we are resetting, bail */
1512 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1513 printf(" Entered into ISR when OCR is going active. \n");
1514 mrsas_clear_intr(sc);
1517 /* Process for reply request and clear response interrupt */
1518 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1519 mrsas_clear_intr(sc);
1525 * mrsas_complete_cmd: Process reply request
1526 * input: Adapter instance soft state
1528 * This function is called from mrsas_isr() to process reply request and clear
1529 * response interrupt. Processing of the reply request entails walking
1530 * through the reply descriptor array for the command request pended from
1531 * Firmware. We look at the Function field to determine the command type and
1532 * perform the appropriate action. Before we return, we clear the response
1536 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1538 Mpi2ReplyDescriptorsUnion_t *desc;
1539 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1540 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1541 struct mrsas_mpt_cmd *cmd_mpt;
1542 struct mrsas_mfi_cmd *cmd_mfi;
1543 u_int8_t reply_descript_type;
1544 u_int16_t smid, num_completed;
1545 u_int8_t status, extStatus;
1546 union desc_value desc_val;
1547 PLD_LOAD_BALANCE_INFO lbinfo;
1548 u_int32_t device_id;
1549 int threshold_reply_count = 0;
1552 /* If we have a hardware error, not need to continue */
1553 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1556 desc = sc->reply_desc_mem;
1557 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1558 + sc->last_reply_idx[MSIxIndex];
1560 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1562 desc_val.word = desc->Words;
1565 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1567 /* Find our reply descriptor for the command and process */
1568 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
1569 smid = reply_desc->SMID;
1570 cmd_mpt = sc->mpt_cmd_list[smid - 1];
1571 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1573 status = scsi_io_req->RaidContext.status;
1574 extStatus = scsi_io_req->RaidContext.exStatus;
1576 switch (scsi_io_req->Function) {
1577 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
1578 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1579 lbinfo = &sc->load_balance_info[device_id];
1580 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1581 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1582 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1584 /* Fall thru and complete IO */
1585 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1586 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1587 mrsas_cmd_done(sc, cmd_mpt);
1588 scsi_io_req->RaidContext.status = 0;
1589 scsi_io_req->RaidContext.exStatus = 0;
1590 mrsas_atomic_dec(&sc->fw_outstanding);
1592 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
1593 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1594 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1596 mrsas_release_mpt_cmd(cmd_mpt);
1600 sc->last_reply_idx[MSIxIndex]++;
1601 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1602 sc->last_reply_idx[MSIxIndex] = 0;
1604 desc->Words = ~((uint64_t)0x00); /* set it back to all
1607 threshold_reply_count++;
1609 /* Get the next reply descriptor */
1610 if (!sc->last_reply_idx[MSIxIndex]) {
1611 desc = sc->reply_desc_mem;
1612 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1616 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1617 desc_val.word = desc->Words;
1619 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1621 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1625 * Write to reply post index after completing threshold reply
1626 * count and still there are more replies in reply queue
1627 * pending to be completed.
1629 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1630 if (sc->msix_enable) {
1631 if ((sc->device_id == MRSAS_INVADER) ||
1632 (sc->device_id == MRSAS_FURY) ||
1633 (sc->device_id == MRSAS_INTRUDER) ||
1634 (sc->device_id == MRSAS_INTRUDER_24) ||
1635 (sc->device_id == MRSAS_CUTLASS_52) ||
1636 (sc->device_id == MRSAS_CUTLASS_53))
1637 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1638 ((MSIxIndex & 0x7) << 24) |
1639 sc->last_reply_idx[MSIxIndex]);
1641 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1642 sc->last_reply_idx[MSIxIndex]);
1644 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1645 reply_post_host_index), sc->last_reply_idx[0]);
1647 threshold_reply_count = 0;
1651 /* No match, just return */
1652 if (num_completed == 0)
1655 /* Clear response interrupt */
1656 if (sc->msix_enable) {
1657 if ((sc->device_id == MRSAS_INVADER) ||
1658 (sc->device_id == MRSAS_FURY) ||
1659 (sc->device_id == MRSAS_INTRUDER) ||
1660 (sc->device_id == MRSAS_INTRUDER_24) ||
1661 (sc->device_id == MRSAS_CUTLASS_52) ||
1662 (sc->device_id == MRSAS_CUTLASS_53)) {
1663 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1664 ((MSIxIndex & 0x7) << 24) |
1665 sc->last_reply_idx[MSIxIndex]);
1667 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1668 sc->last_reply_idx[MSIxIndex]);
1670 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1671 reply_post_host_index), sc->last_reply_idx[0]);
1677 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1678 * input: Adapter instance soft state
1680 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1681 * It checks the command status and maps the appropriate CAM status for the
1685 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1687 struct mrsas_softc *sc = cmd->sc;
1688 u_int8_t *sense_data;
1692 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1694 case MFI_STAT_SCSI_IO_FAILED:
1695 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1696 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1697 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1699 /* For now just copy 18 bytes back */
1700 memcpy(sense_data, cmd->sense, 18);
1701 cmd->ccb_ptr->csio.sense_len = 18;
1702 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1705 case MFI_STAT_LD_OFFLINE:
1706 case MFI_STAT_DEVICE_NOT_FOUND:
1707 if (cmd->ccb_ptr->ccb_h.target_lun)
1708 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1710 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1712 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1713 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1716 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1717 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1718 cmd->ccb_ptr->csio.scsi_status = status;
1724 * mrsas_alloc_mem: Allocate DMAable memory
1725 * input: Adapter instance soft state
1727 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1728 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1729 * Kernel virtual address. Callback argument is physical memory address.
1732 mrsas_alloc_mem(struct mrsas_softc *sc)
1734 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1735 chain_frame_size, evt_detail_size, count;
1738 * Allocate parent DMA tag
1740 if (bus_dma_tag_create(NULL, /* parent */
1743 BUS_SPACE_MAXADDR, /* lowaddr */
1744 BUS_SPACE_MAXADDR, /* highaddr */
1745 NULL, NULL, /* filter, filterarg */
1746 MAXPHYS, /* maxsize */
1747 sc->max_num_sge, /* nsegments */
1748 MAXPHYS, /* maxsegsize */
1750 NULL, NULL, /* lockfunc, lockarg */
1751 &sc->mrsas_parent_tag /* tag */
1753 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1757 * Allocate for version buffer
1759 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1760 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1762 BUS_SPACE_MAXADDR_32BIT,
1771 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1774 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1775 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1776 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1779 bzero(sc->verbuf_mem, verbuf_size);
1780 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1781 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1783 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1787 * Allocate IO Request Frames
1789 io_req_size = sc->io_frames_alloc_sz;
1790 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1792 BUS_SPACE_MAXADDR_32BIT,
1800 &sc->io_request_tag)) {
1801 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1804 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1805 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1806 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1809 bzero(sc->io_request_mem, io_req_size);
1810 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1811 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1812 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1813 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1817 * Allocate Chain Frames
1819 chain_frame_size = sc->chain_frames_alloc_sz;
1820 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1822 BUS_SPACE_MAXADDR_32BIT,
1830 &sc->chain_frame_tag)) {
1831 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1834 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1835 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1836 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1839 bzero(sc->chain_frame_mem, chain_frame_size);
1840 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1841 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1842 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1843 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1846 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1848 * Allocate Reply Descriptor Array
1850 reply_desc_size = sc->reply_alloc_sz * count;
1851 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1853 BUS_SPACE_MAXADDR_32BIT,
1861 &sc->reply_desc_tag)) {
1862 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1865 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1866 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1867 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1870 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1871 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1872 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1873 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1877 * Allocate Sense Buffer Array. Keep in lower 4GB
1879 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1880 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1882 BUS_SPACE_MAXADDR_32BIT,
1891 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1894 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1895 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1896 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1899 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1900 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1902 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1906 * Allocate for Event detail structure
1908 evt_detail_size = sizeof(struct mrsas_evt_detail);
1909 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1911 BUS_SPACE_MAXADDR_32BIT,
1919 &sc->evt_detail_tag)) {
1920 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1923 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1924 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1925 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1928 bzero(sc->evt_detail_mem, evt_detail_size);
1929 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1930 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1931 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1932 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1936 * Create a dma tag for data buffers; size will be the maximum
1937 * possible I/O size (280kB).
1939 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1946 sc->max_num_sge, /* nsegments */
1952 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1959 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1960 * input: callback argument, machine dependent type
1961 * that describes DMA segments, number of segments, error code
1963 * This function is for the driver to receive mapping information resultant of
1964 * the bus_dmamap_load(). The information is actually not being used, but the
1965 * address is saved anyway.
1968 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1973 *addr = segs[0].ds_addr;
1977 * mrsas_setup_raidmap: Set up RAID map.
1978 * input: Adapter instance soft state
1980 * Allocate DMA memory for the RAID maps and perform setup.
1983 mrsas_setup_raidmap(struct mrsas_softc *sc)
1987 for (i = 0; i < 2; i++) {
1989 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1990 /* Do Error handling */
1991 if (!sc->ld_drv_map[i]) {
1992 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1995 free(sc->ld_drv_map[0], M_MRSAS);
1996 /* ABORT driver initialization */
2001 for (int i = 0; i < 2; i++) {
2002 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2004 BUS_SPACE_MAXADDR_32BIT,
2012 &sc->raidmap_tag[i])) {
2013 device_printf(sc->mrsas_dev,
2014 "Cannot allocate raid map tag.\n");
2017 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2018 (void **)&sc->raidmap_mem[i],
2019 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2020 device_printf(sc->mrsas_dev,
2021 "Cannot allocate raidmap memory.\n");
2024 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2026 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2027 sc->raidmap_mem[i], sc->max_map_sz,
2028 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2030 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2033 if (!sc->raidmap_mem[i]) {
2034 device_printf(sc->mrsas_dev,
2035 "Cannot allocate memory for raid map.\n");
2040 if (!mrsas_get_map_info(sc))
2041 mrsas_sync_map_info(sc);
2050 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2051 * @sc: Adapter soft state
2053 * Return 0 on success.
2056 megasas_setup_jbod_map(struct mrsas_softc *sc)
2059 uint32_t pd_seq_map_sz;
2061 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2062 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2064 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2065 sc->use_seqnum_jbod_fp = 0;
2068 if (sc->jbodmap_mem[0])
2071 for (i = 0; i < 2; i++) {
2072 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2074 BUS_SPACE_MAXADDR_32BIT,
2082 &sc->jbodmap_tag[i])) {
2083 device_printf(sc->mrsas_dev,
2084 "Cannot allocate jbod map tag.\n");
2087 if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2088 (void **)&sc->jbodmap_mem[i],
2089 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2090 device_printf(sc->mrsas_dev,
2091 "Cannot allocate jbod map memory.\n");
2094 bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2096 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2097 sc->jbodmap_mem[i], pd_seq_map_sz,
2098 mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2100 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2103 if (!sc->jbodmap_mem[i]) {
2104 device_printf(sc->mrsas_dev,
2105 "Cannot allocate memory for jbod map.\n");
2106 sc->use_seqnum_jbod_fp = 0;
2112 if (!megasas_sync_pd_seq_num(sc, false) &&
2113 !megasas_sync_pd_seq_num(sc, true))
2114 sc->use_seqnum_jbod_fp = 1;
2116 sc->use_seqnum_jbod_fp = 0;
2118 device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2122 * mrsas_init_fw: Initialize Firmware
2123 * input: Adapter soft state
2125 * Calls transition_to_ready() to make sure Firmware is in operational state and
2126 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2127 * issues internal commands to get the controller info after the IOC_INIT
2128 * command response is received by Firmware. Note: code relating to
2129 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2130 * is left here as placeholder.
2133 mrsas_init_fw(struct mrsas_softc *sc)
2136 int ret, loop, ocr = 0;
2137 u_int32_t max_sectors_1;
2138 u_int32_t max_sectors_2;
2139 u_int32_t tmp_sectors;
2140 u_int32_t scratch_pad_2;
2141 int msix_enable = 0;
2142 int fw_msix_count = 0;
2144 /* Make sure Firmware is ready */
2145 ret = mrsas_transition_to_ready(sc, ocr);
2146 if (ret != SUCCESS) {
2149 /* MSI-x index 0- reply post host index register */
2150 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2151 /* Check if MSI-X is supported while in ready state */
2152 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2155 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2156 outbound_scratch_pad_2));
2158 /* Check max MSI-X vectors */
2159 if (sc->device_id == MRSAS_TBOLT) {
2160 sc->msix_vectors = (scratch_pad_2
2161 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2162 fw_msix_count = sc->msix_vectors;
2164 /* Invader/Fury supports 96 MSI-X vectors */
2165 sc->msix_vectors = ((scratch_pad_2
2166 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2167 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2168 fw_msix_count = sc->msix_vectors;
2170 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2172 sc->msix_reg_offset[loop] =
2173 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2178 /* Don't bother allocating more MSI-X vectors than cpus */
2179 sc->msix_vectors = min(sc->msix_vectors,
2182 /* Allocate MSI-x vectors */
2183 if (mrsas_allocate_msix(sc) == SUCCESS)
2184 sc->msix_enable = 1;
2186 sc->msix_enable = 0;
2188 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2189 "Online CPU %d Current MSIX <%d>\n",
2190 fw_msix_count, mp_ncpus, sc->msix_vectors);
2192 if (mrsas_init_adapter(sc) != SUCCESS) {
2193 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2196 /* Allocate internal commands for pass-thru */
2197 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2198 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2201 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2202 if (!sc->ctrl_info) {
2203 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2207 * Get the controller info from FW, so that the MAX VD support
2208 * availability can be decided.
2210 if (mrsas_get_ctrl_info(sc)) {
2211 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2214 sc->secure_jbod_support =
2215 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2217 if (sc->secure_jbod_support)
2218 device_printf(sc->mrsas_dev, "FW supports SED \n");
2220 if (sc->use_seqnum_jbod_fp)
2221 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2223 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2224 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2225 "There seems to be some problem in the controller\n"
2226 "Please contact to the SUPPORT TEAM if the problem persists\n");
2228 megasas_setup_jbod_map(sc);
2230 /* For pass-thru, get PD/LD list and controller info */
2231 memset(sc->pd_list, 0,
2232 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2233 if (mrsas_get_pd_list(sc) != SUCCESS) {
2234 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2237 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2238 if (mrsas_get_ld_list(sc) != SUCCESS) {
2239 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2243 * Compute the max allowed sectors per IO: The controller info has
2244 * two limits on max sectors. Driver should use the minimum of these
2247 * 1 << stripe_sz_ops.min = max sectors per strip
2249 * Note that older firmwares ( < FW ver 30) didn't report information to
2250 * calculate max_sectors_1. So the number ended up as zero always.
2253 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2254 sc->ctrl_info->max_strips_per_io;
2255 max_sectors_2 = sc->ctrl_info->max_request_size;
2256 tmp_sectors = min(max_sectors_1, max_sectors_2);
2257 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2259 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2260 sc->max_sectors_per_req = tmp_sectors;
2262 sc->disableOnlineCtrlReset =
2263 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2264 sc->UnevenSpanSupport =
2265 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2266 if (sc->UnevenSpanSupport) {
2267 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2268 sc->UnevenSpanSupport);
2270 if (MR_ValidateMapInfo(sc))
2271 sc->fast_path_io = 1;
2273 sc->fast_path_io = 0;
2279 * mrsas_init_adapter: Initializes the adapter/controller
2280 * input: Adapter soft state
2282 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2283 * ROC/controller. The FW register is read to determined the number of
2284 * commands that is supported. All memory allocations for IO is based on
2285 * max_cmd. Appropriate calculations are performed in this function.
2288 mrsas_init_adapter(struct mrsas_softc *sc)
2291 u_int32_t max_cmd, scratch_pad_2;
2295 /* Read FW status register */
2296 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2298 /* Get operational params from status register */
2299 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2301 /* Decrement the max supported by 1, to correlate with FW */
2302 sc->max_fw_cmds = sc->max_fw_cmds - 1;
2303 max_cmd = sc->max_fw_cmds;
2305 /* Determine allocation size of command frames */
2306 sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2307 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2308 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2309 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2310 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2311 outbound_scratch_pad_2));
2313 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2314 * Firmware support extended IO chain frame which is 4 time more
2315 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2316 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
2318 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2319 sc->max_chain_frame_sz =
2320 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2323 sc->max_chain_frame_sz =
2324 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2327 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
2328 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2329 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2331 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
2332 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2334 mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
2335 sc->max_num_sge, sc->max_chain_frame_sz);
2337 /* Used for pass thru MFI frame (DCMD) */
2338 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2340 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2341 sizeof(MPI2_SGE_IO_UNION)) / 16;
2343 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2345 for (i = 0; i < count; i++)
2346 sc->last_reply_idx[i] = 0;
2348 ret = mrsas_alloc_mem(sc);
2352 ret = mrsas_alloc_mpt_cmds(sc);
2356 ret = mrsas_ioc_init(sc);
2364 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2365 * input: Adapter soft state
2367 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2370 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2374 /* Allocate IOC INIT command */
2375 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2376 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2378 BUS_SPACE_MAXADDR_32BIT,
2386 &sc->ioc_init_tag)) {
2387 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2390 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2391 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2392 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2395 bzero(sc->ioc_init_mem, ioc_init_size);
2396 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2397 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2398 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2399 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2406 * mrsas_free_ioc_cmd: Allocates memory for IOC Init command
2407 * input: Adapter soft state
2409 * Deallocates memory of the IOC Init cmd.
2412 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2414 if (sc->ioc_init_phys_mem)
2415 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2416 if (sc->ioc_init_mem != NULL)
2417 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2418 if (sc->ioc_init_tag != NULL)
2419 bus_dma_tag_destroy(sc->ioc_init_tag);
2423 * mrsas_ioc_init: Sends IOC Init command to FW
2424 * input: Adapter soft state
2426 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2429 mrsas_ioc_init(struct mrsas_softc *sc)
2431 struct mrsas_init_frame *init_frame;
2432 pMpi2IOCInitRequest_t IOCInitMsg;
2433 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
2434 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
2435 bus_addr_t phys_addr;
2438 /* Allocate memory for the IOC INIT command */
2439 if (mrsas_alloc_ioc_cmd(sc)) {
2440 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
2443 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
2444 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
2445 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2446 IOCInitMsg->MsgVersion = MPI2_VERSION;
2447 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
2448 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
2449 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
2450 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
2451 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
2452 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
2454 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
2455 init_frame->cmd = MFI_CMD_INIT;
2456 init_frame->cmd_status = 0xFF;
2457 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2459 /* driver support Extended MSIX */
2460 if ((sc->device_id == MRSAS_INVADER) ||
2461 (sc->device_id == MRSAS_FURY) ||
2462 (sc->device_id == MRSAS_INTRUDER) ||
2463 (sc->device_id == MRSAS_INTRUDER_24) ||
2464 (sc->device_id == MRSAS_CUTLASS_52) ||
2465 (sc->device_id == MRSAS_CUTLASS_53)) {
2466 init_frame->driver_operations.
2467 mfi_capabilities.support_additional_msix = 1;
2469 if (sc->verbuf_mem) {
2470 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
2472 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
2473 init_frame->driver_ver_hi = 0;
2475 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
2476 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
2477 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
2478 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
2479 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
2480 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
2481 init_frame->queue_info_new_phys_addr_lo = phys_addr;
2482 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
2484 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
2485 req_desc.MFAIo.RequestFlags =
2486 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2488 mrsas_disable_intr(sc);
2489 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
2490 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
2493 * Poll response timer to wait for Firmware response. While this
2494 * timer with the DELAY call could block CPU, the time interval for
2495 * this is only 1 millisecond.
2497 if (init_frame->cmd_status == 0xFF) {
2498 for (i = 0; i < (max_wait * 1000); i++) {
2499 if (init_frame->cmd_status == 0xFF)
2505 if (init_frame->cmd_status == 0)
2506 mrsas_dprint(sc, MRSAS_OCR,
2507 "IOC INIT response received from FW.\n");
2509 if (init_frame->cmd_status == 0xFF)
2510 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
2512 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
2516 mrsas_free_ioc_cmd(sc);
2521 * mrsas_alloc_mpt_cmds: Allocates the command packets
2522 * input: Adapter instance soft state
2524 * This function allocates the internal commands for IOs. Each command that is
2525 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2526 * array is allocated with mrsas_mpt_cmd context. The free commands are
2527 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2531 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2534 u_int32_t max_cmd, count;
2535 struct mrsas_mpt_cmd *cmd;
2536 pMpi2ReplyDescriptorsUnion_t reply_desc;
2537 u_int32_t offset, chain_offset, sense_offset;
2538 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2539 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2541 max_cmd = sc->max_fw_cmds;
2543 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2544 if (!sc->req_desc) {
2545 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2548 memset(sc->req_desc, 0, sc->request_alloc_sz);
2551 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2552 * Allocate the dynamic array first and then allocate individual
2555 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2556 if (!sc->mpt_cmd_list) {
2557 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2560 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2561 for (i = 0; i < max_cmd; i++) {
2562 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2564 if (!sc->mpt_cmd_list[i]) {
2565 for (j = 0; j < i; j++)
2566 free(sc->mpt_cmd_list[j], M_MRSAS);
2567 free(sc->mpt_cmd_list, M_MRSAS);
2568 sc->mpt_cmd_list = NULL;
2573 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2574 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2575 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2576 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2577 sense_base = (u_int8_t *)sc->sense_mem;
2578 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2579 for (i = 0; i < max_cmd; i++) {
2580 cmd = sc->mpt_cmd_list[i];
2581 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2582 chain_offset = sc->max_chain_frame_sz * i;
2583 sense_offset = MRSAS_SENSE_LEN * i;
2584 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2586 cmd->ccb_ptr = NULL;
2587 callout_init(&cmd->cm_callout, 0);
2588 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2590 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2591 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2592 cmd->io_request_phys_addr = io_req_base_phys + offset;
2593 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2594 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2595 cmd->sense = sense_base + sense_offset;
2596 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2597 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2600 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2603 /* Initialize reply descriptor array to 0xFFFFFFFF */
2604 reply_desc = sc->reply_desc_mem;
2605 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2606 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2607 reply_desc->Words = MRSAS_ULONG_MAX;
2613 * mrsas_fire_cmd: Sends command to FW
2614 * input: Adapter softstate
2615 * request descriptor address low
2616 * request descriptor address high
2618 * This functions fires the command to Firmware by writing to the
2619 * inbound_low_queue_port and inbound_high_queue_port.
2622 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2623 u_int32_t req_desc_hi)
2625 mtx_lock(&sc->pci_lock);
2626 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2628 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2630 mtx_unlock(&sc->pci_lock);
2634 * mrsas_transition_to_ready: Move FW to Ready state input:
2635 * Adapter instance soft state
2637 * During the initialization, FW passes can potentially be in any one of several
2638 * possible states. If the FW in operational, waiting-for-handshake states,
2639 * driver must take steps to bring it to ready state. Otherwise, it has to
2640 * wait for the ready state.
2643 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2647 u_int32_t val, fw_state;
2648 u_int32_t cur_state;
2649 u_int32_t abs_state, curr_abs_state;
2651 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2652 fw_state = val & MFI_STATE_MASK;
2653 max_wait = MRSAS_RESET_WAIT_TIME;
2655 if (fw_state != MFI_STATE_READY)
2656 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2658 while (fw_state != MFI_STATE_READY) {
2659 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2661 case MFI_STATE_FAULT:
2662 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2664 cur_state = MFI_STATE_FAULT;
2668 case MFI_STATE_WAIT_HANDSHAKE:
2669 /* Set the CLR bit in inbound doorbell */
2670 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2671 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
2672 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2674 case MFI_STATE_BOOT_MESSAGE_PENDING:
2675 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2677 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2679 case MFI_STATE_OPERATIONAL:
2681 * Bring it to READY state; assuming max wait 10
2684 mrsas_disable_intr(sc);
2685 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
2686 for (i = 0; i < max_wait * 1000; i++) {
2687 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2692 cur_state = MFI_STATE_OPERATIONAL;
2694 case MFI_STATE_UNDEFINED:
2696 * This state should not last for more than 2
2699 cur_state = MFI_STATE_UNDEFINED;
2701 case MFI_STATE_BB_INIT:
2702 cur_state = MFI_STATE_BB_INIT;
2704 case MFI_STATE_FW_INIT:
2705 cur_state = MFI_STATE_FW_INIT;
2707 case MFI_STATE_FW_INIT_2:
2708 cur_state = MFI_STATE_FW_INIT_2;
2710 case MFI_STATE_DEVICE_SCAN:
2711 cur_state = MFI_STATE_DEVICE_SCAN;
2713 case MFI_STATE_FLUSH_CACHE:
2714 cur_state = MFI_STATE_FLUSH_CACHE;
2717 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2722 * The cur_state should not last for more than max_wait secs
2724 for (i = 0; i < (max_wait * 1000); i++) {
2725 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2726 outbound_scratch_pad)) & MFI_STATE_MASK);
2727 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2728 outbound_scratch_pad));
2729 if (abs_state == curr_abs_state)
2736 * Return error if fw_state hasn't changed after max_wait
2738 if (curr_abs_state == abs_state) {
2739 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2740 "in %d secs\n", fw_state, max_wait);
2744 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2749 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2750 * input: Adapter soft state
2752 * This function removes an MFI command from the command list.
2754 struct mrsas_mfi_cmd *
2755 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2757 struct mrsas_mfi_cmd *cmd = NULL;
2759 mtx_lock(&sc->mfi_cmd_pool_lock);
2760 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2761 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2762 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2764 mtx_unlock(&sc->mfi_cmd_pool_lock);
2770 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
2771 * input: Adapter Context.
2773 * This function will check FW status register and flag do_timeout_reset flag.
2774 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2778 mrsas_ocr_thread(void *arg)
2780 struct mrsas_softc *sc;
2781 u_int32_t fw_status, fw_state;
2783 sc = (struct mrsas_softc *)arg;
2785 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2787 sc->ocr_thread_active = 1;
2788 mtx_lock(&sc->sim_lock);
2790 /* Sleep for 1 second and check the queue status */
2791 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2792 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2793 if (sc->remove_in_progress ||
2794 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2795 mrsas_dprint(sc, MRSAS_OCR,
2796 "Exit due to %s from %s\n",
2797 sc->remove_in_progress ? "Shutdown" :
2798 "Hardware critical error", __func__);
2801 fw_status = mrsas_read_reg(sc,
2802 offsetof(mrsas_reg_set, outbound_scratch_pad));
2803 fw_state = fw_status & MFI_STATE_MASK;
2804 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2805 device_printf(sc->mrsas_dev, "%s started due to %s!\n",
2806 sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
2807 sc->do_timedout_reset ? "IO Timeout" :
2808 "FW fault detected");
2809 mtx_lock_spin(&sc->ioctl_lock);
2810 sc->reset_in_progress = 1;
2812 mtx_unlock_spin(&sc->ioctl_lock);
2813 mrsas_xpt_freeze(sc);
2814 mrsas_reset_ctrl(sc, sc->do_timedout_reset);
2815 mrsas_xpt_release(sc);
2816 sc->reset_in_progress = 0;
2817 sc->do_timedout_reset = 0;
2820 mtx_unlock(&sc->sim_lock);
2821 sc->ocr_thread_active = 0;
2822 mrsas_kproc_exit(0);
2826 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
2827 * input: Adapter Context.
2829 * This function will clear reply descriptor so that post OCR driver and FW will
2833 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2836 pMpi2ReplyDescriptorsUnion_t reply_desc;
2838 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2839 for (i = 0; i < count; i++)
2840 sc->last_reply_idx[i] = 0;
2842 reply_desc = sc->reply_desc_mem;
2843 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2844 reply_desc->Words = MRSAS_ULONG_MAX;
2849 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
2850 * input: Adapter Context.
2852 * This function will run from thread context so that it can sleep. 1. Do not
2853 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2854 * to complete for 180 seconds. 3. If #2 does not find any outstanding
2855 * command Controller is in working state, so skip OCR. Otherwise, do
2856 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2857 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2858 * OCR, Re-fire Managment command and move Controller to Operation state.
2861 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
2863 int retval = SUCCESS, i, j, retry = 0;
2864 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2866 struct mrsas_mfi_cmd *mfi_cmd;
2867 struct mrsas_mpt_cmd *mpt_cmd;
2868 union mrsas_evt_class_locale class_locale;
2870 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2871 device_printf(sc->mrsas_dev,
2872 "mrsas: Hardware critical error, returning FAIL.\n");
2875 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2876 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2877 mrsas_disable_intr(sc);
2878 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
2879 sc->mrsas_fw_fault_check_delay * hz);
2881 /* First try waiting for commands to complete */
2882 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
2883 mrsas_dprint(sc, MRSAS_OCR,
2884 "resetting adapter from %s.\n",
2886 /* Now return commands back to the CAM layer */
2887 mtx_unlock(&sc->sim_lock);
2888 for (i = 0; i < sc->max_fw_cmds; i++) {
2889 mpt_cmd = sc->mpt_cmd_list[i];
2890 if (mpt_cmd->ccb_ptr) {
2891 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2892 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2893 mrsas_cmd_done(sc, mpt_cmd);
2894 mrsas_atomic_dec(&sc->fw_outstanding);
2897 mtx_lock(&sc->sim_lock);
2899 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2900 outbound_scratch_pad));
2901 abs_state = status_reg & MFI_STATE_MASK;
2902 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2903 if (sc->disableOnlineCtrlReset ||
2904 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2905 /* Reset not supported, kill adapter */
2906 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2911 /* Now try to reset the chip */
2912 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2913 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2914 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2915 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2916 MPI2_WRSEQ_1ST_KEY_VALUE);
2917 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2918 MPI2_WRSEQ_2ND_KEY_VALUE);
2919 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2920 MPI2_WRSEQ_3RD_KEY_VALUE);
2921 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2922 MPI2_WRSEQ_4TH_KEY_VALUE);
2923 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2924 MPI2_WRSEQ_5TH_KEY_VALUE);
2925 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2926 MPI2_WRSEQ_6TH_KEY_VALUE);
2928 /* Check that the diag write enable (DRWE) bit is on */
2929 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2932 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2934 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2936 if (retry++ == 100) {
2937 mrsas_dprint(sc, MRSAS_OCR,
2938 "Host diag unlock failed!\n");
2942 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2945 /* Send chip reset command */
2946 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2947 host_diag | HOST_DIAG_RESET_ADAPTER);
2950 /* Make sure reset adapter bit is cleared */
2951 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2954 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2956 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2958 if (retry++ == 1000) {
2959 mrsas_dprint(sc, MRSAS_OCR,
2960 "Diag reset adapter never cleared!\n");
2964 if (host_diag & HOST_DIAG_RESET_ADAPTER)
2967 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2968 outbound_scratch_pad)) & MFI_STATE_MASK;
2971 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2973 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2974 outbound_scratch_pad)) & MFI_STATE_MASK;
2976 if (abs_state <= MFI_STATE_FW_INIT) {
2977 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2978 " state = 0x%x\n", abs_state);
2981 /* Wait for FW to become ready */
2982 if (mrsas_transition_to_ready(sc, 1)) {
2983 mrsas_dprint(sc, MRSAS_OCR,
2984 "mrsas: Failed to transition controller to ready.\n");
2987 mrsas_reset_reply_desc(sc);
2988 if (mrsas_ioc_init(sc)) {
2989 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2992 for (j = 0; j < sc->max_fw_cmds; j++) {
2993 mpt_cmd = sc->mpt_cmd_list[j];
2994 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2995 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2996 mrsas_release_mfi_cmd(mfi_cmd);
2997 mrsas_release_mpt_cmd(mpt_cmd);
3003 /* Reset load balance info */
3004 memset(sc->load_balance_info, 0,
3005 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3007 if (mrsas_get_ctrl_info(sc)) {
3012 if (!mrsas_get_map_info(sc))
3013 mrsas_sync_map_info(sc);
3015 megasas_setup_jbod_map(sc);
3017 memset(sc->pd_list, 0,
3018 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3019 if (mrsas_get_pd_list(sc) != SUCCESS) {
3020 device_printf(sc->mrsas_dev, "Get PD list failed from OCR.\n"
3021 "Will get the latest PD LIST after OCR on event.\n");
3023 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
3024 if (mrsas_get_ld_list(sc) != SUCCESS) {
3025 device_printf(sc->mrsas_dev, "Get LD lsit failed from OCR.\n"
3026 "Will get the latest LD LIST after OCR on event.\n");
3028 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3029 mrsas_enable_intr(sc);
3030 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3032 /* Register AEN with FW for last sequence number */
3033 class_locale.members.reserved = 0;
3034 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3035 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3037 if (mrsas_register_aen(sc, sc->last_seq_num,
3038 class_locale.word)) {
3039 device_printf(sc->mrsas_dev,
3040 "ERROR: AEN registration FAILED from OCR !!! "
3041 "Further events from the controller cannot be notified."
3042 "Either there is some problem in the controller"
3043 "or the controller does not support AEN.\n"
3044 "Please contact to the SUPPORT TEAM if the problem persists\n");
3046 /* Adapter reset completed successfully */
3047 device_printf(sc->mrsas_dev, "Reset successful\n");
3051 /* Reset failed, kill the adapter */
3052 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3056 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3057 mrsas_enable_intr(sc);
3058 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3061 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3062 mrsas_dprint(sc, MRSAS_OCR,
3063 "Reset Exit with %d.\n", retval);
3068 * mrsas_kill_hba: Kill HBA when OCR is not supported
3069 * input: Adapter Context.
3071 * This function will kill HBA when OCR is not supported.
3074 mrsas_kill_hba(struct mrsas_softc *sc)
3076 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
3078 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
3079 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
3082 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
3083 mrsas_complete_outstanding_ioctls(sc);
3087 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3088 * input: Controller softc
3093 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3096 struct mrsas_mpt_cmd *cmd_mpt;
3097 struct mrsas_mfi_cmd *cmd_mfi;
3098 u_int32_t count, MSIxIndex;
3100 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3101 for (i = 0; i < sc->max_fw_cmds; i++) {
3102 cmd_mpt = sc->mpt_cmd_list[i];
3104 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3105 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3106 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3107 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3108 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3109 cmd_mpt->io_request->RaidContext.status);
3116 * mrsas_wait_for_outstanding: Wait for outstanding commands
3117 * input: Adapter Context.
3119 * This function will wait for 180 seconds for outstanding commands to be
3123 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3125 int i, outstanding, retval = 0;
3126 u_int32_t fw_state, count, MSIxIndex;
3129 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
3130 if (sc->remove_in_progress) {
3131 mrsas_dprint(sc, MRSAS_OCR,
3132 "Driver remove or shutdown called.\n");
3136 /* Check if firmware is in fault state */
3137 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3138 outbound_scratch_pad)) & MFI_STATE_MASK;
3139 if (fw_state == MFI_STATE_FAULT) {
3140 mrsas_dprint(sc, MRSAS_OCR,
3141 "Found FW in FAULT state, will reset adapter.\n");
3145 if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3146 mrsas_dprint(sc, MRSAS_OCR,
3147 "DCMD IO TIMEOUT detected, will reset adapter.\n");
3151 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
3155 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3156 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3157 "commands to complete\n", i, outstanding);
3158 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3159 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3160 mrsas_complete_cmd(sc, MSIxIndex);
3165 if (mrsas_atomic_read(&sc->fw_outstanding)) {
3166 mrsas_dprint(sc, MRSAS_OCR,
3167 " pending commands remain after waiting,"
3168 " will reset adapter.\n");
3176 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3177 * input: Command packet for return to free cmd pool
3179 * This function returns the MFI command to the command list.
3182 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
3184 struct mrsas_softc *sc = cmd->sc;
3186 mtx_lock(&sc->mfi_cmd_pool_lock);
3187 cmd->ccb_ptr = NULL;
3188 cmd->cmd_id.frame_count = 0;
3189 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
3190 mtx_unlock(&sc->mfi_cmd_pool_lock);
3196 * mrsas_get_controller_info: Returns FW's controller structure
3197 * input: Adapter soft state
3198 * Controller information structure
3200 * Issues an internal command (DCMD) to get the FW's controller structure. This
3201 * information is mainly used to find out the maximum IO transfer per command
3202 * supported by the FW.
3205 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3208 u_int8_t do_ocr = 1;
3209 struct mrsas_mfi_cmd *cmd;
3210 struct mrsas_dcmd_frame *dcmd;
3212 cmd = mrsas_get_mfi_cmd(sc);
3215 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3218 dcmd = &cmd->frame->dcmd;
3220 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3221 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3222 mrsas_release_mfi_cmd(cmd);
3225 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3227 dcmd->cmd = MFI_CMD_DCMD;
3228 dcmd->cmd_status = 0xFF;
3229 dcmd->sge_count = 1;
3230 dcmd->flags = MFI_FRAME_DIR_READ;
3233 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
3234 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
3235 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
3236 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
3238 retcode = mrsas_issue_polled(sc, cmd);
3239 if (retcode == ETIMEDOUT)
3242 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3245 mrsas_update_ext_vd_details(sc);
3247 sc->use_seqnum_jbod_fp =
3248 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3251 mrsas_free_ctlr_info_cmd(sc);
3254 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3256 mrsas_release_mfi_cmd(cmd);
3262 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3264 * sc - Controller's softc
3267 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3269 sc->max256vdSupport =
3270 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3271 /* Below is additional check to address future FW enhancement */
3272 if (sc->ctrl_info->max_lds > 64)
3273 sc->max256vdSupport = 1;
3275 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3276 * MRSAS_MAX_DEV_PER_CHANNEL;
3277 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3278 * MRSAS_MAX_DEV_PER_CHANNEL;
3279 if (sc->max256vdSupport) {
3280 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3281 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3283 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3284 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3287 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3288 (sizeof(MR_LD_SPAN_MAP) *
3289 (sc->fw_supported_vd_count - 1));
3290 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3291 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3292 (sizeof(MR_LD_SPAN_MAP) *
3293 (sc->drv_supported_vd_count - 1));
3295 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3297 if (sc->max256vdSupport)
3298 sc->current_map_sz = sc->new_map_sz;
3300 sc->current_map_sz = sc->old_map_sz;
3304 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3305 * input: Adapter soft state
3307 * Allocates DMAable memory for the controller info internal command.
3310 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3314 /* Allocate get controller info command */
3315 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3316 if (bus_dma_tag_create(sc->mrsas_parent_tag,
3318 BUS_SPACE_MAXADDR_32BIT,
3326 &sc->ctlr_info_tag)) {
3327 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3330 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3331 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3332 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3335 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3336 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3337 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3338 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3341 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3346 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3347 * input: Adapter soft state
3349 * Deallocates memory of the get controller info cmd.
3352 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3354 if (sc->ctlr_info_phys_addr)
3355 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3356 if (sc->ctlr_info_mem != NULL)
3357 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3358 if (sc->ctlr_info_tag != NULL)
3359 bus_dma_tag_destroy(sc->ctlr_info_tag);
3363 * mrsas_issue_polled: Issues a polling command
3364 * inputs: Adapter soft state
3365 * Command packet to be issued
3367 * This function is for posting of internal commands to Firmware. MFI requires
3368 * the cmd_status to be set to 0xFF before posting. The maximun wait time of
3369 * the poll response timer is 180 seconds.
3372 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3374 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3375 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3376 int i, retcode = SUCCESS;
3378 frame_hdr->cmd_status = 0xFF;
3379 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3381 /* Issue the frame using inbound queue port */
3382 if (mrsas_issue_dcmd(sc, cmd)) {
3383 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3387 * Poll response timer to wait for Firmware response. While this
3388 * timer with the DELAY call could block CPU, the time interval for
3389 * this is only 1 millisecond.
3391 if (frame_hdr->cmd_status == 0xFF) {
3392 for (i = 0; i < (max_wait * 1000); i++) {
3393 if (frame_hdr->cmd_status == 0xFF)
3399 if (frame_hdr->cmd_status == 0xFF) {
3400 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3401 "seconds from %s\n", max_wait, __func__);
3402 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3403 cmd->frame->dcmd.opcode);
3404 retcode = ETIMEDOUT;
3410 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3411 * input: Adapter soft state mfi cmd pointer
3413 * This function is called by mrsas_issued_blocked_cmd() and
3414 * mrsas_issued_polled(), to build the MPT command and then fire the command
3418 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3420 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3422 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3424 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3427 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3433 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3434 * input: Adapter soft state mfi cmd to build
3436 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3437 * command and prepares the MPT command to send to Firmware.
3439 MRSAS_REQUEST_DESCRIPTOR_UNION *
3440 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3442 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3445 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3446 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3449 index = cmd->cmd_id.context.smid;
3451 req_desc = mrsas_get_request_desc(sc, index - 1);
3455 req_desc->addr.Words = 0;
3456 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3458 req_desc->SCSIIO.SMID = index;
3464 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3465 * input: Adapter soft state mfi cmd pointer
3467 * The MPT command and the io_request are setup as a passthru command. The SGE
3468 * chain address is set to frame_phys_addr of the MFI command.
3471 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3473 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3474 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3475 struct mrsas_mpt_cmd *mpt_cmd;
3476 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3478 mpt_cmd = mrsas_get_mpt_cmd(sc);
3482 /* Save the smid. To be used for returning the cmd */
3483 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
3485 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3488 * For cmds where the flag is set, store the flag and check on
3489 * completion. For cmds with this flag, don't call
3490 * mrsas_complete_cmd.
3493 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3494 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3496 io_req = mpt_cmd->io_request;
3498 if ((sc->device_id == MRSAS_INVADER) ||
3499 (sc->device_id == MRSAS_FURY) ||
3500 (sc->device_id == MRSAS_INTRUDER) ||
3501 (sc->device_id == MRSAS_INTRUDER_24) ||
3502 (sc->device_id == MRSAS_CUTLASS_52) ||
3503 (sc->device_id == MRSAS_CUTLASS_53)) {
3504 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3506 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3507 sgl_ptr_end->Flags = 0;
3509 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3511 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3512 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3513 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
3515 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3517 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3518 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3520 mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
3526 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3527 * input: Adapter soft state Command to be issued
3529 * This function waits on an event for the command to be returned from the ISR.
3530 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3531 * internal and ioctl commands.
3534 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3536 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3537 unsigned long total_time = 0;
3538 int retcode = SUCCESS;
3540 /* Initialize cmd_status */
3541 cmd->cmd_status = 0xFF;
3543 /* Build MPT-MFI command for issue to FW */
3544 if (mrsas_issue_dcmd(sc, cmd)) {
3545 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3548 sc->chan = (void *)&cmd;
3551 if (cmd->cmd_status == 0xFF) {
3552 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3556 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL
3559 if (total_time >= max_wait) {
3560 device_printf(sc->mrsas_dev,
3561 "Internal command timed out after %d seconds.\n", max_wait);
3568 if (cmd->cmd_status == 0xFF) {
3569 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3570 "seconds from %s\n", max_wait, __func__);
3571 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3572 cmd->frame->dcmd.opcode);
3573 retcode = ETIMEDOUT;
3579 * mrsas_complete_mptmfi_passthru: Completes a command
3580 * input: @sc: Adapter soft state
3581 * @cmd: Command to be completed
3582 * @status: cmd completion status
3584 * This function is called from mrsas_complete_cmd() after an interrupt is
3585 * received from Firmware, and io_request->Function is
3586 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3589 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
3592 struct mrsas_header *hdr = &cmd->frame->hdr;
3593 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
3595 /* Reset the retry counter for future re-tries */
3596 cmd->retry_for_fw_reset = 0;
3599 cmd->ccb_ptr = NULL;
3602 case MFI_CMD_INVALID:
3603 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
3605 case MFI_CMD_PD_SCSI_IO:
3606 case MFI_CMD_LD_SCSI_IO:
3608 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3609 * issued either through an IO path or an IOCTL path. If it
3610 * was via IOCTL, we will send it to internal completion.
3612 if (cmd->sync_cmd) {
3614 mrsas_wakeup(sc, cmd);
3620 /* Check for LD map update */
3621 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
3622 (cmd->frame->dcmd.mbox.b[1] == 1)) {
3623 sc->fast_path_io = 0;
3624 mtx_lock(&sc->raidmap_lock);
3625 sc->map_update_cmd = NULL;
3626 if (cmd_status != 0) {
3627 if (cmd_status != MFI_STAT_NOT_FOUND)
3628 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
3630 mrsas_release_mfi_cmd(cmd);
3631 mtx_unlock(&sc->raidmap_lock);
3636 mrsas_release_mfi_cmd(cmd);
3637 if (MR_ValidateMapInfo(sc))
3638 sc->fast_path_io = 0;
3640 sc->fast_path_io = 1;
3641 mrsas_sync_map_info(sc);
3642 mtx_unlock(&sc->raidmap_lock);
3645 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3646 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
3647 sc->mrsas_aen_triggered = 0;
3649 /* FW has an updated PD sequence */
3650 if ((cmd->frame->dcmd.opcode ==
3651 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3652 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3654 mtx_lock(&sc->raidmap_lock);
3655 sc->jbod_seq_cmd = NULL;
3656 mrsas_release_mfi_cmd(cmd);
3658 if (cmd_status == MFI_STAT_OK) {
3659 sc->pd_seq_map_id++;
3660 /* Re-register a pd sync seq num cmd */
3661 if (megasas_sync_pd_seq_num(sc, true))
3662 sc->use_seqnum_jbod_fp = 0;
3664 sc->use_seqnum_jbod_fp = 0;
3665 device_printf(sc->mrsas_dev,
3666 "Jbod map sync failed, status=%x\n", cmd_status);
3668 mtx_unlock(&sc->raidmap_lock);
3671 /* See if got an event notification */
3672 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
3673 mrsas_complete_aen(sc, cmd);
3675 mrsas_wakeup(sc, cmd);
3678 /* Command issued to abort another cmd return */
3679 mrsas_complete_abort(sc, cmd);
3682 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
3688 * mrsas_wakeup: Completes an internal command
3689 * input: Adapter soft state
3690 * Command to be completed
3692 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3693 * timer is started. This function is called from
3694 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3695 * from the command wait.
3698 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3700 cmd->cmd_status = cmd->frame->io.cmd_status;
3702 if (cmd->cmd_status == 0xFF)
3703 cmd->cmd_status = 0;
3705 sc->chan = (void *)&cmd;
3706 wakeup_one((void *)&sc->chan);
3711 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
3712 * Adapter soft state Shutdown/Hibernate
3714 * This function issues a DCMD internal command to Firmware to initiate shutdown
3715 * of the controller.
3718 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3720 struct mrsas_mfi_cmd *cmd;
3721 struct mrsas_dcmd_frame *dcmd;
3723 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3726 cmd = mrsas_get_mfi_cmd(sc);
3728 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3732 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3733 if (sc->map_update_cmd)
3734 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3735 if (sc->jbod_seq_cmd)
3736 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
3738 dcmd = &cmd->frame->dcmd;
3739 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3741 dcmd->cmd = MFI_CMD_DCMD;
3742 dcmd->cmd_status = 0x0;
3743 dcmd->sge_count = 0;
3744 dcmd->flags = MFI_FRAME_DIR_NONE;
3747 dcmd->data_xfer_len = 0;
3748 dcmd->opcode = opcode;
3750 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3752 mrsas_issue_blocked_cmd(sc, cmd);
3753 mrsas_release_mfi_cmd(cmd);
3759 * mrsas_flush_cache: Requests FW to flush all its caches input:
3760 * Adapter soft state
3762 * This function is issues a DCMD internal command to Firmware to initiate
3763 * flushing of all caches.
3766 mrsas_flush_cache(struct mrsas_softc *sc)
3768 struct mrsas_mfi_cmd *cmd;
3769 struct mrsas_dcmd_frame *dcmd;
3771 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3774 cmd = mrsas_get_mfi_cmd(sc);
3776 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3779 dcmd = &cmd->frame->dcmd;
3780 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3782 dcmd->cmd = MFI_CMD_DCMD;
3783 dcmd->cmd_status = 0x0;
3784 dcmd->sge_count = 0;
3785 dcmd->flags = MFI_FRAME_DIR_NONE;
3788 dcmd->data_xfer_len = 0;
3789 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3790 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3792 mrsas_issue_blocked_cmd(sc, cmd);
3793 mrsas_release_mfi_cmd(cmd);
3799 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
3802 u_int8_t do_ocr = 1;
3803 struct mrsas_mfi_cmd *cmd;
3804 struct mrsas_dcmd_frame *dcmd;
3805 uint32_t pd_seq_map_sz;
3806 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3807 bus_addr_t pd_seq_h;
3809 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
3810 (sizeof(struct MR_PD_CFG_SEQ) *
3811 (MAX_PHYSICAL_DEVICES - 1));
3813 cmd = mrsas_get_mfi_cmd(sc);
3815 device_printf(sc->mrsas_dev,
3816 "Cannot alloc for ld map info cmd.\n");
3819 dcmd = &cmd->frame->dcmd;
3821 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
3822 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
3824 device_printf(sc->mrsas_dev,
3825 "Failed to alloc mem for jbod map info.\n");
3826 mrsas_release_mfi_cmd(cmd);
3829 memset(pd_sync, 0, pd_seq_map_sz);
3830 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3831 dcmd->cmd = MFI_CMD_DCMD;
3832 dcmd->cmd_status = 0xFF;
3833 dcmd->sge_count = 1;
3836 dcmd->data_xfer_len = (pd_seq_map_sz);
3837 dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
3838 dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
3839 dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
3842 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
3843 dcmd->flags = (MFI_FRAME_DIR_WRITE);
3844 sc->jbod_seq_cmd = cmd;
3845 if (mrsas_issue_dcmd(sc, cmd)) {
3846 device_printf(sc->mrsas_dev,
3847 "Fail to send sync map info command.\n");
3852 dcmd->flags = MFI_FRAME_DIR_READ;
3854 retcode = mrsas_issue_polled(sc, cmd);
3855 if (retcode == ETIMEDOUT)
3858 if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
3859 device_printf(sc->mrsas_dev,
3860 "driver supports max %d JBOD, but FW reports %d\n",
3861 MAX_PHYSICAL_DEVICES, pd_sync->count);
3865 sc->pd_seq_map_id++;
3870 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3872 mrsas_release_mfi_cmd(cmd);
3878 * mrsas_get_map_info: Load and validate RAID map input:
3879 * Adapter instance soft state
3881 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3882 * and validate RAID map. It returns 0 if successful, 1 other- wise.
3885 mrsas_get_map_info(struct mrsas_softc *sc)
3887 uint8_t retcode = 0;
3889 sc->fast_path_io = 0;
3890 if (!mrsas_get_ld_map_info(sc)) {
3891 retcode = MR_ValidateMapInfo(sc);
3893 sc->fast_path_io = 1;
3901 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
3902 * Adapter instance soft state
3904 * Issues an internal command (DCMD) to get the FW's controller PD list
3908 mrsas_get_ld_map_info(struct mrsas_softc *sc)
3911 struct mrsas_mfi_cmd *cmd;
3912 struct mrsas_dcmd_frame *dcmd;
3914 bus_addr_t map_phys_addr = 0;
3916 cmd = mrsas_get_mfi_cmd(sc);
3918 device_printf(sc->mrsas_dev,
3919 "Cannot alloc for ld map info cmd.\n");
3922 dcmd = &cmd->frame->dcmd;
3924 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3925 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3927 device_printf(sc->mrsas_dev,
3928 "Failed to alloc mem for ld map info.\n");
3929 mrsas_release_mfi_cmd(cmd);
3932 memset(map, 0, sizeof(sc->max_map_sz));
3933 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3935 dcmd->cmd = MFI_CMD_DCMD;
3936 dcmd->cmd_status = 0xFF;
3937 dcmd->sge_count = 1;
3938 dcmd->flags = MFI_FRAME_DIR_READ;
3941 dcmd->data_xfer_len = sc->current_map_sz;
3942 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3943 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3944 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3946 retcode = mrsas_issue_polled(sc, cmd);
3947 if (retcode == ETIMEDOUT)
3948 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3950 mrsas_release_mfi_cmd(cmd);
3956 * mrsas_sync_map_info: Get FW's ld_map structure input:
3957 * Adapter instance soft state
3959 * Issues an internal command (DCMD) to get the FW's controller PD list
3963 mrsas_sync_map_info(struct mrsas_softc *sc)
3966 struct mrsas_mfi_cmd *cmd;
3967 struct mrsas_dcmd_frame *dcmd;
3968 uint32_t size_sync_info, num_lds;
3969 MR_LD_TARGET_SYNC *target_map = NULL;
3970 MR_DRV_RAID_MAP_ALL *map;
3972 MR_LD_TARGET_SYNC *ld_sync;
3973 bus_addr_t map_phys_addr = 0;
3975 cmd = mrsas_get_mfi_cmd(sc);
3977 device_printf(sc->mrsas_dev,
3978 "Cannot alloc for sync map info cmd\n");
3981 map = sc->ld_drv_map[sc->map_id & 1];
3982 num_lds = map->raidMap.ldCount;
3984 dcmd = &cmd->frame->dcmd;
3985 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3986 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3988 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3989 memset(target_map, 0, sc->max_map_sz);
3991 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3993 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
3995 for (i = 0; i < num_lds; i++, ld_sync++) {
3996 raid = MR_LdRaidGet(i, map);
3997 ld_sync->targetId = MR_GetLDTgtId(i, map);
3998 ld_sync->seqNum = raid->seqNum;
4001 dcmd->cmd = MFI_CMD_DCMD;
4002 dcmd->cmd_status = 0xFF;
4003 dcmd->sge_count = 1;
4004 dcmd->flags = MFI_FRAME_DIR_WRITE;
4007 dcmd->data_xfer_len = sc->current_map_sz;
4008 dcmd->mbox.b[0] = num_lds;
4009 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4010 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4011 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4012 dcmd->sgl.sge32[0].length = sc->current_map_sz;
4014 sc->map_update_cmd = cmd;
4015 if (mrsas_issue_dcmd(sc, cmd)) {
4016 device_printf(sc->mrsas_dev,
4017 "Fail to send sync map info command.\n");
4024 * mrsas_get_pd_list: Returns FW's PD list structure input:
4025 * Adapter soft state
4027 * Issues an internal command (DCMD) to get the FW's controller PD list
4028 * structure. This information is mainly used to find out about system
4029 * supported by Firmware.
4032 mrsas_get_pd_list(struct mrsas_softc *sc)
4034 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4035 u_int8_t do_ocr = 1;
4036 struct mrsas_mfi_cmd *cmd;
4037 struct mrsas_dcmd_frame *dcmd;
4038 struct MR_PD_LIST *pd_list_mem;
4039 struct MR_PD_ADDRESS *pd_addr;
4040 bus_addr_t pd_list_phys_addr = 0;
4041 struct mrsas_tmp_dcmd *tcmd;
4043 cmd = mrsas_get_mfi_cmd(sc);
4045 device_printf(sc->mrsas_dev,
4046 "Cannot alloc for get PD list cmd\n");
4049 dcmd = &cmd->frame->dcmd;
4051 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4052 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4053 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4054 device_printf(sc->mrsas_dev,
4055 "Cannot alloc dmamap for get PD list cmd\n");
4056 mrsas_release_mfi_cmd(cmd);
4057 mrsas_free_tmp_dcmd(tcmd);
4058 free(tcmd, M_MRSAS);
4061 pd_list_mem = tcmd->tmp_dcmd_mem;
4062 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4064 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4066 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4067 dcmd->mbox.b[1] = 0;
4068 dcmd->cmd = MFI_CMD_DCMD;
4069 dcmd->cmd_status = 0xFF;
4070 dcmd->sge_count = 1;
4071 dcmd->flags = MFI_FRAME_DIR_READ;
4074 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4075 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4076 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4077 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4079 retcode = mrsas_issue_polled(sc, cmd);
4080 if (retcode == ETIMEDOUT)
4083 /* Get the instance PD list */
4084 pd_count = MRSAS_MAX_PD;
4085 pd_addr = pd_list_mem->addr;
4086 if (pd_list_mem->count < pd_count) {
4087 memset(sc->local_pd_list, 0,
4088 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4089 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4090 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4091 sc->local_pd_list[pd_addr->deviceId].driveType =
4092 pd_addr->scsiDevType;
4093 sc->local_pd_list[pd_addr->deviceId].driveState =
4098 * Use mutext/spinlock if pd_list component size increase more than
4101 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4105 mrsas_free_tmp_dcmd(tcmd);
4106 free(tcmd, M_MRSAS);
4109 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4111 mrsas_release_mfi_cmd(cmd);
4117 * mrsas_get_ld_list: Returns FW's LD list structure input:
4118 * Adapter soft state
4120 * Issues an internal command (DCMD) to get the FW's controller PD list
4121 * structure. This information is mainly used to find out about supported by
4125 mrsas_get_ld_list(struct mrsas_softc *sc)
4127 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4128 u_int8_t do_ocr = 1;
4129 struct mrsas_mfi_cmd *cmd;
4130 struct mrsas_dcmd_frame *dcmd;
4131 struct MR_LD_LIST *ld_list_mem;
4132 bus_addr_t ld_list_phys_addr = 0;
4133 struct mrsas_tmp_dcmd *tcmd;
4135 cmd = mrsas_get_mfi_cmd(sc);
4137 device_printf(sc->mrsas_dev,
4138 "Cannot alloc for get LD list cmd\n");
4141 dcmd = &cmd->frame->dcmd;
4143 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4144 ld_list_size = sizeof(struct MR_LD_LIST);
4145 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4146 device_printf(sc->mrsas_dev,
4147 "Cannot alloc dmamap for get LD list cmd\n");
4148 mrsas_release_mfi_cmd(cmd);
4149 mrsas_free_tmp_dcmd(tcmd);
4150 free(tcmd, M_MRSAS);
4153 ld_list_mem = tcmd->tmp_dcmd_mem;
4154 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4156 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4158 if (sc->max256vdSupport)
4159 dcmd->mbox.b[0] = 1;
4161 dcmd->cmd = MFI_CMD_DCMD;
4162 dcmd->cmd_status = 0xFF;
4163 dcmd->sge_count = 1;
4164 dcmd->flags = MFI_FRAME_DIR_READ;
4166 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4167 dcmd->opcode = MR_DCMD_LD_GET_LIST;
4168 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4169 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
4172 retcode = mrsas_issue_polled(sc, cmd);
4173 if (retcode == ETIMEDOUT)
4177 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4180 /* Get the instance LD list */
4181 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4182 sc->CurLdCount = ld_list_mem->ldCount;
4183 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4184 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4185 if (ld_list_mem->ldList[ld_index].state != 0) {
4186 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4187 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4193 mrsas_free_tmp_dcmd(tcmd);
4194 free(tcmd, M_MRSAS);
4197 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4199 mrsas_release_mfi_cmd(cmd);
4205 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
4206 * Adapter soft state Temp command Size of alloction
4208 * Allocates DMAable memory for a temporary internal command. The allocated
4209 * memory is initialized to all zeros upon successful loading of the dma
4213 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4214 struct mrsas_tmp_dcmd *tcmd, int size)
4216 if (bus_dma_tag_create(sc->mrsas_parent_tag,
4218 BUS_SPACE_MAXADDR_32BIT,
4226 &tcmd->tmp_dcmd_tag)) {
4227 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4230 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4231 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4232 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4235 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4236 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4237 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4238 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4241 memset(tcmd->tmp_dcmd_mem, 0, size);
4246 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
4247 * temporary dcmd pointer
4249 * Deallocates memory of the temporary command for use in the construction of
4250 * the internal DCMD.
4253 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4255 if (tmp->tmp_dcmd_phys_addr)
4256 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4257 if (tmp->tmp_dcmd_mem != NULL)
4258 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4259 if (tmp->tmp_dcmd_tag != NULL)
4260 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4264 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
4265 * Adapter soft state Previously issued cmd to be aborted
4267 * This function is used to abort previously issued commands, such as AEN and
4268 * RAID map sync map commands. The abort command is sent as a DCMD internal
4269 * command and subsequently the driver will wait for a return status. The
4270 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4273 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4274 struct mrsas_mfi_cmd *cmd_to_abort)
4276 struct mrsas_mfi_cmd *cmd;
4277 struct mrsas_abort_frame *abort_fr;
4278 u_int8_t retcode = 0;
4279 unsigned long total_time = 0;
4280 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4282 cmd = mrsas_get_mfi_cmd(sc);
4284 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4287 abort_fr = &cmd->frame->abort;
4289 /* Prepare and issue the abort frame */
4290 abort_fr->cmd = MFI_CMD_ABORT;
4291 abort_fr->cmd_status = 0xFF;
4292 abort_fr->flags = 0;
4293 abort_fr->abort_context = cmd_to_abort->index;
4294 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4295 abort_fr->abort_mfi_phys_addr_hi = 0;
4298 cmd->cmd_status = 0xFF;
4300 if (mrsas_issue_dcmd(sc, cmd)) {
4301 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4304 /* Wait for this cmd to complete */
4305 sc->chan = (void *)&cmd;
4307 if (cmd->cmd_status == 0xFF) {
4308 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4312 if (total_time >= max_wait) {
4313 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4320 mrsas_release_mfi_cmd(cmd);
4325 * mrsas_complete_abort: Completes aborting a command input:
4326 * Adapter soft state Cmd that was issued to abort another cmd
4328 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4329 * change after sending the command. This function is called from
4330 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4333 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4335 if (cmd->sync_cmd) {
4337 cmd->cmd_status = 0;
4338 sc->chan = (void *)&cmd;
4339 wakeup_one((void *)&sc->chan);
4345 * mrsas_aen_handler: AEN processing callback function from thread context
4346 * input: Adapter soft state
4348 * Asynchronous event handler
4351 mrsas_aen_handler(struct mrsas_softc *sc)
4353 union mrsas_evt_class_locale class_locale;
4356 int error, fail_aen = 0;
4359 printf("invalid instance!\n");
4362 if (sc->evt_detail_mem) {
4363 switch (sc->evt_detail_mem->code) {
4364 case MR_EVT_PD_INSERTED:
4365 fail_aen = mrsas_get_pd_list(sc);
4367 mrsas_bus_scan_sim(sc, sc->sim_1);
4369 goto skip_register_aen;
4372 case MR_EVT_PD_REMOVED:
4373 fail_aen = mrsas_get_pd_list(sc);
4375 mrsas_bus_scan_sim(sc, sc->sim_1);
4377 goto skip_register_aen;
4380 case MR_EVT_LD_OFFLINE:
4381 case MR_EVT_CFG_CLEARED:
4382 case MR_EVT_LD_DELETED:
4383 mrsas_bus_scan_sim(sc, sc->sim_0);
4386 case MR_EVT_LD_CREATED:
4387 fail_aen = mrsas_get_ld_list(sc);
4389 mrsas_bus_scan_sim(sc, sc->sim_0);
4391 goto skip_register_aen;
4394 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4395 case MR_EVT_FOREIGN_CFG_IMPORTED:
4396 case MR_EVT_LD_STATE_CHANGE:
4404 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4408 fail_aen = mrsas_get_pd_list(sc);
4410 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4411 mrsas_bus_scan_sim(sc, sc->sim_1);
4413 goto skip_register_aen;
4415 fail_aen = mrsas_get_ld_list(sc);
4417 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4418 mrsas_bus_scan_sim(sc, sc->sim_0);
4420 goto skip_register_aen;
4422 seq_num = sc->evt_detail_mem->seq_num + 1;
4424 /* Register AEN with FW for latest sequence number plus 1 */
4425 class_locale.members.reserved = 0;
4426 class_locale.members.locale = MR_EVT_LOCALE_ALL;
4427 class_locale.members.class = MR_EVT_CLASS_DEBUG;
4429 if (sc->aen_cmd != NULL)
4432 mtx_lock(&sc->aen_lock);
4433 error = mrsas_register_aen(sc, seq_num,
4435 mtx_unlock(&sc->aen_lock);
4438 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4447 * mrsas_complete_aen: Completes AEN command
4448 * input: Adapter soft state
4449 * Cmd that was issued to abort another cmd
4451 * This function will be called from ISR and will continue event processing from
4452 * thread context by enqueuing task in ev_tq (callback function
4453 * "mrsas_aen_handler").
4456 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4459 * Don't signal app if it is just an aborted previously registered
4462 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4463 sc->mrsas_aen_triggered = 1;
4464 mtx_lock(&sc->aen_lock);
4465 if (sc->mrsas_poll_waiting) {
4466 sc->mrsas_poll_waiting = 0;
4467 selwakeup(&sc->mrsas_select);
4469 mtx_unlock(&sc->aen_lock);
4474 mrsas_release_mfi_cmd(cmd);
4476 if (!sc->remove_in_progress)
4477 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4482 static device_method_t mrsas_methods[] = {
4483 DEVMETHOD(device_probe, mrsas_probe),
4484 DEVMETHOD(device_attach, mrsas_attach),
4485 DEVMETHOD(device_detach, mrsas_detach),
4486 DEVMETHOD(device_suspend, mrsas_suspend),
4487 DEVMETHOD(device_resume, mrsas_resume),
4488 DEVMETHOD(bus_print_child, bus_generic_print_child),
4489 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
4493 static driver_t mrsas_driver = {
4496 sizeof(struct mrsas_softc)
4499 static devclass_t mrsas_devclass;
4501 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
4502 MODULE_DEPEND(mrsas, cam, 1, 1, 1);