2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies,either expressed or implied, of the FreeBSD Project.
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
47 #include <cam/cam_ccb.h>
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92 struct mrsas_mfi_cmd *cmd_to_abort);
93 static struct mrsas_softc *
94 mrsas_get_softc_instance(struct cdev *dev,
95 u_long cmd, caddr_t arg);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99 struct mrsas_mfi_cmd *mfi_cmd);
100 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int mrsas_init_adapter(struct mrsas_softc *sc);
103 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int mrsas_ioc_init(struct mrsas_softc *sc);
107 int mrsas_bus_scan(struct mrsas_softc *sc);
108 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
114 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
115 struct mrsas_mfi_cmd *cmd);
117 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
119 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
120 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
121 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void mrsas_disable_intr(struct mrsas_softc *sc);
124 void mrsas_enable_intr(struct mrsas_softc *sc);
125 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
126 void mrsas_free_mem(struct mrsas_softc *sc);
127 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
128 void mrsas_isr(void *arg);
129 void mrsas_teardown_intr(struct mrsas_softc *sc);
130 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
131 void mrsas_kill_hba(struct mrsas_softc *sc);
132 void mrsas_aen_handler(struct mrsas_softc *sc);
134 mrsas_write_reg(struct mrsas_softc *sc, int offset,
137 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
138 u_int32_t req_desc_hi);
139 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
141 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
142 struct mrsas_mfi_cmd *cmd, u_int8_t status);
144 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
146 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
148 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
149 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
151 extern int mrsas_cam_attach(struct mrsas_softc *sc);
152 extern void mrsas_cam_detach(struct mrsas_softc *sc);
153 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
154 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
155 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
156 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163 extern void mrsas_xpt_release(struct mrsas_softc *sc);
164 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165 mrsas_get_request_desc(struct mrsas_softc *sc,
167 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
171 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
174 * PCI device struct and table
/*
 * PCI ID table used by mrsas_probe()/mrsas_find_ident() to recognize
 * supported AVAGO/LSI MegaRAID controllers. Each row is
 * {vendor, device, subvendor, subdevice, description}; 0xffff wildcards
 * the sub IDs (see the matching logic in mrsas_find_ident).
 * NOTE(review): this listing has gaps (original line numbers jump from
 * 177 to 185 and stop at 192) — the mrsas_ident struct body and the
 * zero-terminator row of the table are not visible here; confirm against
 * the full source.
 */
177 typedef struct mrsas_ident {
185 MRSAS_CTLR_ID device_table[] = {
186 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
187 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
188 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
189 {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
190 {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
191 {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
192 {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
197 * Character device entry points
/*
 * cdevsw wiring the /dev/mrsasN node to the static handlers declared
 * above; registered via make_dev() in mrsas_ich_startup().
 * NOTE(review): original lines 208-210 are missing from this listing,
 * so the .d_name field and closing brace are not visible — verify
 * against the full source.
 */
200 static struct cdevsw mrsas_cdevsw = {
201 .d_version = D_VERSION,
202 .d_open = mrsas_open,
203 .d_close = mrsas_close,
204 .d_read = mrsas_read,
205 .d_write = mrsas_write,
206 .d_ioctl = mrsas_ioctl,
207 .d_poll = mrsas_poll,
/* Malloc type tag for all driver heap allocations (e.g. ld_drv_map). */
211 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
214 * In the cdevsw routines, we find our softc by using the si_drv1 member of
215 * struct cdev. We set this variable to point to our softc in our attach
216 * routine when we create the /dev entry.
/*
 * Character-device open/close/read/write entry points.
 * NOTE(review): only the signatures and one local declaration survive in
 * this listing (original lines 220-227, 229-236, 238-244, 246-253 are
 * missing). Per the comment above, the missing bodies presumably fetch
 * the softc from dev->si_drv1 and return — confirm against full source.
 */
219 mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
221 struct mrsas_softc *sc;
228 mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
230 struct mrsas_softc *sc;
237 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
239 struct mrsas_softc *sc;
245 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
247 struct mrsas_softc *sc;
254 * Register Read/Write Functions
/*
 * MMIO accessors: 32-bit register write/read at 'offset' within the
 * controller's BAR, using the bus tag/handle cached in the softc at
 * attach time. NOTE(review): the 'value' parameter line of
 * mrsas_write_reg and both functions' braces are missing from this
 * listing (original lines 259-260, 263-267, 269, 272, 274 absent).
 */
258 mrsas_write_reg(struct mrsas_softc *sc, int offset,
261 bus_space_tag_t bus_tag = sc->bus_tag;
262 bus_space_handle_t bus_handle = sc->bus_handle;
264 bus_space_write_4(bus_tag, bus_handle, offset, value);
268 mrsas_read_reg(struct mrsas_softc *sc, int offset)
270 bus_space_tag_t bus_tag = sc->bus_tag;
271 bus_space_handle_t bus_handle = sc->bus_handle;
273 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
278 * Interrupt Disable/Enable/Clear Functions
/*
 * Disable: mask all interrupt sources (write all-ones to
 * outbound_intr_mask) and read the register back to flush the posted
 * PCI write. Enable: clear any latched status, then unmask only
 * MFI_FUSION_ENABLE_INTERRUPT_MASK bits; each write is followed by a
 * read-back flush. NOTE(review): braces and the 'status' local
 * declarations are missing from this listing (original lines 283,
 * 285-286, 291-293, 295, 297-298, 302, 305-306 absent).
 */
282 mrsas_disable_intr(struct mrsas_softc *sc)
284 u_int32_t mask = 0xFFFFFFFF;
287 sc->mask_interrupts = 1;
288 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
289 /* Dummy read to force pci flush */
290 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
294 mrsas_enable_intr(struct mrsas_softc *sc)
296 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
299 sc->mask_interrupts = 0;
300 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
301 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
303 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
304 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * Acknowledge a pending controller interrupt. Reads outbound_intr_status;
 * on an FW-state-change interrupt it inspects the scratch pad for
 * MFI_STATE_FAULT and wakes the OCR thread, then writes the status back
 * (with read-back flush) to ack. Returns early if the status carries no
 * bits in MFI_FUSION_ENABLE_INTERRUPT_MASK (not our interrupt).
 * NOTE(review): braces and the return statements are missing from this
 * listing (original lines 309, 314-315, 317-318, 327, 330-331, 334-335,
 * 337-338 absent) — exact return values cannot be confirmed from here.
 */
308 mrsas_clear_intr(struct mrsas_softc *sc)
310 u_int32_t status, fw_status, fw_state;
312 /* Read received interrupt */
313 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
316 * If FW state change interrupt is received, write to it again to
319 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
320 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
321 outbound_scratch_pad));
322 fw_state = fw_status & MFI_STATE_MASK;
323 if (fw_state == MFI_STATE_FAULT) {
324 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
325 if (sc->ocr_thread_active)
326 wakeup(&sc->ocr_chan);
328 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
329 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
332 /* Not our interrupt, so just return */
333 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
336 /* We got a reply interrupt */
341 * PCI Support Functions
/*
 * Walk device_table (terminated by a zero vendor entry) and return the
 * row matching the PCI vendor/device IDs; subvendor/subdevice match
 * either exactly or via the 0xffff wildcard.
 * NOTE(review): the return statements and closing brace are missing
 * from this listing (original lines 346, 348, 356-359 absent) —
 * presumably returns the matching row or NULL; confirm in full source.
 */
344 static struct mrsas_ident *
345 mrsas_find_ident(device_t dev)
347 struct mrsas_ident *pci_device;
349 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
350 if ((pci_device->vendor == pci_get_vendor(dev)) &&
351 (pci_device->device == pci_get_device(dev)) &&
352 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
353 (pci_device->subvendor == 0xffff)) &&
354 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
355 (pci_device->subdevice == 0xffff)))
/*
 * PCI probe entry point: match the device against device_table, print
 * the driver version banner once (first_ctrl static guard), and set the
 * device description from the matched row.
 * NOTE(review): several lines are missing from this listing (original
 * 363, 366, 368, 370-372, 375-378 absent), including the first_ctrl
 * reset, the return of the probe priority, and the ENXIO fallthrough —
 * confirm against the full source.
 */
362 mrsas_probe(device_t dev)
364 static u_int8_t first_ctrl = 1;
365 struct mrsas_ident *id;
367 if ((id = mrsas_find_ident(dev)) != NULL) {
369 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
373 device_set_desc(dev, id->desc);
374 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
381 * mrsas_setup_sysctl: setup sysctl values for mrsas
382 * input: Adapter instance soft state
384 * Setup sysctl entries for mrsas driver.
/*
 * Prefers the device's own sysctl context/tree; if unavailable, creates
 * a private context under hw.mrsas.<unit>. Then registers the tunables
 * visible below (disable_ocr, driver_version, reset_count,
 * fw_outstanding, io_cmds_highwater, mrsas_debug, mrsas_io_timeout,
 * mrsas_fw_fault_check_delay, reset_in_progress).
 * NOTE(review): braces and some lines are missing from this listing
 * (original 388, 392-393, 395-396, 400, 404, 411, 414, 418, 422, 426,
 * 430, 434, 438, 442, 447, 451-452 absent), including the early-return
 * on SYSCTL_ADD_NODE failure.
 */
387 mrsas_setup_sysctl(struct mrsas_softc *sc)
389 struct sysctl_ctx_list *sysctl_ctx = NULL;
390 struct sysctl_oid *sysctl_tree = NULL;
391 char tmpstr[80], tmpstr2[80];
394 * Setup the sysctl variable so the user can change the debug level
397 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
398 device_get_unit(sc->mrsas_dev));
399 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
401 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
402 if (sysctl_ctx != NULL)
403 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
405 if (sysctl_tree == NULL) {
406 sysctl_ctx_init(&sc->sysctl_ctx);
407 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
408 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
409 CTLFLAG_RD, 0, tmpstr);
410 if (sc->sysctl_tree == NULL)
412 sysctl_ctx = &sc->sysctl_ctx;
413 sysctl_tree = sc->sysctl_tree;
415 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
416 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
417 "Disable the use of OCR");
419 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
420 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
421 strlen(MRSAS_VERSION), "driver version");
423 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
424 OID_AUTO, "reset_count", CTLFLAG_RD,
425 &sc->reset_count, 0, "number of ocr from start of the day");
427 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
428 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
429 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
431 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
432 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
433 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
435 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
436 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
437 "Driver debug level");
439 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
440 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
441 0, "Driver IO timeout value in mili-second.");
443 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
444 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
445 &sc->mrsas_fw_fault_check_delay,
446 0, "FW fault check thread delay in seconds. <default is 1 sec>");
448 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
449 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
450 &sc->reset_in_progress, 0, "ocr in progress status");
455 * mrsas_get_tunables: get tunable parameters.
456 * input: Adapter instance soft state
458 * Get tunable parameters. This will help to debug driver at boot time.
/*
 * Seeds defaults (debug level, IO timeout, 1 s FW fault-check delay),
 * then overrides from loader tunables: global hw.mrsas.debug_level and
 * hw.mrsas.lb_pending_cmds, then the per-unit dev.mrsas.<unit>.debug_level.
 * NOTE(review): braces and the tmpstr declaration are missing from this
 * listing (original 462-464, 469, 471-472, 474, 476-477, 479, 481,
 * 486-487 absent).
 */
461 mrsas_get_tunables(struct mrsas_softc *sc)
465 /* XXX default to some debugging for now */
466 sc->mrsas_debug = MRSAS_FAULT;
467 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
468 sc->mrsas_fw_fault_check_delay = 1;
470 sc->reset_in_progress = 0;
473 * Grab the global variables.
475 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
478 * Grab the global variables.
480 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
482 /* Grab the unit-instance variables */
483 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
484 device_get_unit(sc->mrsas_dev));
485 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
489 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
490 * Used to get sequence number at driver load time.
491 * input: Adapter soft state
493 * Allocates DMAable memory for the event log info internal command.
/*
 * Standard three-step busdma allocation: create a tag (32-bit addressable,
 * child of mrsas_parent_tag), allocate DMA-safe memory into el_info_mem,
 * load the map to obtain el_info_phys_addr via mrsas_addr_cb, then zero
 * the buffer. NOTE(review): many tag-create argument lines and all
 * return/brace lines are missing from this listing (original 497-499,
 * 503, 505-512, 514-515, 519-520, 525-526, 528-530 absent) — error
 * paths presumably return ENOMEM; confirm in full source.
 */
496 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
500 /* Allocate get event log info command */
501 el_info_size = sizeof(struct mrsas_evt_log_info);
502 if (bus_dma_tag_create(sc->mrsas_parent_tag,
504 BUS_SPACE_MAXADDR_32BIT,
513 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
516 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
517 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
518 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
521 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
522 sc->el_info_mem, el_info_size, mrsas_addr_cb,
523 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
524 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
527 memset(sc->el_info_mem, 0, el_info_size);
532 * mrsas_free_evt_info_cmd: Free memory for Event log info command
533 * input: Adapter soft state
535 * Deallocates memory for the event log info internal command.
/*
 * Teardown mirror of mrsas_alloc_evt_log_info_cmd: unload the DMA map if
 * it was loaded, free the DMA memory, destroy the tag — each step guarded
 * so partial allocations free cleanly. NOTE(review): braces missing from
 * this listing (original 539, 546 absent).
 */
538 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
540 if (sc->el_info_phys_addr)
541 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
542 if (sc->el_info_mem != NULL)
543 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
544 if (sc->el_info_tag != NULL)
545 bus_dma_tag_destroy(sc->el_info_tag);
549 * mrsas_get_seq_num: Get latest event sequence number
550 * @sc: Adapter soft state
551 * @eli: Firmware event log sequence number information.
553 * Firmware maintains a log of all events in a non-volatile area.
554 * Driver get the sequence number using DCMD
555 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
/*
 * Flow: get a free MFI command, allocate the DMAable event-log buffer,
 * build a DIR_READ DCMD (MR_DCMD_CTRL_EVENT_GET_INFO) pointing its SGE
 * at el_info_phys_addr, issue it blocking, copy the result into the
 * caller's *eli, free the buffer, and release the command. On
 * ETIMEDOUT, do_timedout_reset is set to request an OCR.
 * NOTE(review): braces, returns and several lines are missing from this
 * listing (original 556-558, 561, 565, 567-568, 570-571, 573, 577-578,
 * 580, 583, 585-586, 591, 594-597, 599, 602-604, 606, 608-611 absent);
 * note do_ocr is initialized but its consuming branch is not visible.
 */
559 mrsas_get_seq_num(struct mrsas_softc *sc,
560 struct mrsas_evt_log_info *eli)
562 struct mrsas_mfi_cmd *cmd;
563 struct mrsas_dcmd_frame *dcmd;
564 u_int8_t do_ocr = 1, retcode = 0;
566 cmd = mrsas_get_mfi_cmd(sc);
569 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
572 dcmd = &cmd->frame->dcmd;
574 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
575 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
576 mrsas_release_mfi_cmd(cmd);
579 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
581 dcmd->cmd = MFI_CMD_DCMD;
582 dcmd->cmd_status = 0x0;
584 dcmd->flags = MFI_FRAME_DIR_READ;
587 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
588 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
589 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
590 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
592 retcode = mrsas_issue_blocked_cmd(sc, cmd);
593 if (retcode == ETIMEDOUT)
598 * Copy the data back into callers buffer
600 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
601 mrsas_free_evt_log_info_cmd(sc);
605 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
607 mrsas_release_mfi_cmd(cmd);
614 * mrsas_register_aen: Register for asynchronous event notification
615 * @sc: Adapter soft state
616 * @seq_num: Starting sequence number
617 * @class_locale: Class of the event
619 * This function subscribes for events beyond the @seq_num
620 * and type @class_locale.
/*
 * If an AEN is already outstanding (sc->aen_cmd): when the pending
 * registration's class/locale already covers the new request, nothing is
 * done; otherwise the class/locale are merged into a superset, the old
 * command is aborted (abort_aen flag + blocked abort), and a new
 * MR_DCMD_CTRL_EVENT_WAIT DCMD is built with mbox.w[0]=seq_num and
 * mbox.w[1]=merged class/locale, SGE pointing at evt_detail, and issued
 * non-blocking via mrsas_issue_dcmd.
 * NOTE(review): braces, returns and several lines are missing from this
 * listing (original 618, 621-623, 626-627, 632-633, 641-643, 645-647,
 * 649-650, 656-657, 661, 664-666, 668, 671, 674-676, 679-682, 684-687,
 * 689, 691-692, 694, 696, 699, 701-702, 710, 713-715, 719-722, 724,
 * 727-730 absent) — notably the aen_cmd NULL-check guard and the
 * sc->aen_cmd = cmd assignment described by the comment near line 716
 * are not visible; the visible 711-712 race re-check releases the cmd
 * if another registration won.
 */
624 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
625 u_int32_t class_locale_word)
628 struct mrsas_mfi_cmd *cmd;
629 struct mrsas_dcmd_frame *dcmd;
630 union mrsas_evt_class_locale curr_aen;
631 union mrsas_evt_class_locale prev_aen;
634 * If there an AEN pending already (aen_cmd), check if the
635 * class_locale of that pending AEN is inclusive of the new AEN
636 * request we currently have. If it is, then we don't have to do
637 * anything. In other words, whichever events the current AEN request
638 * is subscribing to, have already been subscribed to. If the old_cmd
639 * is _not_ inclusive, then we have to abort that command, form a
640 * class_locale that is superset of both old and current and re-issue
644 curr_aen.word = class_locale_word;
648 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
651 * A class whose enum value is smaller is inclusive of all
652 * higher values. If a PROGRESS (= -1) was previously
653 * registered, then a new registration requests for higher
654 * classes need not be sent to FW. They are automatically
655 * included. Locale numbers don't have such hierarchy. They
658 if ((prev_aen.members.class <= curr_aen.members.class) &&
659 !((prev_aen.members.locale & curr_aen.members.locale) ^
660 curr_aen.members.locale)) {
662 * Previously issued event registration includes
663 * current request. Nothing to do.
667 curr_aen.members.locale |= prev_aen.members.locale;
669 if (prev_aen.members.class < curr_aen.members.class)
670 curr_aen.members.class = prev_aen.members.class;
672 sc->aen_cmd->abort_aen = 1;
673 ret_val = mrsas_issue_blocked_abort_cmd(sc,
677 printf("mrsas: Failed to abort "
678 "previous AEN command\n");
683 cmd = mrsas_get_mfi_cmd(sc);
688 dcmd = &cmd->frame->dcmd;
690 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
693 * Prepare DCMD for aen registration
695 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
697 dcmd->cmd = MFI_CMD_DCMD;
698 dcmd->cmd_status = 0x0;
700 dcmd->flags = MFI_FRAME_DIR_READ;
703 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
704 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
705 dcmd->mbox.w[0] = seq_num;
706 sc->last_seq_num = seq_num;
707 dcmd->mbox.w[1] = curr_aen.word;
708 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
709 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
711 if (sc->aen_cmd != NULL) {
712 mrsas_release_mfi_cmd(cmd);
716 * Store reference to the cmd used to register for AEN. When an
717 * application wants us to register for AEN, we have to abort this
718 * cmd and re-register with a new EVENT LOCALE supplied by that app
723 * Issue the aen registration frame
725 if (mrsas_issue_dcmd(sc, cmd)) {
726 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
733 * mrsas_start_aen: Subscribes to AEN during driver load time
734 * @instance: Adapter soft state
/*
 * Fetches the newest event sequence number from firmware via
 * mrsas_get_seq_num(), then registers for all-locale DEBUG-class events
 * starting at newest_seq_num + 1.
 * NOTE(review): braces and the error-return after the get_seq_num
 * failure are missing from this listing (original 735-736, 738,
 * 741-742, 744, 746, 748-749, 754, 756-757 absent); the register call's
 * final argument (class_locale.word, presumably) is on a missing line.
 */
737 mrsas_start_aen(struct mrsas_softc *sc)
739 struct mrsas_evt_log_info eli;
740 union mrsas_evt_class_locale class_locale;
743 /* Get the latest sequence number from FW */
745 memset(&eli, 0, sizeof(eli));
747 if (mrsas_get_seq_num(sc, &eli))
750 /* Register AEN with FW for latest sequence number plus 1 */
751 class_locale.members.reserved = 0;
752 class_locale.members.locale = MR_EVT_LOCALE_ALL;
753 class_locale.members.class = MR_EVT_CLASS_DEBUG;
755 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
761 * mrsas_setup_msix: Allocate MSI-x vectors
762 * @sc: adapter soft state
/*
 * For each of sc->msix_vectors: record per-vector context (sc pointer +
 * MSIxIndex), allocate the IRQ resource (rids start at 1 for MSI-X),
 * and hook mrsas_isr as an MPSAFE CAM interrupt handler. On any failure
 * everything set up so far is torn down via mrsas_teardown_intr.
 * NOTE(review): braces, the loop-local declaration, the RF_* flags
 * argument to bus_alloc_resource_any, and the return statements are
 * missing from this listing (original 763-764, 766-768, 775, 779, 781,
 * 788-792, 794-795 absent).
 */
765 mrsas_setup_msix(struct mrsas_softc *sc)
769 for (i = 0; i < sc->msix_vectors; i++) {
770 sc->irq_context[i].sc = sc;
771 sc->irq_context[i].MSIxIndex = i;
772 sc->irq_id[i] = i + 1;
773 sc->mrsas_irq[i] = bus_alloc_resource_any
774 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
776 if (sc->mrsas_irq[i] == NULL) {
777 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
778 goto irq_alloc_failed;
780 if (bus_setup_intr(sc->mrsas_dev,
782 INTR_MPSAFE | INTR_TYPE_CAM,
783 NULL, mrsas_isr, &sc->irq_context[i],
784 &sc->intr_handle[i])) {
785 device_printf(sc->mrsas_dev,
786 "Cannot set up MSI-x interrupt handler\n");
787 goto irq_alloc_failed;
793 mrsas_teardown_intr(sc);
798 * mrsas_allocate_msix: Setup MSI-x vectors
799 * @sc: adapter soft state
/*
 * Requests sc->msix_vectors MSI-X messages from the PCI layer; logs the
 * count on success, tears down interrupts and falls to the failure path
 * on error. NOTE(review): braces and returns are missing from this
 * listing (original 800-801, 803, 807, 810-813, 815-816 absent).
 */
802 mrsas_allocate_msix(struct mrsas_softc *sc)
804 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
805 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
806 " of vectors\n", sc->msix_vectors);
808 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
809 goto irq_alloc_failed;
814 mrsas_teardown_intr(sc);
819 * mrsas_attach: PCI entry point
820 * input: pointer to device struct
822 * Performs setup of PCI and registers, initializes mutexes and linked lists,
823 * registers interrupts and CAM, and initializes the adapter/controller to
/*
 * Visible sequence: zero the softc, read PCI config and force bus-master,
 * map BAR1 registers, init all mutexes and the MPT/MFI command tailqs,
 * init firmware, attach CAM, set up IRQs, start the OCR kthread, then
 * defer cdev creation/AEN setup to the mrsas_ich_startup config hook.
 * The labels from attach_fail_ocr_thread down unwind in reverse order.
 * NOTE(review): many lines are missing from this listing (original 824-826,
 * 828, 831, 833, 835, 837, 839-840, 842, 845-846, 850, 852, 856, 858-859,
 * 862, 872, 876, 878, 880, 883-885, 888-889, 893-894, 897, 901, 904-905,
 * 908, 913, 915-916, 920, 922, 924, 928, 937-938, 941-945 absent) —
 * including the mrsas_io_lock destroy in the unwind (only declared locks
 * visible here are destroyed), the return statements, and the
 * port-enable check's error branch; verify the unwind order against the
 * full source before relying on this fragment.
 */
827 mrsas_attach(device_t dev)
829 struct mrsas_softc *sc = device_get_softc(dev);
830 uint32_t cmd, bar, error;
832 memset(sc, 0, sizeof(struct mrsas_softc));
834 /* Look up our softc and initialize its fields. */
836 sc->device_id = pci_get_device(dev);
838 mrsas_get_tunables(sc);
841 * Set up PCI and registers
843 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
844 if ((cmd & PCIM_CMD_PORTEN) == 0) {
847 /* Force the busmaster enable bit on. */
848 cmd |= PCIM_CMD_BUSMASTEREN;
849 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
851 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
853 sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
854 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
855 &(sc->reg_res_id), RF_ACTIVE))
857 device_printf(dev, "Cannot allocate PCI registers\n");
860 sc->bus_tag = rman_get_bustag(sc->reg_res);
861 sc->bus_handle = rman_get_bushandle(sc->reg_res);
863 /* Intialize mutexes */
864 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
865 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
866 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
867 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
868 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
869 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
870 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
871 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
873 /* Intialize linked list */
874 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
875 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
877 mrsas_atomic_set(&sc->fw_outstanding, 0);
879 sc->io_cmds_highwater = 0;
881 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
882 sc->UnevenSpanSupport = 0;
886 /* Initialize Firmware */
887 if (mrsas_init_fw(sc) != SUCCESS) {
890 /* Register mrsas to CAM layer */
891 if ((mrsas_cam_attach(sc) != SUCCESS)) {
892 goto attach_fail_cam;
895 if (mrsas_setup_irq(sc) != SUCCESS) {
896 goto attach_fail_irq;
898 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
899 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
900 device_get_unit(sc->mrsas_dev));
902 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
903 goto attach_fail_ocr_thread;
906 * After FW initialization and OCR thread creation
907 * we will defer the cdev creation, AEN setup on ICH callback
909 sc->mrsas_ich.ich_func = mrsas_ich_startup;
910 sc->mrsas_ich.ich_arg = sc;
911 if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
912 device_printf(sc->mrsas_dev, "Config hook is already established\n");
914 mrsas_setup_sysctl(sc);
917 attach_fail_ocr_thread:
918 if (sc->ocr_thread_active)
919 wakeup(&sc->ocr_chan);
921 mrsas_teardown_intr(sc);
923 mrsas_cam_detach(sc);
925 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
926 if (sc->msix_enable == 1)
927 pci_release_msi(sc->mrsas_dev);
929 mtx_destroy(&sc->sim_lock);
930 mtx_destroy(&sc->aen_lock);
931 mtx_destroy(&sc->pci_lock);
932 mtx_destroy(&sc->io_lock);
933 mtx_destroy(&sc->ioctl_lock);
934 mtx_destroy(&sc->mpt_cmd_pool_lock);
935 mtx_destroy(&sc->mfi_cmd_pool_lock);
936 mtx_destroy(&sc->raidmap_lock);
939 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
940 sc->reg_res_id, sc->reg_res);
946 * Interrupt config hook
/*
 * Deferred startup run from the config_intrhook established in
 * mrsas_attach: initializes the IOCTL counting semaphore
 * (MRSAS_MAX_MFI_CMDS - 5 concurrent IOCTLs), creates /dev/mrsas<unit>
 * (plus a Linux-emulator alias on unit 0), publishes the softc in
 * si_drv1 and in the global mrsas_mgmt_info table, enables interrupts,
 * starts AEN, then disestablishes the hook.
 * NOTE(review): some lines are missing from this listing (original 950,
 * 952-953, 955, 959, 964, 969-970, 972-973, 976, 979, 983, 986, 994,
 * 999-1000 absent) — including the braces and any error handling for
 * make_dev; the mgmt_info zeroing only happens for unit 0 as shown.
 */
949 mrsas_ich_startup(void *arg)
951 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
954 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
956 sema_init(&sc->ioctl_count_sema,
957 MRSAS_MAX_MFI_CMDS - 5,
958 IOCTL_SEMA_DESCRIPTION);
960 /* Create a /dev entry for mrsas controller. */
961 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
962 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
963 device_get_unit(sc->mrsas_dev));
965 if (device_get_unit(sc->mrsas_dev) == 0) {
966 make_dev_alias_p(MAKEDEV_CHECKNAME,
967 &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
968 "megaraid_sas_ioctl_node");
971 sc->mrsas_cdev->si_drv1 = sc;
974 * Add this controller to mrsas_mgmt_info structure so that it can be
975 * exported to management applications
977 if (device_get_unit(sc->mrsas_dev) == 0)
978 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
980 mrsas_mgmt_info.count++;
981 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
982 mrsas_mgmt_info.max_index++;
984 /* Enable Interrupts */
985 mrsas_enable_intr(sc);
987 /* Initiate AEN (Asynchronous Event Notification) */
988 if (mrsas_start_aen(sc)) {
989 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
990 "Further events from the controller will not be communicated.\n"
991 "Either there is some problem in the controller"
992 "or the controller does not support AEN.\n"
993 "Please contact to the SUPPORT TEAM if the problem persists\n");
995 if (sc->mrsas_ich.ich_arg != NULL) {
996 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
997 config_intrhook_disestablish(&sc->mrsas_ich);
998 sc->mrsas_ich.ich_arg = NULL;
1003 * mrsas_detach: De-allocates and teardown resources
1004 * input: pointer to device struct
1006 * This function is the entry point for device disconnect and detach.
1007 * It performs memory de-allocations, shutdown of the controller and various
1008 * teardown and destroy resource functions.
/*
 * Visible sequence: flag remove_in_progress, destroy the cdev(s), NULL
 * this softc's mgmt_info slot (max_index stays — sparse array by design),
 * wake the OCR thread and pause()-poll until any in-flight OCR and the
 * OCR thread itself finish, flush cache and shut the controller down,
 * disable/teardown interrupts and CAM, destroy mutexes, drain the IOCTL
 * semaphore back to its initial count, destroy it, release the register
 * resource, and free the sysctl context if one was privately created.
 * NOTE(review): lines are missing from this listing (original 1009-1010,
 * 1012, 1014-1015, 1018, 1023-1024, 1027, 1032-1035, 1039, 1043,
 * 1045-1046, 1048, 1051, 1054, 1056, 1062, 1071, 1075, 1078-1079, 1082,
 * 1085-1089 absent) — including the loop-counter increments/resets, the
 * i and mrsas_shutdown_irq declarations, any guard on reg_res, and the
 * return statement.
 */
1011 mrsas_detach(device_t dev)
1013 struct mrsas_softc *sc;
1016 sc = device_get_softc(dev);
1017 sc->remove_in_progress = 1;
1019 /* Destroy the character device so no other IOCTL will be handled */
1020 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1021 destroy_dev(sc->mrsas_linux_emulator_cdev);
1022 destroy_dev(sc->mrsas_cdev);
1025 * Take the instance off the instance array. Note that we will not
1026 * decrement the max_index. We let this array be sparse array
1028 for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1029 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1030 mrsas_mgmt_info.count--;
1031 mrsas_mgmt_info.sc_ptr[i] = NULL;
1036 if (sc->ocr_thread_active)
1037 wakeup(&sc->ocr_chan);
1038 while (sc->reset_in_progress) {
1040 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1041 mrsas_dprint(sc, MRSAS_INFO,
1042 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1044 pause("mr_shutdown", hz);
1047 while (sc->ocr_thread_active) {
1049 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1050 mrsas_dprint(sc, MRSAS_INFO,
1052 "mrsas_ocr thread to quit ocr %d\n", i,
1053 sc->ocr_thread_active);
1055 pause("mr_shutdown", hz);
1057 mrsas_flush_cache(sc);
1058 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1059 mrsas_disable_intr(sc);
1060 mrsas_cam_detach(sc);
1061 mrsas_teardown_intr(sc);
1063 mtx_destroy(&sc->sim_lock);
1064 mtx_destroy(&sc->aen_lock);
1065 mtx_destroy(&sc->pci_lock);
1066 mtx_destroy(&sc->io_lock);
1067 mtx_destroy(&sc->ioctl_lock);
1068 mtx_destroy(&sc->mpt_cmd_pool_lock);
1069 mtx_destroy(&sc->mfi_cmd_pool_lock);
1070 mtx_destroy(&sc->raidmap_lock);
1072 /* Wait for all the semaphores to be released */
1073 while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
1074 pause("mr_shutdown", hz);
1076 /* Destroy the counting semaphore created for Ioctl */
1077 sema_destroy(&sc->ioctl_count_sema);
1080 bus_release_resource(sc->mrsas_dev,
1081 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1083 if (sc->sysctl_tree != NULL)
1084 sysctl_ctx_free(&sc->sysctl_ctx);
1090 * mrsas_free_mem: Frees allocated memory
1091 * input: Adapter instance soft state
1093 * This function is called from mrsas_detach() to free previously allocated
1097 mrsas_free_mem(struct mrsas_softc *sc)
1101 struct mrsas_mfi_cmd *mfi_cmd;
1102 struct mrsas_mpt_cmd *mpt_cmd;
1105 * Free RAID map memory
1107 for (i = 0; i < 2; i++) {
1108 if (sc->raidmap_phys_addr[i])
1109 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1110 if (sc->raidmap_mem[i] != NULL)
1111 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1112 if (sc->raidmap_tag[i] != NULL)
1113 bus_dma_tag_destroy(sc->raidmap_tag[i]);
1115 if (sc->ld_drv_map[i] != NULL)
1116 free(sc->ld_drv_map[i], M_MRSAS);
1118 for (i = 0; i < 2; i++) {
1119 if (sc->jbodmap_phys_addr[i])
1120 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1121 if (sc->jbodmap_mem[i] != NULL)
1122 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1123 if (sc->jbodmap_tag[i] != NULL)
1124 bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1127 * Free version buffer memory
1129 if (sc->verbuf_phys_addr)
1130 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1131 if (sc->verbuf_mem != NULL)
1132 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1133 if (sc->verbuf_tag != NULL)
1134 bus_dma_tag_destroy(sc->verbuf_tag);
1138 * Free sense buffer memory
1140 if (sc->sense_phys_addr)
1141 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1142 if (sc->sense_mem != NULL)
1143 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1144 if (sc->sense_tag != NULL)
1145 bus_dma_tag_destroy(sc->sense_tag);
1148 * Free chain frame memory
1150 if (sc->chain_frame_phys_addr)
1151 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1152 if (sc->chain_frame_mem != NULL)
1153 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1154 if (sc->chain_frame_tag != NULL)
1155 bus_dma_tag_destroy(sc->chain_frame_tag);
1158 * Free IO Request memory
1160 if (sc->io_request_phys_addr)
1161 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1162 if (sc->io_request_mem != NULL)
1163 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1164 if (sc->io_request_tag != NULL)
1165 bus_dma_tag_destroy(sc->io_request_tag);
1168 * Free Reply Descriptor memory
1170 if (sc->reply_desc_phys_addr)
1171 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1172 if (sc->reply_desc_mem != NULL)
1173 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1174 if (sc->reply_desc_tag != NULL)
1175 bus_dma_tag_destroy(sc->reply_desc_tag);
1178 * Free event detail memory
1180 if (sc->evt_detail_phys_addr)
1181 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1182 if (sc->evt_detail_mem != NULL)
1183 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1184 if (sc->evt_detail_tag != NULL)
1185 bus_dma_tag_destroy(sc->evt_detail_tag);
1190 if (sc->mfi_cmd_list) {
1191 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1192 mfi_cmd = sc->mfi_cmd_list[i];
1193 mrsas_free_frame(sc, mfi_cmd);
1196 if (sc->mficmd_frame_tag != NULL)
1197 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1200 * Free MPT internal command list
1202 max_cmd = sc->max_fw_cmds;
1203 if (sc->mpt_cmd_list) {
1204 for (i = 0; i < max_cmd; i++) {
1205 mpt_cmd = sc->mpt_cmd_list[i];
1206 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1207 free(sc->mpt_cmd_list[i], M_MRSAS);
1209 free(sc->mpt_cmd_list, M_MRSAS);
1210 sc->mpt_cmd_list = NULL;
1213 * Free MFI internal command list
1216 if (sc->mfi_cmd_list) {
1217 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1218 free(sc->mfi_cmd_list[i], M_MRSAS);
1220 free(sc->mfi_cmd_list, M_MRSAS);
1221 sc->mfi_cmd_list = NULL;
1224 * Free request descriptor memory
1226 free(sc->req_desc, M_MRSAS);
1227 sc->req_desc = NULL;
1230 * Destroy parent tag
1232 if (sc->mrsas_parent_tag != NULL)
1233 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1236 * Free ctrl_info memory
1238 if (sc->ctrl_info != NULL)
1239 free(sc->ctrl_info, M_MRSAS);
/*
 * NOTE(review): extraction dropped lines (braces/blanks); embedded numbers are
 * original file line numbers. Code kept verbatim, comments only added.
 */
1243 * mrsas_teardown_intr: Teardown interrupt
1244 * input: Adapter instance soft state
1246 * This function is called from mrsas_detach() to teardown and release bus
1247 * interrupt resourse.
1250 mrsas_teardown_intr(struct mrsas_softc *sc)
/* Legacy (INTx) path: single vector at index 0. */
1254 if (!sc->msix_enable) {
1255 if (sc->intr_handle[0])
1256 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1257 if (sc->mrsas_irq[0] != NULL)
1258 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1259 sc->irq_id[0], sc->mrsas_irq[0]);
1260 sc->intr_handle[0] = NULL;
/* MSI-X path: tear down and release every allocated vector. */
1262 for (i = 0; i < sc->msix_vectors; i++) {
1263 if (sc->intr_handle[i])
1264 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1265 sc->intr_handle[i]);
1267 if (sc->mrsas_irq[i] != NULL)
1268 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1269 sc->irq_id[i], sc->mrsas_irq[i]);
1271 sc->intr_handle[i] = NULL;
/* Give the MSI/MSI-X message resources back to the PCI layer. */
1273 pci_release_msi(sc->mrsas_dev);
1279 * mrsas_suspend: Suspend entry point
1280 * input: Device struct pointer
1282 * This function is the entry point for system suspend from the OS.
/* Intentional stub: hibernation/suspend is not yet implemented. */
1285 mrsas_suspend(device_t dev)
1287 /* This will be filled when the driver will have hibernation support */
1292 * mrsas_resume: Resume entry point
1293 * input: Device struct pointer
1295 * This function is the entry point for system resume from the OS.
/* Intentional stub: mirrors mrsas_suspend(); resume not yet implemented. */
1298 mrsas_resume(device_t dev)
1300 /* This will be filled when the driver will have hibernation support */
/*
 * NOTE(review): extraction dropped lines here (the cdev->si_drv1 branch and
 * the return are not visible). Embedded numbers are original file line
 * numbers; code kept verbatim.
 */
1305 * mrsas_get_softc_instance: Find softc instance based on cmd type
1307 * This function will return softc instance based on cmd type.
1308 * In some case, application fire ioctl on required management instance and
1309 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1310 * case, else get the softc instance from host_no provided by application in
1314 static struct mrsas_softc *
1315 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1317 struct mrsas_softc *sc = NULL;
/* arg is reinterpreted as the user ioctl packet to read host_no from it. */
1318 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1320 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1324 * get the Host number & the softc from data sent by the
/* Index the global management table by the caller-supplied host number. */
1327 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1329 printf("There is no Controller number %d\n",
/* NOTE(review): bounds check appears AFTER the array access above in the
 * original layout — presumably validated upstream; verify against pristine
 * source. */
1331 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1332 mrsas_dprint(sc, MRSAS_FAULT,
1333 "Invalid Controller number %d\n", user_ioc->host_no);
/*
 * NOTE(review): extraction dropped lines (switch header, returns, braces);
 * embedded numbers are original file line numbers. Code kept verbatim.
 */
1340 * mrsas_ioctl: IOCtl commands entry point.
1342 * This function is the entry point for IOCtls from the OS. It calls the
1343 * appropriate function for processing depending on the command received.
1346 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
1349 struct mrsas_softc *sc;
1351 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1353 sc = mrsas_get_softc_instance(dev, cmd, arg);
/* Refuse new ioctls once detach/shutdown has started. */
1357 if (sc->remove_in_progress) {
1358 mrsas_dprint(sc, MRSAS_INFO,
1359 "Driver remove or shutdown called.\n");
/* Spin lock guards reset_in_progress; fast path when no OCR is running. */
1362 mtx_lock_spin(&sc->ioctl_lock);
1363 if (!sc->reset_in_progress) {
1364 mtx_unlock_spin(&sc->ioctl_lock);
1367 mtx_unlock_spin(&sc->ioctl_lock);
/* OCR (online controller reset) in flight: poll once a second until done. */
1368 while (sc->reset_in_progress) {
1370 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1371 mrsas_dprint(sc, MRSAS_INFO,
1372 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1374 pause("mr_ioctl", hz);
1379 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1380 #ifdef COMPAT_FREEBSD32
1381 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1384 * Decrement the Ioctl counting Semaphore before getting an
/* Counting semaphore caps concurrent pass-through ioctls. */
1387 sema_wait(&sc->ioctl_count_sema);
1389 ret = mrsas_passthru(sc, (void *)arg, cmd);
1391 /* Increment the Ioctl counting semaphore value */
1392 sema_post(&sc->ioctl_count_sema);
1395 case MRSAS_IOC_SCAN_BUS:
1396 ret = mrsas_bus_scan(sc);
/* Report PCI bus/slot/function/domain of this controller to userland. */
1399 case MRSAS_IOC_GET_PCI_INFO:
1400 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1401 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1402 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1403 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1404 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1405 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1406 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1407 "pci device no: %d, pci function no: %d,"
1408 "pci domain ID: %d\n",
1409 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1410 pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1415 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1423 * mrsas_poll: poll entry point for mrsas driver fd
1425 * This function is the entry point for poll from the OS. It waits for some AEN
1426 * events to be triggered from the controller and notifies back.
1429 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1431 struct mrsas_softc *sc;
/* Report readable immediately if an AEN has already fired. */
1436 if (poll_events & (POLLIN | POLLRDNORM)) {
1437 if (sc->mrsas_aen_triggered) {
1438 revents |= poll_events & (POLLIN | POLLRDNORM);
/* Otherwise register this thread for selwakeup when an AEN arrives;
 * aen_lock protects mrsas_poll_waiting and the selinfo. */
1442 if (poll_events & (POLLIN | POLLRDNORM)) {
1443 mtx_lock(&sc->aen_lock);
1444 sc->mrsas_poll_waiting = 1;
1445 selrecord(td, &sc->mrsas_select);
1446 mtx_unlock(&sc->aen_lock);
1453 * mrsas_setup_irq: Set up interrupt
1454 * input: Adapter instance soft state
1456 * This function sets up interrupts as a bus resource, with flags indicating
1457 * resource permitting contemporaneous sharing and for resource to activate
1461 mrsas_setup_irq(struct mrsas_softc *sc)
/* Prefer MSI-X when enabled and setup succeeds... */
1463 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1464 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
/* ...else fall back to a single shared legacy INTx vector. */
1467 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1468 sc->irq_context[0].sc = sc;
1469 sc->irq_context[0].MSIxIndex = 0;
1471 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1472 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1473 if (sc->mrsas_irq[0] == NULL) {
1474 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
/* Hook mrsas_isr() as an MPSAFE CAM-type filterless handler. */
1478 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1479 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1480 &sc->irq_context[0], &sc->intr_handle[0])) {
1481 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1490 * mrsas_isr: ISR entry point
1491 * input: argument pointer
1493 * This function is the interrupt service routine entry point. There are two
1494 * types of interrupts, state change interrupt and response interrupt. If an
1495 * interrupt is not ours, we just return.
1498 mrsas_isr(void *arg)
1500 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1501 struct mrsas_softc *sc = irq_context->sc;
/* Interrupts masked (e.g. during init/reset): ignore. */
1504 if (sc->mask_interrupts)
/* Legacy INTx: must read/clear the status register to claim the interrupt. */
1507 if (!sc->msix_vectors) {
1508 status = mrsas_clear_intr(sc);
1512 /* If we are resetting, bail */
1513 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1514 printf(" Entered into ISR when OCR is going active. \n");
1515 mrsas_clear_intr(sc);
1518 /* Process for reply request and clear response interrupt */
1519 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1520 mrsas_clear_intr(sc);
/*
 * NOTE(review): extraction dropped lines (braces, returns, some statements);
 * embedded numbers are original file line numbers. Code kept verbatim.
 */
1526 * mrsas_complete_cmd: Process reply request
1527 * input: Adapter instance soft state
1529 * This function is called from mrsas_isr() to process reply request and clear
1530 * response interrupt. Processing of the reply request entails walking
1531 * through the reply descriptor array for the command request pended from
1532 * Firmware. We look at the Function field to determine the command type and
1533 * perform the appropriate action. Before we return, we clear the response
1537 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1539 Mpi2ReplyDescriptorsUnion_t *desc;
1540 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1541 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1542 struct mrsas_mpt_cmd *cmd_mpt;
1543 struct mrsas_mfi_cmd *cmd_mfi;
1544 u_int8_t reply_descript_type;
1545 u_int16_t smid, num_completed;
1546 u_int8_t status, extStatus;
1547 union desc_value desc_val;
1548 PLD_LOAD_BALANCE_INFO lbinfo;
1549 u_int32_t device_id;
1550 int threshold_reply_count = 0;
1553 /* If we have a hardware error, not need to continue */
1554 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Start at this MSI-X queue's slice of the reply array, resuming from the
 * last consumed index. */
1557 desc = sc->reply_desc_mem;
1558 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1559 + sc->last_reply_idx[MSIxIndex];
1561 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1563 desc_val.word = desc->Words;
1566 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1568 /* Find our reply descriptor for the command and process */
/* All-ones Words means an unused (not yet posted) descriptor — stop there. */
1569 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
/* SMID is 1-based; index into the MPT command list. */
1570 smid = reply_desc->SMID;
1571 cmd_mpt = sc->mpt_cmd_list[smid - 1];
1572 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1574 status = scsi_io_req->RaidContext.status;
1575 extStatus = scsi_io_req->RaidContext.exStatus;
1577 switch (scsi_io_req->Function) {
1578 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
1579 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1580 lbinfo = &sc->load_balance_info[device_id];
/* Undo the load-balance accounting taken at submit time. */
1581 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1582 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1583 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1585 /* Fall thru and complete IO */
1586 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1587 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1588 mrsas_cmd_done(sc, cmd_mpt);
/* Reset status fields so the frame can be reused cleanly. */
1589 scsi_io_req->RaidContext.status = 0;
1590 scsi_io_req->RaidContext.exStatus = 0;
1591 mrsas_atomic_dec(&sc->fw_outstanding);
1593 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
1594 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1595 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1597 mrsas_release_mpt_cmd(cmd_mpt);
/* Advance the per-queue consumer index, wrapping at queue depth. */
1601 sc->last_reply_idx[MSIxIndex]++;
1602 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1603 sc->last_reply_idx[MSIxIndex] = 0;
1605 desc->Words = ~((uint64_t)0x00); /* set it back to all
1608 threshold_reply_count++;
1610 /* Get the next reply descriptor */
1611 if (!sc->last_reply_idx[MSIxIndex]) {
1612 desc = sc->reply_desc_mem;
1613 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1617 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1618 desc_val.word = desc->Words;
1620 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1622 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1626 * Write to reply post index after completing threshold reply
1627 * count and still there are more replies in reply queue
1628 * pending to be completed.
1630 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1631 if (sc->msix_enable) {
/* Invader-family controllers use per-group supplemental host index
 * registers (one per 8 MSI-X vectors). */
1632 if ((sc->device_id == MRSAS_INVADER) ||
1633 (sc->device_id == MRSAS_FURY) ||
1634 (sc->device_id == MRSAS_INTRUDER) ||
1635 (sc->device_id == MRSAS_INTRUDER_24) ||
1636 (sc->device_id == MRSAS_CUTLASS_52) ||
1637 (sc->device_id == MRSAS_CUTLASS_53))
1638 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1639 ((MSIxIndex & 0x7) << 24) |
1640 sc->last_reply_idx[MSIxIndex]);
1642 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1643 sc->last_reply_idx[MSIxIndex]);
1645 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1646 reply_post_host_index), sc->last_reply_idx[0]);
1648 threshold_reply_count = 0;
1652 /* No match, just return */
1653 if (num_completed == 0)
1656 /* Clear response interrupt */
/* Final host-index update acknowledges all consumed replies to FW. */
1657 if (sc->msix_enable) {
1658 if ((sc->device_id == MRSAS_INVADER) ||
1659 (sc->device_id == MRSAS_FURY) ||
1660 (sc->device_id == MRSAS_INTRUDER) ||
1661 (sc->device_id == MRSAS_INTRUDER_24) ||
1662 (sc->device_id == MRSAS_CUTLASS_52) ||
1663 (sc->device_id == MRSAS_CUTLASS_53)) {
1664 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1665 ((MSIxIndex & 0x7) << 24) |
1666 sc->last_reply_idx[MSIxIndex]);
1668 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1669 sc->last_reply_idx[MSIxIndex]);
1671 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1672 reply_post_host_index), sc->last_reply_idx[0]);
/* NOTE(review): the summary line below says "Allocate DMAable memory" but the
 * body maps MFI status to CAM status — the comment header is stale/garbled in
 * this extraction; verify against pristine source. */
1678 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1679 * input: Adapter instance soft state
1681 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1682 * It checks the command status and maps the appropriate CAM status for the
1686 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1688 struct mrsas_softc *sc = cmd->sc;
1689 u_int8_t *sense_data;
1693 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1695 case MFI_STAT_SCSI_IO_FAILED:
1696 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1697 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1698 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1700 /* For now just copy 18 bytes back */
/* 18 bytes = fixed-format SCSI sense data length copied to the CCB. */
1701 memcpy(sense_data, cmd->sense, 18);
1702 cmd->ccb_ptr->csio.sense_len = 18;
1703 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1706 case MFI_STAT_LD_OFFLINE:
1707 case MFI_STAT_DEVICE_NOT_FOUND:
/* Nonzero LUN on a missing device -> invalid LUN; LUN 0 -> no device. */
1708 if (cmd->ccb_ptr->ccb_h.target_lun)
1709 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1711 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1713 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1714 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
/* Unknown FW status: log it and report a generic completion error. */
1717 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1718 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1719 cmd->ccb_ptr->csio.scsi_status = status;
/*
 * NOTE(review): extraction dropped lines (tag-create argument lists are
 * truncated, error returns missing); embedded numbers are original file line
 * numbers. Code kept verbatim, comments only added.
 */
1725 * mrsas_alloc_mem: Allocate DMAable memory
1726 * input: Adapter instance soft state
1728 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1729 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1730 * Kernel virtual address. Callback argument is physical memory address.
1733 mrsas_alloc_mem(struct mrsas_softc *sc)
1735 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1736 chain_frame_size, evt_detail_size, count;
1739 * Allocate parent DMA tag
1741 if (bus_dma_tag_create(NULL, /* parent */
1744 BUS_SPACE_MAXADDR, /* lowaddr */
1745 BUS_SPACE_MAXADDR, /* highaddr */
1746 NULL, NULL, /* filter, filterarg */
1747 MAXPHYS, /* maxsize */
1748 sc->max_num_sge, /* nsegments */
1749 MAXPHYS, /* maxsegsize */
1751 NULL, NULL, /* lockfunc, lockarg */
1752 &sc->mrsas_parent_tag /* tag */
1754 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1758 * Allocate for version buffer
/* Each area below follows the same pattern: child tag (32-bit addressable),
 * dmamem_alloc, bzero, dmamap_load with mrsas_addr_cb saving the bus addr. */
1760 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1761 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1763 BUS_SPACE_MAXADDR_32BIT,
1772 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1775 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1776 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1777 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1780 bzero(sc->verbuf_mem, verbuf_size);
1781 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1782 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1784 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1788 * Allocate IO Request Frames
1790 io_req_size = sc->io_frames_alloc_sz;
1791 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1793 BUS_SPACE_MAXADDR_32BIT,
1801 &sc->io_request_tag)) {
1802 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1805 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1806 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1807 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1810 bzero(sc->io_request_mem, io_req_size);
1811 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1812 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1813 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1814 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1818 * Allocate Chain Frames
1820 chain_frame_size = sc->chain_frames_alloc_sz;
1821 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1823 BUS_SPACE_MAXADDR_32BIT,
1831 &sc->chain_frame_tag)) {
1832 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1835 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1836 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1837 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1840 bzero(sc->chain_frame_mem, chain_frame_size);
1841 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1842 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1843 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1844 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
/* One reply queue slice per MSI-X vector (minimum 1 for legacy INTx). */
1847 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1849 * Allocate Reply Descriptor Array
1851 reply_desc_size = sc->reply_alloc_sz * count;
1852 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1854 BUS_SPACE_MAXADDR_32BIT,
1862 &sc->reply_desc_tag)) {
1863 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1866 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1867 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1868 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1871 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1872 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1873 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1874 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1878 * Allocate Sense Buffer Array. Keep in lower 4GB
1880 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1881 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1883 BUS_SPACE_MAXADDR_32BIT,
1892 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1895 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1896 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1897 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1900 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1901 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1903 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1907 * Allocate for Event detail structure
1909 evt_detail_size = sizeof(struct mrsas_evt_detail);
1910 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1912 BUS_SPACE_MAXADDR_32BIT,
1920 &sc->evt_detail_tag)) {
1921 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1924 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1925 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1926 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1929 bzero(sc->evt_detail_mem, evt_detail_size);
1930 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1931 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1932 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1933 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1937 * Create a dma tag for data buffers; size will be the maximum
1938 * possible I/O size (280kB).
1940 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1947 sc->max_num_sge, /* nsegments */
1953 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1960 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1961 * input: callback argument, machine dependent type
1962 * that describes DMA segments, number of segments, error code
1964 * This function is for the driver to receive mapping information resultant of
1965 * the bus_dmamap_load(). The information is actually not being used, but the
1966 * address is saved anyway.
1969 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
/* Stores only the first segment's bus address; callers load contiguous
 * single-segment buffers, so segs[0] is the whole mapping. */
1974 *addr = segs[0].ds_addr;
/*
 * NOTE(review): extraction dropped lines (error returns, loop braces, parts
 * of bus_dma_tag_create argument lists). Code kept verbatim.
 */
1978 * mrsas_setup_raidmap: Set up RAID map.
1979 * input: Adapter instance soft state
1981 * Allocate DMA memory for the RAID maps and perform setup.
1984 mrsas_setup_raidmap(struct mrsas_softc *sc)
/* Two driver-local map copies (ping/pong), malloc'ed (non-DMA). */
1988 for (i = 0; i < 2; i++) {
1990 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1991 /* Do Error handling */
1992 if (!sc->ld_drv_map[i]) {
1993 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1996 free(sc->ld_drv_map[0], M_MRSAS);
1997 /* ABORT driver initialization */
/* Two DMA-visible firmware map buffers, 32-bit addressable. */
2002 for (int i = 0; i < 2; i++) {
2003 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2005 BUS_SPACE_MAXADDR_32BIT,
2013 &sc->raidmap_tag[i])) {
2014 device_printf(sc->mrsas_dev,
2015 "Cannot allocate raid map tag.\n");
2018 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2019 (void **)&sc->raidmap_mem[i],
2020 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2021 device_printf(sc->mrsas_dev,
2022 "Cannot allocate raidmap memory.\n");
2025 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2027 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2028 sc->raidmap_mem[i], sc->max_map_sz,
2029 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2031 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2034 if (!sc->raidmap_mem[i]) {
2035 device_printf(sc->mrsas_dev,
2036 "Cannot allocate memory for raid map.\n");
/* Fetch the map from FW; on success register for async map updates. */
2041 if (!mrsas_get_map_info(sc))
2042 mrsas_sync_map_info(sc);
2051 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2052 * @sc: Adapter soft state
2054 * Return 0 on success.
2057 megasas_setup_jbod_map(struct mrsas_softc *sc)
2060 uint32_t pd_seq_map_sz;
/* Map size: sync header + one MR_PD_CFG_SEQ per physical device (the header
 * already embeds the first entry, hence MAX_PHYSICAL_DEVICES - 1). */
2062 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2063 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
/* FW must advertise seq-number JBOD fast path; otherwise disable it. */
2065 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2066 sc->use_seqnum_jbod_fp = 0;
/* Already allocated (e.g. on re-init): skip re-allocation. */
2069 if (sc->jbodmap_mem[0])
2072 for (i = 0; i < 2; i++) {
2073 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2075 BUS_SPACE_MAXADDR_32BIT,
2083 &sc->jbodmap_tag[i])) {
2084 device_printf(sc->mrsas_dev,
2085 "Cannot allocate jbod map tag.\n");
2088 if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2089 (void **)&sc->jbodmap_mem[i],
2090 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2091 device_printf(sc->mrsas_dev,
2092 "Cannot allocate jbod map memory.\n");
2095 bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2097 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2098 sc->jbodmap_mem[i], pd_seq_map_sz,
2099 mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2101 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2104 if (!sc->jbodmap_mem[i]) {
2105 device_printf(sc->mrsas_dev,
2106 "Cannot allocate memory for jbod map.\n");
2107 sc->use_seqnum_jbod_fp = 0;
/* Enable the fast path only if both sync passes (read then pend) succeed. */
2113 if (!megasas_sync_pd_seq_num(sc, false) &&
2114 !megasas_sync_pd_seq_num(sc, true))
2115 sc->use_seqnum_jbod_fp = 1;
2117 sc->use_seqnum_jbod_fp = 0;
2119 device_printf(sc->mrsas_dev, "Jbod map is supported\n");
/*
 * NOTE(review): extraction dropped lines (returns, else branches, braces);
 * embedded numbers are original file line numbers. Code kept verbatim.
 */
2123 * mrsas_init_fw: Initialize Firmware
2124 * input: Adapter soft state
2126 * Calls transition_to_ready() to make sure Firmware is in operational state and
2127 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2128 * issues internal commands to get the controller info after the IOC_INIT
2129 * command response is received by Firmware. Note: code relating to
2130 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2131 * is left here as placeholder.
2134 mrsas_init_fw(struct mrsas_softc *sc)
2137 int ret, loop, ocr = 0;
2138 u_int32_t max_sectors_1;
2139 u_int32_t max_sectors_2;
2140 u_int32_t tmp_sectors;
2141 u_int32_t scratch_pad_2;
2142 int msix_enable = 0;
2143 int fw_msix_count = 0;
2145 /* Make sure Firmware is ready */
2146 ret = mrsas_transition_to_ready(sc, ocr);
2147 if (ret != SUCCESS) {
2150 /* MSI-x index 0- reply post host index register */
2151 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2152 /* Check if MSI-X is supported while in ready state */
/* Bit 26 of the outbound scratch pad advertises MSI-X support. */
2153 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2156 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2157 outbound_scratch_pad_2));
2159 /* Check max MSI-X vectors */
2160 if (sc->device_id == MRSAS_TBOLT) {
2161 sc->msix_vectors = (scratch_pad_2
2162 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2163 fw_msix_count = sc->msix_vectors;
2165 /* Invader/Fury supports 96 MSI-X vectors */
2166 sc->msix_vectors = ((scratch_pad_2
2167 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2168 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2169 fw_msix_count = sc->msix_vectors;
/* Program the supplemental reply-post host index register offsets. */
2171 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2173 sc->msix_reg_offset[loop] =
2174 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2179 /* Don't bother allocating more MSI-X vectors than cpus */
2180 sc->msix_vectors = min(sc->msix_vectors,
2183 /* Allocate MSI-x vectors */
2184 if (mrsas_allocate_msix(sc) == SUCCESS)
2185 sc->msix_enable = 1;
2187 sc->msix_enable = 0;
2189 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2190 "Online CPU %d Current MSIX <%d>\n",
2191 fw_msix_count, mp_ncpus, sc->msix_vectors);
2193 if (mrsas_init_adapter(sc) != SUCCESS) {
2194 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2197 /* Allocate internal commands for pass-thru */
2198 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2199 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2202 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2203 if (!sc->ctrl_info) {
2204 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2208 * Get the controller info from FW, so that the MAX VD support
2209 * availability can be decided.
2211 if (mrsas_get_ctrl_info(sc)) {
2212 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2215 sc->secure_jbod_support =
2216 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2218 if (sc->secure_jbod_support)
2219 device_printf(sc->mrsas_dev, "FW supports SED \n");
2221 if (sc->use_seqnum_jbod_fp)
2222 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2224 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2225 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2226 "There seems to be some problem in the controller\n"
2227 "Please contact to the SUPPORT TEAM if the problem persists\n");
2229 megasas_setup_jbod_map(sc);
2231 /* For pass-thru, get PD/LD list and controller info */
2232 memset(sc->pd_list, 0,
2233 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2234 if (mrsas_get_pd_list(sc) != SUCCESS) {
2235 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2238 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2239 if (mrsas_get_ld_list(sc) != SUCCESS) {
2240 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2244 * Compute the max allowed sectors per IO: The controller info has
2245 * two limits on max sectors. Driver should use the minimum of these
2248 * 1 << stripe_sz_ops.min = max sectors per strip
2250 * Note that older firmwares ( < FW ver 30) didn't report information to
2251 * calculate max_sectors_1. So the number ended up as zero always.
2254 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2255 sc->ctrl_info->max_strips_per_io;
2256 max_sectors_2 = sc->ctrl_info->max_request_size;
2257 tmp_sectors = min(max_sectors_1, max_sectors_2);
/* SGE-imposed ceiling: pages-per-command * page size / 512-byte sectors. */
2258 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2260 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2261 sc->max_sectors_per_req = tmp_sectors;
2263 sc->disableOnlineCtrlReset =
2264 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2265 sc->UnevenSpanSupport =
2266 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2267 if (sc->UnevenSpanSupport) {
2268 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2269 sc->UnevenSpanSupport);
/* Fast-path I/O only if the RAID map validates. */
2271 if (MR_ValidateMapInfo(sc))
2272 sc->fast_path_io = 1;
2274 sc->fast_path_io = 0;
2280 * mrsas_init_adapter: Initializes the adapter/controller
2281 * input: Adapter soft state
2283 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2284 * ROC/controller. The FW register is read to determined the number of
2285 * commands that is supported. All memory allocations for IO is based on
2286 * max_cmd. Appropriate calculations are performed in this function.
2289 mrsas_init_adapter(struct mrsas_softc *sc)
2292 u_int32_t max_cmd, scratch_pad_2;
2296 /* Read FW status register */
2297 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2299 /* Get operational params from status register */
2300 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2302 /* Decrement the max supported by 1, to correlate with FW */
2303 sc->max_fw_cmds = sc->max_fw_cmds - 1;
2304 max_cmd = sc->max_fw_cmds;
2306 /* Determine allocation size of command frames */
/* Reply queue depth: round (max_cmd+1) up to a multiple of 16, doubled. */
2307 sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2308 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2309 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2310 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2311 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2312 outbound_scratch_pad_2));
2314 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2315 * Firmware support extended IO chain frame which is 4 time more
2316 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2317 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
2319 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2320 sc->max_chain_frame_sz =
2321 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2324 sc->max_chain_frame_sz =
2325 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2328 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
/* SGEs that fit in the main frame after the fixed request header (each
 * descriptor slot is 16 bytes). */
2329 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2330 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2332 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
/* -2 reserves entries for the chain element(s) linking main -> chain. */
2333 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2335 mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
2336 sc->max_num_sge, sc->max_chain_frame_sz);
2338 /* Used for pass thru MFI frame (DCMD) */
2339 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2341 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2342 sizeof(MPI2_SGE_IO_UNION)) / 16;
2344 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* Reset per-queue reply consumer indices before allocating queues. */
2346 for (i = 0; i < count; i++)
2347 sc->last_reply_idx[i] = 0;
2349 ret = mrsas_alloc_mem(sc);
2353 ret = mrsas_alloc_mpt_cmds(sc);
2357 ret = mrsas_ioc_init(sc);
2365 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2366 * input: Adapter soft state
2368 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2371 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2375 /* Allocate IOC INIT command */
/*
 * The first 1024 bytes hold the MFI init frame; the MPI2 IOC INIT
 * request is placed right after it (see mrsas_ioc_init, base + 1024).
 */
2376 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2377 if (bus_dma_tag_create(sc->mrsas_parent_tag,
/* 32-bit DMA addressing for this buffer. */
2379 BUS_SPACE_MAXADDR_32BIT,
2387 &sc->ioc_init_tag)) {
2388 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2391 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2392 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2393 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2396 bzero(sc->ioc_init_mem, ioc_init_size);
/* Load the map; mrsas_addr_cb stores the bus address in ioc_init_phys_mem. */
2397 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2398 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2399 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2400 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2407 * mrsas_free_ioc_cmd: Frees memory for IOC Init command
2408 * input: Adapter soft state
2410 * Deallocates memory of the IOC Init cmd.
2413 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
/* Tear down in reverse order of allocation: unload map, free memory, destroy tag. */
2415 if (sc->ioc_init_phys_mem)
2416 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2417 if (sc->ioc_init_mem != NULL)
2418 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2419 if (sc->ioc_init_tag != NULL)
2420 bus_dma_tag_destroy(sc->ioc_init_tag);
2424 * mrsas_ioc_init: Sends IOC Init command to FW
2425 * input: Adapter soft state
2427 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2430 mrsas_ioc_init(struct mrsas_softc *sc)
2432 struct mrsas_init_frame *init_frame;
2433 pMpi2IOCInitRequest_t IOCInitMsg;
2434 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
2435 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
2436 bus_addr_t phys_addr;
2439 /* Allocate memory for the IOC INIT command */
2440 if (mrsas_alloc_ioc_cmd(sc)) {
2441 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/*
 * The MPI2 IOC INIT message lives 1024 bytes past the buffer start,
 * matching the layout set up in mrsas_alloc_ioc_cmd.
 */
2444 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
2445 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
2446 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2447 IOCInitMsg->MsgVersion = MPI2_VERSION;
2448 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
2449 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
2450 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
2451 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
2452 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
2453 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
/* The MFI init frame occupies the start of the same buffer. */
2455 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
2456 init_frame->cmd = MFI_CMD_INIT;
/* 0xFF marks the command as pending; firmware overwrites it on completion. */
2457 init_frame->cmd_status = 0xFF;
2458 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2460 /* driver support Extended MSIX */
2461 if ((sc->device_id == MRSAS_INVADER) ||
2462 (sc->device_id == MRSAS_FURY) ||
2463 (sc->device_id == MRSAS_INTRUDER) ||
2464 (sc->device_id == MRSAS_INTRUDER_24) ||
2465 (sc->device_id == MRSAS_CUTLASS_52) ||
2466 (sc->device_id == MRSAS_CUTLASS_53)) {
2467 init_frame->driver_operations.
2468 mfi_capabilities.support_additional_msix = 1;
/* Advertise the driver version string to firmware if the buffer exists. */
2470 if (sc->verbuf_mem) {
2471 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
2473 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
2474 init_frame->driver_ver_hi = 0;
/* Advertise supported driver capabilities to the firmware. */
2476 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
2477 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
2478 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
2479 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
2480 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
/* Point the MFI frame at the IOC INIT message (base + 1024). */
2481 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
2482 init_frame->queue_info_new_phys_addr_lo = phys_addr;
2483 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
/* Build the MFA request descriptor and fire it with interrupts disabled. */
2485 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
2486 req_desc.MFAIo.RequestFlags =
2487 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2489 mrsas_disable_intr(sc);
2490 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
2491 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
2494 * Poll response timer to wait for Firmware response. While this
2495 * timer with the DELAY call could block CPU, the time interval for
2496 * this is only 1 millisecond.
2498 if (init_frame->cmd_status == 0xFF) {
2499 for (i = 0; i < (max_wait * 1000); i++) {
2500 if (init_frame->cmd_status == 0xFF)
2506 if (init_frame->cmd_status == 0)
2507 mrsas_dprint(sc, MRSAS_OCR,
2508 "IOC INIT response received from FW.\n");
/* Still 0xFF after the full wait => firmware never responded. */
2510 if (init_frame->cmd_status == 0xFF)
2511 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
2513 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
2517 mrsas_free_ioc_cmd(sc);
2522 * mrsas_alloc_mpt_cmds: Allocates the command packets
2523 * input: Adapter instance soft state
2525 * This function allocates the internal commands for IOs. Each command that is
2526 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2527 * array is allocated with mrsas_mpt_cmd context. The free commands are
2528 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2532 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2535 u_int32_t max_cmd, count;
2536 struct mrsas_mpt_cmd *cmd;
2537 pMpi2ReplyDescriptorsUnion_t reply_desc;
2538 u_int32_t offset, chain_offset, sense_offset;
2539 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2540 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2542 max_cmd = sc->max_fw_cmds;
/* Request descriptor array, one descriptor per firmware command. */
2544 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2545 if (!sc->req_desc) {
2546 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2549 memset(sc->req_desc, 0, sc->request_alloc_sz);
2552 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2553 * Allocate the dynamic array first and then allocate individual
2556 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2557 if (!sc->mpt_cmd_list) {
2558 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2561 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2562 for (i = 0; i < max_cmd; i++) {
2563 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2565 if (!sc->mpt_cmd_list[i]) {
/* Allocation failed part-way: free everything allocated so far. */
2566 for (j = 0; j < i; j++)
2567 free(sc->mpt_cmd_list[j], M_MRSAS);
2568 free(sc->mpt_cmd_list, M_MRSAS);
2569 sc->mpt_cmd_list = NULL;
/*
 * IO requests start one frame past the base; frame 0 is reserved
 * (mirrors the extra frame allocated in mrsas_init_adapter).
 */
2574 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2575 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2576 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2577 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2578 sense_base = (u_int8_t *)sc->sense_mem;
2579 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
/* Carve per-command slices out of the IO-request, chain and sense pools. */
2580 for (i = 0; i < max_cmd; i++) {
2581 cmd = sc->mpt_cmd_list[i];
2582 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2583 chain_offset = sc->max_chain_frame_sz * i;
2584 sense_offset = MRSAS_SENSE_LEN * i;
2585 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2587 cmd->ccb_ptr = NULL;
2588 callout_init(&cmd->cm_callout, 0);
/* MRSAS_ULONG_MAX marks "no MFI sync command associated". */
2589 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2591 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2592 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2593 cmd->io_request_phys_addr = io_req_base_phys + offset;
2594 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2595 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2596 cmd->sense = sense_base + sense_offset;
2597 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2598 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
/* Command is ready; add it to the free pool. */
2601 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2604 /* Initialize reply descriptor array to 0xFFFFFFFF */
2605 reply_desc = sc->reply_desc_mem;
2606 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2607 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2608 reply_desc->Words = MRSAS_ULONG_MAX;
2614 * mrsas_fire_cmd: Sends command to FW
2615 * input: Adapter softstate
2616 * request descriptor address low
2617 * request descriptor address high
2619 * This functions fires the command to Firmware by writing to the
2620 * inbound_low_queue_port and inbound_high_queue_port.
2623 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2624 u_int32_t req_desc_hi)
/*
 * The pci_lock serializes the low/high queue-port writes so the two
 * halves of one 64-bit descriptor are never interleaved with another's.
 */
2626 mtx_lock(&sc->pci_lock);
2627 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2629 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2631 mtx_unlock(&sc->pci_lock);
2635 * mrsas_transition_to_ready: Move FW to Ready state input:
2636 * Adapter instance soft state
2638 * During initialization, the FW can potentially be in any one of several
2639 * possible states. If the FW is in operational or waiting-for-handshake states,
2640 * driver must take steps to bring it to ready state. Otherwise, it has to
2641 * wait for the ready state.
2644 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2648 u_int32_t val, fw_state;
2649 u_int32_t cur_state;
2650 u_int32_t abs_state, curr_abs_state;
/* Sample the current firmware state from the outbound scratch pad. */
2652 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2653 fw_state = val & MFI_STATE_MASK;
2654 max_wait = MRSAS_RESET_WAIT_TIME;
2656 if (fw_state != MFI_STATE_READY)
2657 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
/* Nudge the firmware toward READY, one state transition per iteration. */
2659 while (fw_state != MFI_STATE_READY) {
2660 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2662 case MFI_STATE_FAULT:
2663 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2665 cur_state = MFI_STATE_FAULT;
2669 case MFI_STATE_WAIT_HANDSHAKE:
2670 /* Set the CLR bit in inbound doorbell */
2671 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2672 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
2673 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2675 case MFI_STATE_BOOT_MESSAGE_PENDING:
2676 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2678 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2680 case MFI_STATE_OPERATIONAL:
2682 * Bring it to READY state; assuming max wait 10
2685 mrsas_disable_intr(sc);
2686 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
/* Wait for the doorbell to acknowledge the reset request. */
2687 for (i = 0; i < max_wait * 1000; i++) {
2688 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2693 cur_state = MFI_STATE_OPERATIONAL;
2695 case MFI_STATE_UNDEFINED:
2697 * This state should not last for more than 2
2700 cur_state = MFI_STATE_UNDEFINED;
/* Transient boot states: nothing to do, just wait them out below. */
2702 case MFI_STATE_BB_INIT:
2703 cur_state = MFI_STATE_BB_INIT;
2705 case MFI_STATE_FW_INIT:
2706 cur_state = MFI_STATE_FW_INIT;
2708 case MFI_STATE_FW_INIT_2:
2709 cur_state = MFI_STATE_FW_INIT_2;
2711 case MFI_STATE_DEVICE_SCAN:
2712 cur_state = MFI_STATE_DEVICE_SCAN;
2714 case MFI_STATE_FLUSH_CACHE:
2715 cur_state = MFI_STATE_FLUSH_CACHE;
2718 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2723 * The cur_state should not last for more than max_wait secs
2725 for (i = 0; i < (max_wait * 1000); i++) {
2726 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2727 outbound_scratch_pad)) & MFI_STATE_MASK);
2728 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2729 outbound_scratch_pad));
2730 if (abs_state == curr_abs_state)
2737 * Return error if fw_state hasn't changed after max_wait
2739 if (curr_abs_state == abs_state) {
2740 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2741 "in %d secs\n", fw_state, max_wait);
2745 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2750 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2751 * input: Adapter soft state
2753 * This function removes an MFI command from the command list.
2755 struct mrsas_mfi_cmd *
2756 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2758 struct mrsas_mfi_cmd *cmd = NULL;
/* Pop the head of the free list under the pool lock; NULL if pool empty. */
2760 mtx_lock(&sc->mfi_cmd_pool_lock);
2761 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2762 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2763 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2765 mtx_unlock(&sc->mfi_cmd_pool_lock);
2771 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
2772 * input: Adapter Context.
2774 * This function will check FW status register and flag do_timeout_reset flag.
2775 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2779 mrsas_ocr_thread(void *arg)
2781 struct mrsas_softc *sc;
2782 u_int32_t fw_status, fw_state;
2784 sc = (struct mrsas_softc *)arg;
2786 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2788 sc->ocr_thread_active = 1;
2789 mtx_lock(&sc->sim_lock);
2791 /* Sleep for 1 second and check the queue status */
/* Woken early via ocr_chan, or by the fault-check-delay timeout. */
2792 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2793 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2794 if (sc->remove_in_progress ||
2795 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2796 mrsas_dprint(sc, MRSAS_OCR,
2797 "Exit due to %s from %s\n",
2798 sc->remove_in_progress ? "Shutdown" :
2799 "Hardware critical error", __func__);
/* Check firmware health; trigger OCR on FAULT or on a flagged IO timeout. */
2802 fw_status = mrsas_read_reg(sc,
2803 offsetof(mrsas_reg_set, outbound_scratch_pad));
2804 fw_state = fw_status & MFI_STATE_MASK;
2805 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2806 device_printf(sc->mrsas_dev, "%s started due to %s!\n",
2807 sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
2808 sc->do_timedout_reset ? "IO Timeout" :
2809 "FW fault detected");
/* Flag the reset under ioctl_lock so new ioctls see it. */
2810 mtx_lock_spin(&sc->ioctl_lock);
2811 sc->reset_in_progress = 1;
2813 mtx_unlock_spin(&sc->ioctl_lock);
/* Freeze CAM, perform the reset, then release and clear the flags. */
2814 mrsas_xpt_freeze(sc);
2815 mrsas_reset_ctrl(sc, sc->do_timedout_reset);
2816 mrsas_xpt_release(sc);
2817 sc->reset_in_progress = 0;
2818 sc->do_timedout_reset = 0;
2821 mtx_unlock(&sc->sim_lock);
2822 sc->ocr_thread_active = 0;
2823 mrsas_kproc_exit(0);
2827 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
2828 * input: Adapter Context.
2830 * This function will clear reply descriptor so that post OCR driver and FW will
2834 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2837 pMpi2ReplyDescriptorsUnion_t reply_desc;
/* Rewind the per-vector reply indices before reinitializing descriptors. */
2839 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2840 for (i = 0; i < count; i++)
2841 sc->last_reply_idx[i] = 0;
/* 0xFFFFFFFF marks a descriptor slot as unused, same as at first init. */
2843 reply_desc = sc->reply_desc_mem;
2844 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2845 reply_desc->Words = MRSAS_ULONG_MAX;
2850 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
2851 * input: Adapter Context.
2853 * This function will run from thread context so that it can sleep. 1. Do not
2854 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2855 * to complete for 180 seconds. 3. If #2 does not find any outstanding
2856 * command Controller is in working state, so skip OCR. Otherwise, do
2857 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2858 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2859 * OCR, Re-fire Management command and move Controller to Operation state.
2862 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
2864 int retval = SUCCESS, i, j, retry = 0;
2865 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2867 struct mrsas_mfi_cmd *mfi_cmd;
2868 struct mrsas_mpt_cmd *mpt_cmd;
2869 union mrsas_evt_class_locale class_locale;
2871 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2872 device_printf(sc->mrsas_dev,
2873 "mrsas: Hardware critical error, returning FAIL.\n");
/* Enter reset state: flag it, quiesce interrupts, give FW a grace period. */
2876 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2877 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2878 mrsas_disable_intr(sc);
2879 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
2880 sc->mrsas_fw_fault_check_delay * hz);
2882 /* First try waiting for commands to complete */
2883 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
2884 mrsas_dprint(sc, MRSAS_OCR,
2885 "resetting adapter from %s.\n",
2887 /* Now return commands back to the CAM layer */
/* sim_lock dropped so mrsas_cmd_done can complete ccbs without recursion. */
2888 mtx_unlock(&sc->sim_lock);
2889 for (i = 0; i < sc->max_fw_cmds; i++) {
2890 mpt_cmd = sc->mpt_cmd_list[i];
2891 if (mpt_cmd->ccb_ptr) {
2892 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2893 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2894 mrsas_cmd_done(sc, mpt_cmd);
2895 mrsas_atomic_dec(&sc->fw_outstanding);
2898 mtx_lock(&sc->sim_lock);
2900 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2901 outbound_scratch_pad));
2902 abs_state = status_reg & MFI_STATE_MASK;
2903 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2904 if (sc->disableOnlineCtrlReset ||
2905 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2906 /* Reset not supported, kill adapter */
2907 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2912 /* Now try to reset the chip */
2913 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
/* Magic key sequence that unlocks the fusion diagnostic register. */
2914 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2915 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2916 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2917 MPI2_WRSEQ_1ST_KEY_VALUE);
2918 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2919 MPI2_WRSEQ_2ND_KEY_VALUE);
2920 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2921 MPI2_WRSEQ_3RD_KEY_VALUE);
2922 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2923 MPI2_WRSEQ_4TH_KEY_VALUE);
2924 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2925 MPI2_WRSEQ_5TH_KEY_VALUE);
2926 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2927 MPI2_WRSEQ_6TH_KEY_VALUE);
2929 /* Check that the diag write enable (DRWE) bit is on */
2930 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2933 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2935 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2937 if (retry++ == 100) {
2938 mrsas_dprint(sc, MRSAS_OCR,
2939 "Host diag unlock failed!\n");
/* DRWE never came up; try the next reset attempt. */
2943 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2946 /* Send chip reset command */
2947 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2948 host_diag | HOST_DIAG_RESET_ADAPTER);
2951 /* Make sure reset adapter bit is cleared */
2952 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2955 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2957 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2959 if (retry++ == 1000) {
2960 mrsas_dprint(sc, MRSAS_OCR,
2961 "Diag reset adapter never cleared!\n");
2965 if (host_diag & HOST_DIAG_RESET_ADAPTER)
/* Wait for the firmware to progress past its early init states. */
2968 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2969 outbound_scratch_pad)) & MFI_STATE_MASK;
2972 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2974 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2975 outbound_scratch_pad)) & MFI_STATE_MASK;
2977 if (abs_state <= MFI_STATE_FW_INIT) {
2978 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2979 " state = 0x%x\n", abs_state);
2982 /* Wait for FW to become ready */
2983 if (mrsas_transition_to_ready(sc, 1)) {
2984 mrsas_dprint(sc, MRSAS_OCR,
2985 "mrsas: Failed to transition controller to ready.\n");
/* Re-initialize queues and re-issue IOC INIT against the reset firmware. */
2988 mrsas_reset_reply_desc(sc);
2989 if (mrsas_ioc_init(sc)) {
2990 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
/* Release any MFI/MPT pairs that were in flight when the reset started. */
2993 for (j = 0; j < sc->max_fw_cmds; j++) {
2994 mpt_cmd = sc->mpt_cmd_list[j];
2995 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2996 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2997 mrsas_release_mfi_cmd(mfi_cmd);
2998 mrsas_release_mpt_cmd(mpt_cmd);
3004 /* Reset load balance info */
3005 memset(sc->load_balance_info, 0,
3006 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3008 if (mrsas_get_ctrl_info(sc)) {
/* Refresh RAID map, JBOD map, PD and LD lists after the reset. */
3013 if (!mrsas_get_map_info(sc))
3014 mrsas_sync_map_info(sc);
3016 megasas_setup_jbod_map(sc);
3018 memset(sc->pd_list, 0,
3019 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3020 if (mrsas_get_pd_list(sc) != SUCCESS) {
3021 device_printf(sc->mrsas_dev, "Get PD list failed from OCR.\n"
3022 "Will get the latest PD LIST after OCR on event.\n");
3024 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
/* NOTE(review): "lsit" typo in the message below; left as-is (runtime string). */
3025 if (mrsas_get_ld_list(sc) != SUCCESS) {
3026 device_printf(sc->mrsas_dev, "Get LD lsit failed from OCR.\n"
3027 "Will get the latest LD LIST after OCR on event.\n");
3029 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3030 mrsas_enable_intr(sc);
3031 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3033 /* Register AEN with FW for last sequence number */
3034 class_locale.members.reserved = 0;
3035 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3036 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3038 if (mrsas_register_aen(sc, sc->last_seq_num,
3039 class_locale.word)) {
3040 device_printf(sc->mrsas_dev,
3041 "ERROR: AEN registration FAILED from OCR !!! "
3042 "Further events from the controller cannot be notified."
3043 "Either there is some problem in the controller"
3044 "or the controller does not support AEN.\n"
3045 "Please contact to the SUPPORT TEAM if the problem persists\n");
3047 /* Adapter reset completed successfully */
3048 device_printf(sc->mrsas_dev, "Reset successful\n");
3052 /* Reset failed, kill the adapter */
3053 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
/* No outstanding commands were pending: controller is fine, skip the reset. */
3057 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3058 mrsas_enable_intr(sc);
3059 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3062 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3063 mrsas_dprint(sc, MRSAS_OCR,
3064 "Reset Exit with %d.\n", retval);
3069 * mrsas_kill_hba: Kill HBA when OCR is not supported
3070 * input: Adapter Context.
3072 * This function will kill HBA when OCR is not supported.
3075 mrsas_kill_hba(struct mrsas_softc *sc)
/* Mark the adapter dead first so no new work is accepted. */
3077 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
3079 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
3080 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
/* Read back the doorbell to flush the posted write before completing ioctls. */
3083 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
3084 mrsas_complete_outstanding_ioctls(sc);
3088 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3089 * input: Controller softc
3094 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3097 struct mrsas_mpt_cmd *cmd_mpt;
3098 struct mrsas_mfi_cmd *cmd_mfi;
3099 u_int32_t count, MSIxIndex;
3101 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* Walk all MPT commands, completing any associated MFI passthru ioctls. */
3102 for (i = 0; i < sc->max_fw_cmds; i++) {
3103 cmd_mpt = sc->mpt_cmd_list[i];
/* sync_cmd_idx != MRSAS_ULONG_MAX => this MPT slot carries an MFI command. */
3105 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3106 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3107 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3108 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3109 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3110 cmd_mpt->io_request->RaidContext.status);
3117 * mrsas_wait_for_outstanding: Wait for outstanding commands
3118 * input: Adapter Context.
3120 * This function will wait for 180 seconds for outstanding commands to be
3124 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3126 int i, outstanding, retval = 0;
3127 u_int32_t fw_state, count, MSIxIndex;
/* Poll once per second up to MRSAS_RESET_WAIT_TIME iterations. */
3130 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
3131 if (sc->remove_in_progress) {
3132 mrsas_dprint(sc, MRSAS_OCR,
3133 "Driver remove or shutdown called.\n");
3137 /* Check if firmware is in fault state */
3138 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3139 outbound_scratch_pad)) & MFI_STATE_MASK;
3140 if (fw_state == MFI_STATE_FAULT) {
3141 mrsas_dprint(sc, MRSAS_OCR,
3142 "Found FW in FAULT state, will reset adapter.\n");
/* A DCMD timeout means the adapter must be reset regardless of queue state. */
3146 if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3147 mrsas_dprint(sc, MRSAS_OCR,
3148 "DCMD IO TIMEOUT detected, will reset adapter.\n");
3152 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
/* Periodically log progress and drain completed replies on every vector. */
3156 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3157 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3158 "commands to complete\n", i, outstanding);
3159 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3160 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3161 mrsas_complete_cmd(sc, MSIxIndex);
/* Commands still outstanding after the full wait: caller must reset. */
3166 if (mrsas_atomic_read(&sc->fw_outstanding)) {
3167 mrsas_dprint(sc, MRSAS_OCR,
3168 " pending commands remain after waiting,"
3169 " will reset adapter.\n");
3177 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3178 * input: Command packet for return to free cmd pool
3180 * This function returns the MFI command to the command list.
3183 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
3185 struct mrsas_softc *sc = cmd->sc;
/* Clear per-use state, then return the command to the free pool tail. */
3187 mtx_lock(&sc->mfi_cmd_pool_lock);
3188 cmd->ccb_ptr = NULL;
3189 cmd->cmd_id.frame_count = 0;
3190 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
3191 mtx_unlock(&sc->mfi_cmd_pool_lock);
3197 * mrsas_get_controller_info: Returns FW's controller structure
3198 * input: Adapter soft state
3199 * Controller information structure
3201 * Issues an internal command (DCMD) to get the FW's controller structure. This
3202 * information is mainly used to find out the maximum IO transfer per command
3203 * supported by the FW.
3206 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3209 u_int8_t do_ocr = 1;
3210 struct mrsas_mfi_cmd *cmd;
3211 struct mrsas_dcmd_frame *dcmd;
3213 cmd = mrsas_get_mfi_cmd(sc);
3216 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3219 dcmd = &cmd->frame->dcmd;
/* DMA buffer for the controller-info payload. */
3221 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3222 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3223 mrsas_release_mfi_cmd(cmd);
3226 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Build the MR_DCMD_CTRL_GET_INFO frame: one read SGE covering the buffer. */
3228 dcmd->cmd = MFI_CMD_DCMD;
3229 dcmd->cmd_status = 0xFF;
3230 dcmd->sge_count = 1;
3231 dcmd->flags = MFI_FRAME_DIR_READ;
3234 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
3235 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
3236 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
3237 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
3239 retcode = mrsas_issue_polled(sc, cmd);
3240 if (retcode == ETIMEDOUT)
/* Copy the DMA result into the softc-resident controller info. */
3243 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3246 mrsas_update_ext_vd_details(sc);
3248 sc->use_seqnum_jbod_fp =
3249 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3252 mrsas_free_ctlr_info_cmd(sc);
/* On timeout, request an OCR instead of releasing the stuck command. */
3255 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3257 mrsas_release_mfi_cmd(cmd);
3263 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3265 * sc - Controller's softc
3268 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3270 sc->max256vdSupport =
3271 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3272 /* Below is additional check to address future FW enhancement */
3273 if (sc->ctrl_info->max_lds > 64)
3274 sc->max256vdSupport = 1;
/* Driver-side limits are fixed by channel topology, independent of FW. */
3276 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3277 * MRSAS_MAX_DEV_PER_CHANNEL;
3278 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3279 * MRSAS_MAX_DEV_PER_CHANNEL;
/* FW-side limits depend on whether extended (256) VDs are supported. */
3280 if (sc->max256vdSupport) {
3281 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3282 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3284 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3285 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
/* RAID map sizes: structs embed one MR_LD_SPAN_MAP, hence the "- 1". */
3288 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3289 (sizeof(MR_LD_SPAN_MAP) *
3290 (sc->fw_supported_vd_count - 1));
3291 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3292 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3293 (sizeof(MR_LD_SPAN_MAP) *
3294 (sc->drv_supported_vd_count - 1));
3296 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3298 if (sc->max256vdSupport)
3299 sc->current_map_sz = sc->new_map_sz;
3301 sc->current_map_sz = sc->old_map_sz;
3305 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3306 * input: Adapter soft state
3308 * Allocates DMAable memory for the controller info internal command.
3311 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3315 /* Allocate get controller info command */
3316 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3317 if (bus_dma_tag_create(sc->mrsas_parent_tag,
/* 32-bit DMA addressing for this buffer. */
3319 BUS_SPACE_MAXADDR_32BIT,
3327 &sc->ctlr_info_tag)) {
3328 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3331 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3332 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3333 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
/* Load the map; mrsas_addr_cb stores the bus address in ctlr_info_phys_addr. */
3336 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3337 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3338 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3339 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3342 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3347 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3348 * input: Adapter soft state
3350 * Deallocates memory of the get controller info cmd.
3353 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
/* Tear down in reverse order of allocation: unload map, free memory, destroy tag. */
3355 if (sc->ctlr_info_phys_addr)
3356 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3357 if (sc->ctlr_info_mem != NULL)
3358 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3359 if (sc->ctlr_info_tag != NULL)
3360 bus_dma_tag_destroy(sc->ctlr_info_tag);
3364 * mrsas_issue_polled: Issues a polling command
3365 * inputs: Adapter soft state
3366 * Command packet to be issued
3368 * This function is for posting of internal commands to Firmware. MFI requires
3369 * the cmd_status to be set to 0xFF before posting. The maximum wait time of
3370 * the poll response timer is 180 seconds.
3373 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3375 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3376 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3377 int i, retcode = SUCCESS;
/* 0xFF marks the frame as pending; FW overwrites it on completion. */
3379 frame_hdr->cmd_status = 0xFF;
3380 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3382 /* Issue the frame using inbound queue port */
3383 if (mrsas_issue_dcmd(sc, cmd)) {
3384 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3388 * Poll response timer to wait for Firmware response. While this
3389 * timer with the DELAY call could block CPU, the time interval for
3390 * this is only 1 millisecond.
3392 if (frame_hdr->cmd_status == 0xFF) {
3393 for (i = 0; i < (max_wait * 1000); i++) {
3394 if (frame_hdr->cmd_status == 0xFF)
/* Still pending after max_wait seconds => report the timeout. */
3400 if (frame_hdr->cmd_status == 0xFF) {
3401 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3402 "seconds from %s\n", max_wait, __func__);
3403 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3404 cmd->frame->dcmd.opcode);
3405 retcode = ETIMEDOUT;
3411 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3412 * input: Adapter soft state mfi cmd pointer
3414 * This function is called by mrsas_issue_blocked_cmd() and
3415 * mrsas_issue_polled(), to build the MPT command and then fire the command
3419 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3421 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3423 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3425 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3428 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3434 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3435 * input: Adapter soft state mfi cmd to build
3437 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3438 * command and prepares the MPT command to send to Firmware.
3440 MRSAS_REQUEST_DESCRIPTOR_UNION *
3441 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3443 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3446 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3447 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3450 index = cmd->cmd_id.context.smid;
3452 req_desc = mrsas_get_request_desc(sc, index - 1);
3456 req_desc->addr.Words = 0;
3457 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3459 req_desc->SCSIIO.SMID = index;
3465 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3466 * input: Adapter soft state mfi cmd pointer
3468 * The MPT command and the io_request are setup as a passthru command. The SGE
3469 * chain address is set to frame_phys_addr of the MFI command.
3472 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3474 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3475 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3476 struct mrsas_mpt_cmd *mpt_cmd;
3477 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
/* NOTE(review): the NULL-check on mpt_cmd is not visible in this extraction. */
3479 mpt_cmd = mrsas_get_mpt_cmd(sc);
3483 /* Save the smid. To be used for returning the cmd */
3484 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
/* Cross-link so the MPT completion can find the originating MFI command. */
3486 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3489 * For cmds where the flag is set, store the flag and check on
3490 * completion. For cmds with this flag, don't call
3491 * mrsas_complete_cmd.
3494 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3495 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3497 io_req = mpt_cmd->io_request;
/* On these controller ids, zero the Flags of the last SGE in the main message. */
3499 if ((sc->device_id == MRSAS_INVADER) ||
3500 (sc->device_id == MRSAS_FURY) ||
3501 (sc->device_id == MRSAS_INTRUDER) ||
3502 (sc->device_id == MRSAS_INTRUDER_24) ||
3503 (sc->device_id == MRSAS_CUTLASS_52) ||
3504 (sc->device_id == MRSAS_CUTLASS_53) ||
3505 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3507 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3508 sgl_ptr_end->Flags = 0;
3510 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3512 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3513 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3514 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
/* Chain element points at the physical address of the MFI frame. */
3516 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3518 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3519 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3521 mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
3527 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3528 * input: Adapter soft state Command to be issued
3530 * This function waits on an event for the command to be returned from the ISR.
3531 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3532 * internal and ioctl commands.
3535 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3537 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3538 unsigned long total_time = 0;
3539 int retcode = SUCCESS;
3541 /* Initialize cmd_status */
3542 cmd->cmd_status = 0xFF;
3544 /* Build MPT-MFI command for issue to FW */
3545 if (mrsas_issue_dcmd(sc, cmd)) {
3546 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
/*
 * NOTE(review): sc->chan stores the address of the local 'cmd' pointer, but
 * the actual sleep/wakeup channel is &sc->chan itself (see tsleep below and
 * wakeup_one in mrsas_wakeup()).
 */
3549 sc->chan = (void *)&cmd;
/* Sleep in 1-second (hz) slices until the ISR flips cmd_status from 0xFF. */
3552 if (cmd->cmd_status == 0xFF) {
3553 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3557 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL
3560 if (total_time >= max_wait) {
3561 device_printf(sc->mrsas_dev,
3562 "Internal command timed out after %d seconds.\n", max_wait);
/* Still incomplete after the wait window: report a DCMD timeout. */
3569 if (cmd->cmd_status == 0xFF) {
3570 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3571 "seconds from %s\n", max_wait, __func__);
3572 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3573 cmd->frame->dcmd.opcode);
3574 retcode = ETIMEDOUT;
3580 * mrsas_complete_mptmfi_passthru: Completes a command
3581 * input: @sc: Adapter soft state
3582 * @cmd: Command to be completed
3583 * @status: cmd completion status
3585 * This function is called from mrsas_complete_cmd() after an interrupt is
3586 * received from Firmware, and io_request->Function is
3587 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
/*
 * NOTE(review): lossy extraction -- the switch(hdr->cmd) header, several
 * case labels (e.g. the DCMD and ABORT cases) and break statements are not
 * visible in this chunk; the dispatch below is on the MFI frame command type.
 */
3590 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
3593 struct mrsas_header *hdr = &cmd->frame->hdr;
3594 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
3596 /* Reset the retry counter for future re-tries */
3597 cmd->retry_for_fw_reset = 0;
3600 cmd->ccb_ptr = NULL;
3603 case MFI_CMD_INVALID:
3604 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
3606 case MFI_CMD_PD_SCSI_IO:
3607 case MFI_CMD_LD_SCSI_IO:
3609 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3610 * issued either through an IO path or an IOCTL path. If it
3611 * was via IOCTL, we will send it to internal completion.
3613 if (cmd->sync_cmd) {
3615 mrsas_wakeup(sc, cmd);
3621 /* Check for LD map update */
3622 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
3623 (cmd->frame->dcmd.mbox.b[1] == 1)) {
/* Disable fast path until the new map is validated under raidmap_lock. */
3624 sc->fast_path_io = 0;
3625 mtx_lock(&sc->raidmap_lock);
3626 sc->map_update_cmd = NULL;
3627 if (cmd_status != 0) {
3628 if (cmd_status != MFI_STAT_NOT_FOUND)
3629 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
3631 mrsas_release_mfi_cmd(cmd);
3632 mtx_unlock(&sc->raidmap_lock);
3637 mrsas_release_mfi_cmd(cmd);
3638 if (MR_ValidateMapInfo(sc))
3639 sc->fast_path_io = 0;
3641 sc->fast_path_io = 1;
/* Re-arm the pended map-sync so FW notifies us of the next map change. */
3642 mrsas_sync_map_info(sc);
3643 mtx_unlock(&sc->raidmap_lock);
3646 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3647 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
3648 sc->mrsas_aen_triggered = 0;
3650 /* FW has an updated PD sequence */
3651 if ((cmd->frame->dcmd.opcode ==
3652 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3653 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3655 mtx_lock(&sc->raidmap_lock);
3656 sc->jbod_seq_cmd = NULL;
3657 mrsas_release_mfi_cmd(cmd);
3659 if (cmd_status == MFI_STAT_OK) {
3660 sc->pd_seq_map_id++;
3661 /* Re-register a pd sync seq num cmd */
3662 if (megasas_sync_pd_seq_num(sc, true))
3663 sc->use_seqnum_jbod_fp = 0;
3665 sc->use_seqnum_jbod_fp = 0;
3666 device_printf(sc->mrsas_dev,
3667 "Jbod map sync failed, status=%x\n", cmd_status);
3669 mtx_unlock(&sc->raidmap_lock);
3672 /* See if got an event notification */
3673 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
3674 mrsas_complete_aen(sc, cmd);
3676 mrsas_wakeup(sc, cmd);
3679 /* Command issued to abort another cmd return */
3680 mrsas_complete_abort(sc, cmd);
3683 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
3689 * mrsas_wakeup: Completes an internal command
3690 * input: Adapter soft state
3691 * Command to be completed
3693 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3694 * timer is started. This function is called from
3695 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3696 * from the command wait.
3699 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3701 cmd->cmd_status = cmd->frame->io.cmd_status;
3703 if (cmd->cmd_status == 0xFF)
3704 cmd->cmd_status = 0;
3706 sc->chan = (void *)&cmd;
3707 wakeup_one((void *)&sc->chan);
3712 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
3713 * Adapter soft state Shutdown/Hibernate
3715 * This function issues a DCMD internal command to Firmware to initiate shutdown
3716 * of the controller.
3719 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3721 struct mrsas_mfi_cmd *cmd;
3722 struct mrsas_dcmd_frame *dcmd;
/* Skip FW interaction entirely if the adapter is already dead. */
3724 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3727 cmd = mrsas_get_mfi_cmd(sc);
3729 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
/* Abort outstanding AEN / map-update / JBOD-seq commands before shutdown. */
3733 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3734 if (sc->map_update_cmd)
3735 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3736 if (sc->jbod_seq_cmd)
3737 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
3739 dcmd = &cmd->frame->dcmd;
3740 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Dataless DCMD: opcode (shutdown or hibernate) is supplied by the caller. */
3742 dcmd->cmd = MFI_CMD_DCMD;
3743 dcmd->cmd_status = 0x0;
3744 dcmd->sge_count = 0;
3745 dcmd->flags = MFI_FRAME_DIR_NONE;
3748 dcmd->data_xfer_len = 0;
3749 dcmd->opcode = opcode;
3751 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3753 mrsas_issue_blocked_cmd(sc, cmd);
3754 mrsas_release_mfi_cmd(cmd);
3760 * mrsas_flush_cache: Requests FW to flush all its caches input:
3761 * Adapter soft state
3763 * This function is issues a DCMD internal command to Firmware to initiate
3764 * flushing of all caches.
3767 mrsas_flush_cache(struct mrsas_softc *sc)
3769 struct mrsas_mfi_cmd *cmd;
3770 struct mrsas_dcmd_frame *dcmd;
/* Nothing to flush if the adapter is already in critical error state. */
3772 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3775 cmd = mrsas_get_mfi_cmd(sc);
3777 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3780 dcmd = &cmd->frame->dcmd;
3781 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Dataless DCMD; mbox byte 0 selects both controller and disk caches. */
3783 dcmd->cmd = MFI_CMD_DCMD;
3784 dcmd->cmd_status = 0x0;
3785 dcmd->sge_count = 0;
3786 dcmd->flags = MFI_FRAME_DIR_NONE;
3789 dcmd->data_xfer_len = 0;
3790 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3791 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3793 mrsas_issue_blocked_cmd(sc, cmd);
3794 mrsas_release_mfi_cmd(cmd);
/*
 * megasas_sync_pd_seq_num:	Fetch/register the JBOD (system PD) sequence
 *				number map from Firmware.
 * input:	Adapter soft state; 'pend' selects an async pended register
 *		(FW completes it when the PD map changes) versus an
 *		immediate polled read of the map.
 */
3800 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
3803 u_int8_t do_ocr = 1;
3804 struct mrsas_mfi_cmd *cmd;
3805 struct mrsas_dcmd_frame *dcmd;
3806 uint32_t pd_seq_map_sz;
3807 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3808 bus_addr_t pd_seq_h;
/* Flexible-array-style sizing: header already contains one MR_PD_CFG_SEQ. */
3810 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
3811 (sizeof(struct MR_PD_CFG_SEQ) *
3812 (MAX_PHYSICAL_DEVICES - 1));
3814 cmd = mrsas_get_mfi_cmd(sc);
3816 device_printf(sc->mrsas_dev,
3817 "Cannot alloc for ld map info cmd.\n");
3820 dcmd = &cmd->frame->dcmd;
/* Double-buffered JBOD map: buffer selected by the current pd_seq_map_id. */
3822 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
3823 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
3825 device_printf(sc->mrsas_dev,
3826 "Failed to alloc mem for jbod map info.\n");
3827 mrsas_release_mfi_cmd(cmd);
3830 memset(pd_sync, 0, pd_seq_map_sz);
3831 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3832 dcmd->cmd = MFI_CMD_DCMD;
3833 dcmd->cmd_status = 0xFF;
3834 dcmd->sge_count = 1;
3837 dcmd->data_xfer_len = (pd_seq_map_sz);
3838 dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
3839 dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
3840 dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
/* Pended registration: FW holds the cmd and completes it on a map change. */
3843 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
3844 dcmd->flags = (MFI_FRAME_DIR_WRITE);
3845 sc->jbod_seq_cmd = cmd;
3846 if (mrsas_issue_dcmd(sc, cmd)) {
3847 device_printf(sc->mrsas_dev,
3848 "Fail to send sync map info command.\n");
/* Non-pended path: synchronous polled read of the current JBOD map. */
3853 dcmd->flags = MFI_FRAME_DIR_READ;
3855 retcode = mrsas_issue_polled(sc, cmd);
3856 if (retcode == ETIMEDOUT)
/* Sanity-check the FW-reported PD count against the driver's limit. */
3859 if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
3860 device_printf(sc->mrsas_dev,
3861 "driver supports max %d JBOD, but FW reports %d\n",
3862 MAX_PHYSICAL_DEVICES, pd_sync->count);
3866 sc->pd_seq_map_id++;
/* Timed-out DCMD: request an online controller reset (OCR). */
3871 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3873 mrsas_release_mfi_cmd(cmd);
3879 * mrsas_get_map_info: Load and validate RAID map input:
3880 * Adapter instance soft state
3882 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3883 * and validate RAID map. It returns 0 if successful, 1 other- wise.
3886 mrsas_get_map_info(struct mrsas_softc *sc)
3888 uint8_t retcode = 0;
3890 sc->fast_path_io = 0;
3891 if (!mrsas_get_ld_map_info(sc)) {
3892 retcode = MR_ValidateMapInfo(sc);
3894 sc->fast_path_io = 1;
3902 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
3903 * Adapter instance soft state
3905 * Issues an internal command (DCMD) to get the FW's controller PD list
3909 mrsas_get_ld_map_info(struct mrsas_softc *sc)
3912 struct mrsas_mfi_cmd *cmd;
3913 struct mrsas_dcmd_frame *dcmd;
3915 bus_addr_t map_phys_addr = 0;
3917 cmd = mrsas_get_mfi_cmd(sc);
3919 device_printf(sc->mrsas_dev,
3920 "Cannot alloc for ld map info cmd.\n");
3923 dcmd = &cmd->frame->dcmd;
3925 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3926 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3928 device_printf(sc->mrsas_dev,
3929 "Failed to alloc mem for ld map info.\n");
3930 mrsas_release_mfi_cmd(cmd);
3933 memset(map, 0, sizeof(sc->max_map_sz));
3934 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3936 dcmd->cmd = MFI_CMD_DCMD;
3937 dcmd->cmd_status = 0xFF;
3938 dcmd->sge_count = 1;
3939 dcmd->flags = MFI_FRAME_DIR_READ;
3942 dcmd->data_xfer_len = sc->current_map_sz;
3943 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3944 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3945 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3947 retcode = mrsas_issue_polled(sc, cmd);
3948 if (retcode == ETIMEDOUT)
3949 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3951 mrsas_release_mfi_cmd(cmd);
3957 * mrsas_sync_map_info: Get FW's ld_map structure input:
3958 * Adapter instance soft state
3960 * Issues an internal command (DCMD) to get the FW's controller PD list
3964 mrsas_sync_map_info(struct mrsas_softc *sc)
3967 struct mrsas_mfi_cmd *cmd;
3968 struct mrsas_dcmd_frame *dcmd;
3969 uint32_t size_sync_info, num_lds;
3970 MR_LD_TARGET_SYNC *target_map = NULL;
3971 MR_DRV_RAID_MAP_ALL *map;
3973 MR_LD_TARGET_SYNC *ld_sync;
3974 bus_addr_t map_phys_addr = 0;
3976 cmd = mrsas_get_mfi_cmd(sc);
3978 device_printf(sc->mrsas_dev,
3979 "Cannot alloc for sync map info cmd\n");
/* Current (validated) driver map supplies the LD count and sequence numbers. */
3982 map = sc->ld_drv_map[sc->map_id & 1];
3983 num_lds = map->raidMap.ldCount;
3985 dcmd = &cmd->frame->dcmd;
3986 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3987 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* The *previous* raidmap buffer ((map_id - 1) & 1) is reused to send the sync. */
3989 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3990 memset(target_map, 0, sc->max_map_sz);
3992 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3994 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
/* Populate one targetId/seqNum entry per logical drive. */
3996 for (i = 0; i < num_lds; i++, ld_sync++) {
3997 raid = MR_LdRaidGet(i, map);
3998 ld_sync->targetId = MR_GetLDTgtId(i, map);
3999 ld_sync->seqNum = raid->seqNum;
4002 dcmd->cmd = MFI_CMD_DCMD;
4003 dcmd->cmd_status = 0xFF;
4004 dcmd->sge_count = 1;
4005 dcmd->flags = MFI_FRAME_DIR_WRITE;
4008 dcmd->data_xfer_len = sc->current_map_sz;
/* Pended registration: FW completes this cmd when the LD map changes. */
4009 dcmd->mbox.b[0] = num_lds;
4010 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4011 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4012 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4013 dcmd->sgl.sge32[0].length = sc->current_map_sz;
4015 sc->map_update_cmd = cmd;
4016 if (mrsas_issue_dcmd(sc, cmd)) {
4017 device_printf(sc->mrsas_dev,
4018 "Fail to send sync map info command.\n");
4025 * mrsas_get_pd_list: Returns FW's PD list structure input:
4026 * Adapter soft state
4028 * Issues an internal command (DCMD) to get the FW's controller PD list
4029 * structure. This information is mainly used to find out about system
4030 * supported by Firmware.
4033 mrsas_get_pd_list(struct mrsas_softc *sc)
4035 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4036 u_int8_t do_ocr = 1;
4037 struct mrsas_mfi_cmd *cmd;
4038 struct mrsas_dcmd_frame *dcmd;
4039 struct MR_PD_LIST *pd_list_mem;
4040 struct MR_PD_ADDRESS *pd_addr;
4041 bus_addr_t pd_list_phys_addr = 0;
4042 struct mrsas_tmp_dcmd *tcmd;
4044 cmd = mrsas_get_mfi_cmd(sc);
4046 device_printf(sc->mrsas_dev,
4047 "Cannot alloc for get PD list cmd\n");
4050 dcmd = &cmd->frame->dcmd;
/*
 * NOTE(review): the M_NOWAIT malloc result is not NULL-checked before
 * mrsas_alloc_tmp_dcmd() dereferences tcmd -- potential NULL deref under
 * memory pressure; confirm against the full source.
 */
4052 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4053 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4054 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4055 device_printf(sc->mrsas_dev,
4056 "Cannot alloc dmamap for get PD list cmd\n");
4057 mrsas_release_mfi_cmd(cmd);
4058 mrsas_free_tmp_dcmd(tcmd);
4059 free(tcmd, M_MRSAS);
4062 pd_list_mem = tcmd->tmp_dcmd_mem;
4063 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4065 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Query only PDs that are exposed to the host. */
4067 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4068 dcmd->mbox.b[1] = 0;
4069 dcmd->cmd = MFI_CMD_DCMD;
4070 dcmd->cmd_status = 0xFF;
4071 dcmd->sge_count = 1;
4072 dcmd->flags = MFI_FRAME_DIR_READ;
4075 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4076 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4077 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4078 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4080 retcode = mrsas_issue_polled(sc, cmd);
4081 if (retcode == ETIMEDOUT)
4084 /* Get the instance PD list */
4085 pd_count = MRSAS_MAX_PD;
4086 pd_addr = pd_list_mem->addr;
/* Copy the FW list into local_pd_list, indexed by deviceId (target id). */
4087 if (pd_list_mem->count < pd_count) {
4088 memset(sc->local_pd_list, 0,
4089 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4090 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4091 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4092 sc->local_pd_list[pd_addr->deviceId].driveType =
4093 pd_addr->scsiDevType;
4094 sc->local_pd_list[pd_addr->deviceId].driveState =
4099 * Use mutext/spinlock if pd_list component size increase more than
4102 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4106 mrsas_free_tmp_dcmd(tcmd);
4107 free(tcmd, M_MRSAS);
/* Timed-out DCMD: request an online controller reset (OCR). */
4110 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4112 mrsas_release_mfi_cmd(cmd);
4118 * mrsas_get_ld_list: Returns FW's LD list structure input:
4119 * Adapter soft state
4121 * Issues an internal command (DCMD) to get the FW's controller PD list
4122 * structure. This information is mainly used to find out about supported by
4126 mrsas_get_ld_list(struct mrsas_softc *sc)
4128 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4129 u_int8_t do_ocr = 1;
4130 struct mrsas_mfi_cmd *cmd;
4131 struct mrsas_dcmd_frame *dcmd;
4132 struct MR_LD_LIST *ld_list_mem;
4133 bus_addr_t ld_list_phys_addr = 0;
4134 struct mrsas_tmp_dcmd *tcmd;
4136 cmd = mrsas_get_mfi_cmd(sc);
4138 device_printf(sc->mrsas_dev,
4139 "Cannot alloc for get LD list cmd\n");
4142 dcmd = &cmd->frame->dcmd;
/*
 * NOTE(review): as in mrsas_get_pd_list(), the M_NOWAIT malloc result is
 * not NULL-checked before use -- confirm against the full source.
 */
4144 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4145 ld_list_size = sizeof(struct MR_LD_LIST);
4146 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4147 device_printf(sc->mrsas_dev,
4148 "Cannot alloc dmamap for get LD list cmd\n");
4149 mrsas_release_mfi_cmd(cmd);
4150 mrsas_free_tmp_dcmd(tcmd);
4151 free(tcmd, M_MRSAS);
4154 ld_list_mem = tcmd->tmp_dcmd_mem;
4155 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4157 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* mbox byte 0 asks FW for the extended (256) VD list when supported. */
4159 if (sc->max256vdSupport)
4160 dcmd->mbox.b[0] = 1;
4162 dcmd->cmd = MFI_CMD_DCMD;
4163 dcmd->cmd_status = 0xFF;
4164 dcmd->sge_count = 1;
4165 dcmd->flags = MFI_FRAME_DIR_READ;
4167 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4168 dcmd->opcode = MR_DCMD_LD_GET_LIST;
4169 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4170 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
4173 retcode = mrsas_issue_polled(sc, cmd);
4174 if (retcode == ETIMEDOUT)
4178 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4181 /* Get the instance LD list */
/* Record the target id of every LD whose state is non-zero. */
4182 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4183 sc->CurLdCount = ld_list_mem->ldCount;
4184 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4185 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4186 if (ld_list_mem->ldList[ld_index].state != 0) {
4187 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4188 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4194 mrsas_free_tmp_dcmd(tcmd);
4195 free(tcmd, M_MRSAS);
/* Timed-out DCMD: request an online controller reset (OCR). */
4198 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4200 mrsas_release_mfi_cmd(cmd);
4206 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
4207 * Adapter soft state Temp command Size of alloction
4209 * Allocates DMAable memory for a temporary internal command. The allocated
4210 * memory is initialized to all zeros upon successful loading of the dma
/*
 * NOTE(review): lossy extraction -- several bus_dma_tag_create() arguments
 * (alignment, boundary, filter, maxsize/nsegments) and the error returns
 * are not visible in this chunk.
 */
4214 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4215 struct mrsas_tmp_dcmd *tcmd, int size)
4217 if (bus_dma_tag_create(sc->mrsas_parent_tag,
/* 32-bit address limit: the DCMD SGL uses 32-bit sge entries. */
4219 BUS_SPACE_MAXADDR_32BIT,
4227 &tcmd->tmp_dcmd_tag)) {
4228 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4231 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4232 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4233 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
/* mrsas_addr_cb stores the loaded physical address into tmp_dcmd_phys_addr. */
4236 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4237 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4238 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4239 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4242 memset(tcmd->tmp_dcmd_mem, 0, size);
4247 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
4248 * temporary dcmd pointer
4250 * Deallocates memory of the temporary command for use in the construction of
4251 * the internal DCMD.
4254 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4256 if (tmp->tmp_dcmd_phys_addr)
4257 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4258 if (tmp->tmp_dcmd_mem != NULL)
4259 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4260 if (tmp->tmp_dcmd_tag != NULL)
4261 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4265 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
4266 * Adapter soft state Previously issued cmd to be aborted
4268 * This function is used to abort previously issued commands, such as AEN and
4269 * RAID map sync map commands. The abort command is sent as a DCMD internal
4270 * command and subsequently the driver will wait for a return status. The
4271 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4274 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4275 struct mrsas_mfi_cmd *cmd_to_abort)
4277 struct mrsas_mfi_cmd *cmd;
4278 struct mrsas_abort_frame *abort_fr;
4279 u_int8_t retcode = 0;
4280 unsigned long total_time = 0;
4281 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4283 cmd = mrsas_get_mfi_cmd(sc);
4285 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4288 abort_fr = &cmd->frame->abort;
4290 /* Prepare and issue the abort frame */
/* Target is identified by its command index and MFI frame physical address. */
4291 abort_fr->cmd = MFI_CMD_ABORT;
4292 abort_fr->cmd_status = 0xFF;
4293 abort_fr->flags = 0;
4294 abort_fr->abort_context = cmd_to_abort->index;
4295 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4296 abort_fr->abort_mfi_phys_addr_hi = 0;
/* 0xFF sentinel; cleared by mrsas_complete_abort() when the abort finishes. */
4299 cmd->cmd_status = 0xFF;
4301 if (mrsas_issue_dcmd(sc, cmd)) {
4302 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4305 /* Wait for this cmd to complete */
/* Sleep channel is &sc->chan (matches wakeup_one in mrsas_complete_abort). */
4306 sc->chan = (void *)&cmd;
4308 if (cmd->cmd_status == 0xFF) {
4309 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4313 if (total_time >= max_wait) {
4314 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4321 mrsas_release_mfi_cmd(cmd);
4326 * mrsas_complete_abort: Completes aborting a command input:
4327 * Adapter soft state Cmd that was issued to abort another cmd
4329 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4330 * change after sending the command. This function is called from
4331 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4334 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4336 if (cmd->sync_cmd) {
4338 cmd->cmd_status = 0;
4339 sc->chan = (void *)&cmd;
4340 wakeup_one((void *)&sc->chan);
4346 * mrsas_aen_handler: AEN processing callback function from thread context
4347 * input: Adapter soft state
4349 * Asynchronous event handler
/*
 * NOTE(review): lossy extraction -- break statements, some labels (e.g. the
 * doscan bit-flag assignments and the skip_register_aen label) and the
 * closing logic are not visible in this chunk.
 */
4352 mrsas_aen_handler(struct mrsas_softc *sc)
4354 union mrsas_evt_class_locale class_locale;
4357 int error, fail_aen = 0;
4360 printf("invalid instance!\n");
4363 if (sc->evt_detail_mem) {
/* Dispatch on the FW event code; rescan the matching SIM on topology change. */
4364 switch (sc->evt_detail_mem->code) {
4365 case MR_EVT_PD_INSERTED:
4366 fail_aen = mrsas_get_pd_list(sc);
4368 mrsas_bus_scan_sim(sc, sc->sim_1);
4370 goto skip_register_aen;
4373 case MR_EVT_PD_REMOVED:
4374 fail_aen = mrsas_get_pd_list(sc);
4376 mrsas_bus_scan_sim(sc, sc->sim_1);
4378 goto skip_register_aen;
4381 case MR_EVT_LD_OFFLINE:
4382 case MR_EVT_CFG_CLEARED:
4383 case MR_EVT_LD_DELETED:
4384 mrsas_bus_scan_sim(sc, sc->sim_0);
4387 case MR_EVT_LD_CREATED:
4388 fail_aen = mrsas_get_ld_list(sc);
4390 mrsas_bus_scan_sim(sc, sc->sim_0);
4392 goto skip_register_aen;
4395 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4396 case MR_EVT_FOREIGN_CFG_IMPORTED:
4397 case MR_EVT_LD_STATE_CHANGE:
4405 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
/* PD refresh path: sim_1 carries physical devices. */
4409 fail_aen = mrsas_get_pd_list(sc);
4411 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4412 mrsas_bus_scan_sim(sc, sc->sim_1);
4414 goto skip_register_aen;
/* LD refresh path: sim_0 carries logical drives. */
4416 fail_aen = mrsas_get_ld_list(sc);
4418 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4419 mrsas_bus_scan_sim(sc, sc->sim_0);
4421 goto skip_register_aen;
4423 seq_num = sc->evt_detail_mem->seq_num + 1;
4425 /* Register AEN with FW for latest sequence number plus 1 */
4426 class_locale.members.reserved = 0;
4427 class_locale.members.locale = MR_EVT_LOCALE_ALL;
4428 class_locale.members.class = MR_EVT_CLASS_DEBUG;
4430 if (sc->aen_cmd != NULL)
/* aen_lock serializes AEN registration with the completion path. */
4433 mtx_lock(&sc->aen_lock);
4434 error = mrsas_register_aen(sc, seq_num,
4436 mtx_unlock(&sc->aen_lock);
4439 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4448 * mrsas_complete_aen: Completes AEN command
4449 * input: Adapter soft state
4450 * Cmd that was issued to abort another cmd
4452 * This function will be called from ISR and will continue event processing from
4453 * thread context by enqueuing task in ev_tq (callback function
4454 * "mrsas_aen_handler").
4457 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4460 * Don't signal app if it is just an aborted previously registered
/* Wake any poll(2) waiter on the control device for a genuine event. */
4463 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4464 sc->mrsas_aen_triggered = 1;
4465 mtx_lock(&sc->aen_lock);
4466 if (sc->mrsas_poll_waiting) {
4467 sc->mrsas_poll_waiting = 0;
4468 selwakeup(&sc->mrsas_select);
4470 mtx_unlock(&sc->aen_lock);
4475 mrsas_release_mfi_cmd(cmd);
/* Defer event processing to thread context via the ev_tq taskqueue. */
4477 if (!sc->remove_in_progress)
4478 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
/* newbus glue: device method table mapping bus callbacks to driver entry points. */
4483 static device_method_t mrsas_methods[] = {
4484 DEVMETHOD(device_probe, mrsas_probe),
4485 DEVMETHOD(device_attach, mrsas_attach),
4486 DEVMETHOD(device_detach, mrsas_detach),
4487 DEVMETHOD(device_suspend, mrsas_suspend),
4488 DEVMETHOD(device_resume, mrsas_resume),
4489 DEVMETHOD(bus_print_child, bus_generic_print_child),
4490 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* Driver descriptor: softc size tells newbus how much per-device state to allocate. */
4494 static driver_t mrsas_driver = {
4497 sizeof(struct mrsas_softc)
4500 static devclass_t mrsas_devclass;
/* Register on the PCI bus and declare the dependency on the CAM layer. */
4502 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
4503 MODULE_DEPEND(mrsas, cam, 1, 1, 1);