2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies, either expressed or implied, of the FreeBSD Project.
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
47 #include <cam/cam_ccb.h>
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
/*
 * Character-device (cdevsw) entry points for the /dev/mrsas%u node.
 */
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
/*
 * File-private helpers: config-intrhook startup, PCI identification,
 * MSI-X setup, controller shutdown/reset, RAID/JBOD map handling,
 * firmware initialization, and interrupt plumbing.
 */
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
/* NOTE(review): return types of some prototypes below are on lines elided
 * from this extract. */
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92 struct mrsas_mfi_cmd *cmd_to_abort);
93 static struct mrsas_softc *
94 mrsas_get_softc_instance(struct cdev *dev,
95 u_long cmd, caddr_t arg);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99 struct mrsas_mfi_cmd *mfi_cmd);
/*
 * Global (non-static) functions shared with the CAM/ioctl sides of the
 * driver.
 */
100 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int mrsas_init_adapter(struct mrsas_softc *sc);
103 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int mrsas_ioc_init(struct mrsas_softc *sc);
107 int mrsas_bus_scan(struct mrsas_softc *sc);
108 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int mrsas_reset_targets(struct mrsas_softc *sc);
115 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
116 struct mrsas_mfi_cmd *cmd);
118 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
120 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
121 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void mrsas_disable_intr(struct mrsas_softc *sc);
125 void mrsas_enable_intr(struct mrsas_softc *sc);
126 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
127 void mrsas_free_mem(struct mrsas_softc *sc);
128 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
129 void mrsas_isr(void *arg);
130 void mrsas_teardown_intr(struct mrsas_softc *sc);
131 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
132 void mrsas_kill_hba(struct mrsas_softc *sc);
133 void mrsas_aen_handler(struct mrsas_softc *sc);
135 mrsas_write_reg(struct mrsas_softc *sc, int offset,
138 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
139 u_int32_t req_desc_hi);
140 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
142 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
143 struct mrsas_mfi_cmd *cmd, u_int8_t status);
145 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
147 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
149 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
150 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
/* Functions implemented in the companion CAM/fusion source files. */
152 extern int mrsas_cam_attach(struct mrsas_softc *sc);
153 extern void mrsas_cam_detach(struct mrsas_softc *sc);
154 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
155 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
156 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163 extern void mrsas_xpt_release(struct mrsas_softc *sc);
164 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165 mrsas_get_request_desc(struct mrsas_softc *sc,
167 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
/* Root of the hw.mrsas sysctl tree; per-unit nodes hang off this. */
171 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
174 * PCI device struct and table
/* NOTE(review): the mrsas_ident field list is elided from this extract;
 * from mrsas_find_ident() below it contains at least vendor, device,
 * subvendor, subdevice, and desc. */
177 typedef struct mrsas_ident {
/*
 * PCI ID match table: all entries are LSI/Avago (vendor 0x1000) with
 * wildcard (0xffff) sub-IDs. mrsas_find_ident() scans this table until
 * it hits the zero-vendor sentinel (terminator row elided from view).
 */
185 MRSAS_CTLR_ID device_table[] = {
186 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
187 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
188 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
189 {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
190 {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
191 {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
192 {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
197 * Character device entry points
/*
 * cdevsw for /dev/mrsas%u; handlers are the static functions declared at
 * the top of the file.
 */
200 static struct cdevsw mrsas_cdevsw = {
201 .d_version = D_VERSION,
202 .d_open = mrsas_open,
203 .d_close = mrsas_close,
204 .d_read = mrsas_read,
205 .d_write = mrsas_write,
206 .d_ioctl = mrsas_ioctl,
207 .d_poll = mrsas_poll,
/* Malloc type tag for all driver allocations made with M_MRSAS. */
211 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
214 * In the cdevsw routines, we find our softc by using the si_drv1 member of
215 * struct cdev. We set this variable to point to our softc in our attach
216 * routine when we create the /dev entry.
/* NOTE(review): the bodies of these four cdev handlers are elided from
 * this extract; only the signatures and softc locals are visible. */
219 mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
221 struct mrsas_softc *sc;
228 mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
230 struct mrsas_softc *sc;
237 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
239 struct mrsas_softc *sc;
245 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
247 struct mrsas_softc *sc;
254 * Register Read/Write Functions
/*
 * mrsas_write_reg: 32-bit MMIO write at `offset` into the controller's
 * register BAR, using the bus tag/handle cached in the softc at attach.
 */
258 mrsas_write_reg(struct mrsas_softc *sc, int offset,
261 bus_space_tag_t bus_tag = sc->bus_tag;
262 bus_space_handle_t bus_handle = sc->bus_handle;
264 bus_space_write_4(bus_tag, bus_handle, offset, value);
/*
 * mrsas_read_reg: 32-bit MMIO read at `offset`; returns the raw register
 * value.
 */
268 mrsas_read_reg(struct mrsas_softc *sc, int offset)
270 bus_space_tag_t bus_tag = sc->bus_tag;
271 bus_space_handle_t bus_handle = sc->bus_handle;
273 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
278 * Interrupt Disable/Enable/Clear Functions
/*
 * mrsas_disable_intr: mask all controller interrupts by writing all-ones
 * to outbound_intr_mask, and record that in sc->mask_interrupts.
 */
282 mrsas_disable_intr(struct mrsas_softc *sc)
284 u_int32_t mask = 0xFFFFFFFF;
287 sc->mask_interrupts = 1;
288 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
289 /* Dummy read to force PCI posted-write flush */
290 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * mrsas_enable_intr: ack any pending status, then unmask the fusion
 * interrupt bit (~mask -> everything except MFI_FUSION_ENABLE_INTERRUPT_MASK
 * stays masked). Each write is followed by a read-back to flush.
 */
294 mrsas_enable_intr(struct mrsas_softc *sc)
296 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
299 sc->mask_interrupts = 0;
300 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
301 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
303 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
304 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * mrsas_clear_intr: check outbound_intr_status; bail out if the fusion
 * bit is not set (shared-interrupt case). NOTE(review): the return
 * statements of both paths are elided from this extract.
 */
308 mrsas_clear_intr(struct mrsas_softc *sc)
312 /* Read received interrupt */
313 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
315 /* Not our interrupt, so just return */
316 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
319 /* We got a reply interrupt */
324 * PCI Support Functions
/*
 * mrsas_find_ident: walk device_table (terminated by vendor == 0) and
 * return the entry matching this device's PCI vendor/device IDs, where
 * sub-IDs either match exactly or are the 0xffff wildcard.
 * NOTE(review): the matching-return and the NULL fall-through return are
 * elided from this extract.
 */
327 static struct mrsas_ident *
328 mrsas_find_ident(device_t dev)
330 struct mrsas_ident *pci_device;
332 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
333 if ((pci_device->vendor == pci_get_vendor(dev)) &&
334 (pci_device->device == pci_get_device(dev)) &&
335 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
336 (pci_device->subvendor == 0xffff)) &&
337 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
338 (pci_device->subdevice == 0xffff)))
/*
 * mrsas_probe: newbus probe method. Matches the device against
 * device_table; on the first matching controller prints the driver
 * version banner once (first_ctrl latch), sets the device description,
 * and returns a probe priority.
 */
345 mrsas_probe(device_t dev)
347 static u_int8_t first_ctrl = 1;
348 struct mrsas_ident *id;
350 if ((id = mrsas_find_ident(dev)) != NULL) {
352 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
356 device_set_desc(dev, id->desc);
357 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
364 * mrsas_setup_sysctl: setup sysctl values for mrsas
365 * input: Adapter instance soft state
367 * Setup sysctl entries for mrsas driver.
370 mrsas_setup_sysctl(struct mrsas_softc *sc)
372 struct sysctl_ctx_list *sysctl_ctx = NULL;
373 struct sysctl_oid *sysctl_tree = NULL;
374 char tmpstr[80], tmpstr2[80];
377 * Setup the sysctl variable so the user can change the debug level
380 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
381 device_get_unit(sc->mrsas_dev));
382 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
/* Prefer the device's own sysctl context/tree from newbus ... */
384 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
385 if (sysctl_ctx != NULL)
386 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
/* ... and fall back to a private per-unit node under hw.mrsas. */
388 if (sysctl_tree == NULL) {
389 sysctl_ctx_init(&sc->sysctl_ctx);
390 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
391 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
392 CTLFLAG_RD, 0, tmpstr);
393 if (sc->sysctl_tree == NULL)
395 sysctl_ctx = &sc->sysctl_ctx;
396 sysctl_tree = sc->sysctl_tree;
/* Tunable/read-only leaves exported under the chosen tree. */
398 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
399 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
400 "Disable the use of OCR");
402 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
403 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
404 strlen(MRSAS_VERSION), "driver version");
406 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
407 OID_AUTO, "reset_count", CTLFLAG_RD,
408 &sc->reset_count, 0, "number of ocr from start of the day");
410 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
411 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
412 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
414 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
415 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
416 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
418 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
419 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
420 "Driver debug level");
422 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
423 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
424 0, "Driver IO timeout value in mili-second.");
426 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
427 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
428 &sc->mrsas_fw_fault_check_delay,
429 0, "FW fault check thread delay in seconds. <default is 1 sec>");
431 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
432 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
433 &sc->reset_in_progress, 0, "ocr in progress status");
435 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
436 OID_AUTO, "block_sync_cache", CTLFLAG_RW,
437 &sc->block_sync_cache, 0,
438 "Block SYNC CACHE at driver. <default: 0, send it to FW>");
443 * mrsas_get_tunables: get tunable parameters.
444 * input: Adapter instance soft state
446 * Get tunable parameters. This will help to debug driver at boot time.
449 mrsas_get_tunables(struct mrsas_softc *sc)
/* Establish defaults first, then let loader tunables override them. */
453 /* XXX default to some debugging for now */
454 sc->mrsas_debug = MRSAS_FAULT;
455 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
456 sc->mrsas_fw_fault_check_delay = 1;
458 sc->reset_in_progress = 0;
459 sc->block_sync_cache = 0;
462 * Grab the global variables.
464 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
467 * Grab the global variables.
469 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
471 /* Grab the unit-instance variables (override the global debug level) */
472 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
473 device_get_unit(sc->mrsas_dev));
474 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
478 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
479 * Used to get sequence number at driver load time.
480 * input: Adapter soft state
482 * Allocates DMAable memory for the event log info internal command.
485 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
489 /* Allocate get event log info command */
490 el_info_size = sizeof(struct mrsas_evt_log_info);
/* DMA tag restricted to 32-bit addresses (controller DCMD SGE is sge32). */
491 if (bus_dma_tag_create(sc->mrsas_parent_tag,
493 BUS_SPACE_MAXADDR_32BIT,
/* NOTE(review): remaining tag-create arguments and the error returns are
 * elided from this extract. */
502 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
505 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
506 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
507 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
/* Load the map; mrsas_addr_cb stores the bus address in el_info_phys_addr. */
510 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
511 sc->el_info_mem, el_info_size, mrsas_addr_cb,
512 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
513 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
516 memset(sc->el_info_mem, 0, el_info_size);
521 * mrsas_free_evt_info_cmd: Free memory for Event log info command
522 * input: Adapter soft state
524 * Deallocates memory for the event log info internal command.
527 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
/* Tear down in reverse order of allocation: unload map, free memory,
 * destroy tag; each step guarded so partial allocations are safe. */
529 if (sc->el_info_phys_addr)
530 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
531 if (sc->el_info_mem != NULL)
532 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
533 if (sc->el_info_tag != NULL)
534 bus_dma_tag_destroy(sc->el_info_tag);
538 * mrsas_get_seq_num: Get latest event sequence number
539 * @sc: Adapter soft state
540 * @eli: Firmware event log sequence number information.
542 * Firmware maintains a log of all events in a non-volatile area.
543 * Driver get the sequence number using DCMD
544 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
548 mrsas_get_seq_num(struct mrsas_softc *sc,
549 struct mrsas_evt_log_info *eli)
551 struct mrsas_mfi_cmd *cmd;
552 struct mrsas_dcmd_frame *dcmd;
553 u_int8_t do_ocr = 1, retcode = 0;
555 cmd = mrsas_get_mfi_cmd(sc);
558 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
561 dcmd = &cmd->frame->dcmd;
/* DMA buffer for the firmware to write the event log info into. */
563 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
564 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
565 mrsas_release_mfi_cmd(cmd);
568 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Build the MR_DCMD_CTRL_EVENT_GET_INFO read DCMD with one 32-bit SGE. */
570 dcmd->cmd = MFI_CMD_DCMD;
571 dcmd->cmd_status = 0x0;
573 dcmd->flags = MFI_FRAME_DIR_READ;
576 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
577 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
578 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
579 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
581 retcode = mrsas_issue_blocked_cmd(sc, cmd);
582 if (retcode == ETIMEDOUT)
587 * Copy the data back into callers buffer
589 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
590 mrsas_free_evt_log_info_cmd(sc);
/* Timed-out DCMD: schedule an online controller reset (OCR). */
594 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
596 mrsas_release_mfi_cmd(cmd);
603 * mrsas_register_aen: Register for asynchronous event notification
604 * @sc: Adapter soft state
605 * @seq_num: Starting sequence number
606 * @class_locale: Class of the event
608 * This function subscribes for events beyond the @seq_num
609 * and type @class_locale.
613 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
614 u_int32_t class_locale_word)
617 struct mrsas_mfi_cmd *cmd;
618 struct mrsas_dcmd_frame *dcmd;
619 union mrsas_evt_class_locale curr_aen;
620 union mrsas_evt_class_locale prev_aen;
623 * If there an AEN pending already (aen_cmd), check if the
624 * class_locale of that pending AEN is inclusive of the new AEN
625 * request we currently have. If it is, then we don't have to do
626 * anything. In other words, whichever events the current AEN request
627 * is subscribing to, have already been subscribed to. If the old_cmd
628 * is _not_ inclusive, then we have to abort that command, form a
629 * class_locale that is superset of both old and current and re-issue
633 curr_aen.word = class_locale_word;
637 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
640 * A class whose enum value is smaller is inclusive of all
641 * higher values. If a PROGRESS (= -1) was previously
642 * registered, then a new registration requests for higher
643 * classes need not be sent to FW. They are automatically
644 * included. Locale numbers don't have such hierarchy. They
647 if ((prev_aen.members.class <= curr_aen.members.class) &&
648 !((prev_aen.members.locale & curr_aen.members.locale) ^
649 curr_aen.members.locale)) {
651 * Previously issued event registration includes
652 * current request. Nothing to do.
/* Build the superset: union of locales, minimum (most inclusive) class. */
656 curr_aen.members.locale |= prev_aen.members.locale;
658 if (prev_aen.members.class < curr_aen.members.class)
659 curr_aen.members.class = prev_aen.members.class;
/* Abort the previously registered AEN command before re-issuing. */
661 sc->aen_cmd->abort_aen = 1;
662 ret_val = mrsas_issue_blocked_abort_cmd(sc,
666 printf("mrsas: Failed to abort previous AEN command\n");
672 cmd = mrsas_get_mfi_cmd(sc);
676 dcmd = &cmd->frame->dcmd;
678 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
681 * Prepare DCMD for aen registration
683 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
685 dcmd->cmd = MFI_CMD_DCMD;
686 dcmd->cmd_status = 0x0;
688 dcmd->flags = MFI_FRAME_DIR_READ;
691 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
692 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
/* mbox.w[0] = starting sequence number, mbox.w[1] = class/locale word. */
693 dcmd->mbox.w[0] = seq_num;
694 sc->last_seq_num = seq_num;
695 dcmd->mbox.w[1] = curr_aen.word;
696 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
697 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
/* Lost the race: another AEN registration slipped in; drop this cmd. */
699 if (sc->aen_cmd != NULL) {
700 mrsas_release_mfi_cmd(cmd);
704 * Store reference to the cmd used to register for AEN. When an
705 * application wants us to register for AEN, we have to abort this
706 * cmd and re-register with a new EVENT LOCALE supplied by that app
711 * Issue the aen registration frame
713 if (mrsas_issue_dcmd(sc, cmd)) {
714 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
721 * mrsas_start_aen: Subscribes to AEN during driver load time
722 * @instance: Adapter soft state
725 mrsas_start_aen(struct mrsas_softc *sc)
727 struct mrsas_evt_log_info eli;
728 union mrsas_evt_class_locale class_locale;
731 /* Get the latest sequence number from FW */
733 memset(&eli, 0, sizeof(eli));
735 if (mrsas_get_seq_num(sc, &eli))
738 /* Register AEN with FW for latest sequence number plus 1 */
/* Subscribe to all locales at DEBUG class (most inclusive). */
739 class_locale.members.reserved = 0;
740 class_locale.members.locale = MR_EVT_LOCALE_ALL;
741 class_locale.members.class = MR_EVT_CLASS_DEBUG;
743 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
749 * mrsas_setup_msix: Allocate MSI-x vectors
750 * @sc: adapter soft state
753 mrsas_setup_msix(struct mrsas_softc *sc)
/* For each vector: allocate the IRQ resource (rids start at 1) and hook
 * mrsas_isr with the per-vector irq_context as its argument. */
757 for (i = 0; i < sc->msix_vectors; i++) {
758 sc->irq_context[i].sc = sc;
759 sc->irq_context[i].MSIxIndex = i;
760 sc->irq_id[i] = i + 1;
761 sc->mrsas_irq[i] = bus_alloc_resource_any
762 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
764 if (sc->mrsas_irq[i] == NULL) {
765 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
766 goto irq_alloc_failed;
768 if (bus_setup_intr(sc->mrsas_dev,
770 INTR_MPSAFE | INTR_TYPE_CAM,
771 NULL, mrsas_isr, &sc->irq_context[i],
772 &sc->intr_handle[i])) {
773 device_printf(sc->mrsas_dev,
774 "Cannot set up MSI-x interrupt handler\n");
775 goto irq_alloc_failed;
/* Failure path: release everything allocated so far. */
781 mrsas_teardown_intr(sc);
786 * mrsas_allocate_msix: Setup MSI-x vectors
787 * @sc: adapter soft state
790 mrsas_allocate_msix(struct mrsas_softc *sc)
/* pci_alloc_msix() may grant fewer vectors than requested; it updates
 * sc->msix_vectors with the number actually allocated. */
792 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
793 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
794 " of vectors\n", sc->msix_vectors);
796 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
797 goto irq_alloc_failed;
802 mrsas_teardown_intr(sc);
807 * mrsas_attach: PCI entry point
808 * input: pointer to device struct
810 * Performs setup of PCI and registers, initializes mutexes and linked lists,
811 * registers interrupts and CAM, and initializes the adapter/controller to
815 mrsas_attach(device_t dev)
817 struct mrsas_softc *sc = device_get_softc(dev);
818 uint32_t cmd, bar, error;
820 memset(sc, 0, sizeof(struct mrsas_softc));
822 /* Look up our softc and initialize its fields. */
824 sc->device_id = pci_get_device(dev);
/* Gen-3 controllers (Invader and later) take different code paths. */
826 if ((sc->device_id == MRSAS_INVADER) ||
827 (sc->device_id == MRSAS_FURY) ||
828 (sc->device_id == MRSAS_INTRUDER) ||
829 (sc->device_id == MRSAS_INTRUDER_24) ||
830 (sc->device_id == MRSAS_CUTLASS_52) ||
831 (sc->device_id == MRSAS_CUTLASS_53)) {
832 sc->mrsas_gen3_ctrl = 1;
835 mrsas_get_tunables(sc);
838 * Set up PCI and registers
840 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
841 if ((cmd & PCIM_CMD_PORTEN) == 0) {
844 /* Force the busmaster enable bit on. */
845 cmd |= PCIM_CMD_BUSMASTEREN;
846 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
/* Map the register BAR (BAR1) for MMIO access. */
848 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
850 sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
851 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
852 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
854 device_printf(dev, "Cannot allocate PCI registers\n");
857 sc->bus_tag = rman_get_bustag(sc->reg_res);
858 sc->bus_handle = rman_get_bushandle(sc->reg_res);
860 /* Initialize mutexes (ioctl_lock is a spin mutex; the rest are default) */
861 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
862 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
863 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
864 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
865 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
866 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
867 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
868 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
870 /* Initialize linked lists for the MPT/MFI command pools */
871 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
872 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
874 mrsas_atomic_set(&sc->fw_outstanding, 0);
875 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
877 sc->io_cmds_highwater = 0;
879 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
880 sc->UnevenSpanSupport = 0;
884 /* Initialize Firmware */
885 if (mrsas_init_fw(sc) != SUCCESS) {
888 /* Register mrsas to CAM layer */
889 if ((mrsas_cam_attach(sc) != SUCCESS)) {
890 goto attach_fail_cam;
893 if (mrsas_setup_irq(sc) != SUCCESS) {
894 goto attach_fail_irq;
/* Kick off the online-controller-reset (OCR) watchdog kernel thread. */
896 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
897 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
898 device_get_unit(sc->mrsas_dev));
900 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
901 goto attach_fail_ocr_thread;
904 * After FW initialization and OCR thread creation
905 * we will defer the cdev creation, AEN setup on ICH callback
907 sc->mrsas_ich.ich_func = mrsas_ich_startup;
908 sc->mrsas_ich.ich_arg = sc;
909 if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
910 device_printf(sc->mrsas_dev, "Config hook is already established\n");
912 mrsas_setup_sysctl(sc);
/* Error unwinding: undo each successfully completed stage in reverse. */
915 attach_fail_ocr_thread:
916 if (sc->ocr_thread_active)
917 wakeup(&sc->ocr_chan);
919 mrsas_teardown_intr(sc);
921 mrsas_cam_detach(sc);
923 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
924 if (sc->msix_enable == 1)
925 pci_release_msi(sc->mrsas_dev);
927 mtx_destroy(&sc->sim_lock);
928 mtx_destroy(&sc->aen_lock);
929 mtx_destroy(&sc->pci_lock);
930 mtx_destroy(&sc->io_lock);
931 mtx_destroy(&sc->ioctl_lock);
932 mtx_destroy(&sc->mpt_cmd_pool_lock);
933 mtx_destroy(&sc->mfi_cmd_pool_lock);
934 mtx_destroy(&sc->raidmap_lock);
937 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
938 sc->reg_res_id, sc->reg_res);
944 * Interrupt config hook
947 mrsas_ich_startup(void *arg)
949 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
952 * Initialize a counting Semaphore to take care no. of concurrent IOCTLs
954 sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
955 IOCTL_SEMA_DESCRIPTION);
957 /* Create a /dev entry for mrsas controller. */
958 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
959 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
960 device_get_unit(sc->mrsas_dev));
/* Unit 0 also gets a Linux-megaraid-compatible ioctl alias node. */
962 if (device_get_unit(sc->mrsas_dev) == 0) {
963 make_dev_alias_p(MAKEDEV_CHECKNAME,
964 &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
965 "megaraid_sas_ioctl_node");
/* Let the cdevsw handlers find this softc via si_drv1. */
968 sc->mrsas_cdev->si_drv1 = sc;
971 * Add this controller to mrsas_mgmt_info structure so that it can be
972 * exported to management applications
974 if (device_get_unit(sc->mrsas_dev) == 0)
975 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
977 mrsas_mgmt_info.count++;
978 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
979 mrsas_mgmt_info.max_index++;
981 /* Enable Interrupts */
982 mrsas_enable_intr(sc);
984 /* Initiate AEN (Asynchronous Event Notification) */
985 if (mrsas_start_aen(sc)) {
986 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
987 "Further events from the controller will not be communicated.\n"
988 "Either there is some problem in the controller"
989 "or the controller does not support AEN.\n"
990 "Please contact to the SUPPORT TEAM if the problem persists\n");
/* Startup done: release the boot-time interrupt config hook. */
992 if (sc->mrsas_ich.ich_arg != NULL) {
993 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
994 config_intrhook_disestablish(&sc->mrsas_ich);
995 sc->mrsas_ich.ich_arg = NULL;
1000 * mrsas_detach: De-allocates and teardown resources
1001 * input: pointer to device struct
1003 * This function is the entry point for device disconnect and detach.
1004 * It performs memory de-allocations, shutdown of the controller and various
1005 * teardown and destroy resource functions.
1008 mrsas_detach(device_t dev)
1010 struct mrsas_softc *sc;
1013 sc = device_get_softc(dev);
1014 sc->remove_in_progress = 1;
1016 /* Destroy the character device so no other IOCTL will be handled */
1017 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1018 destroy_dev(sc->mrsas_linux_emulator_cdev);
1019 destroy_dev(sc->mrsas_cdev);
1022 * Take the instance off the instance array. Note that we will not
1023 * decrement the max_index. We let this array be sparse array
1025 for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1026 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1027 mrsas_mgmt_info.count--;
1028 mrsas_mgmt_info.sc_ptr[i] = NULL;
/* Wake the OCR thread so it can observe remove_in_progress and exit. */
1033 if (sc->ocr_thread_active)
1034 wakeup(&sc->ocr_chan);
/* Wait for any in-flight OCR to finish, logging a notice periodically. */
1035 while (sc->reset_in_progress) {
1037 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1038 mrsas_dprint(sc, MRSAS_INFO,
1039 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1041 pause("mr_shutdown", hz);
1044 while (sc->ocr_thread_active) {
1046 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1047 mrsas_dprint(sc, MRSAS_INFO,
1049 "mrsas_ocr thread to quit ocr %d\n", i,
1050 sc->ocr_thread_active);
1052 pause("mr_shutdown", hz);
/* Flush controller cache and shut the firmware down before teardown. */
1054 mrsas_flush_cache(sc);
1055 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1056 mrsas_disable_intr(sc);
1057 mrsas_cam_detach(sc);
1058 mrsas_teardown_intr(sc);
1060 mtx_destroy(&sc->sim_lock);
1061 mtx_destroy(&sc->aen_lock);
1062 mtx_destroy(&sc->pci_lock);
1063 mtx_destroy(&sc->io_lock);
1064 mtx_destroy(&sc->ioctl_lock);
1065 mtx_destroy(&sc->mpt_cmd_pool_lock);
1066 mtx_destroy(&sc->mfi_cmd_pool_lock);
1067 mtx_destroy(&sc->raidmap_lock);
1069 /* Wait for all the semaphores to be released */
1070 while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
1071 pause("mr_shutdown", hz);
1073 /* Destroy the counting semaphore created for Ioctl */
1074 sema_destroy(&sc->ioctl_count_sema);
1077 bus_release_resource(sc->mrsas_dev,
1078 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1080 if (sc->sysctl_tree != NULL)
1081 sysctl_ctx_free(&sc->sysctl_ctx);
1087 * mrsas_free_mem: Frees allocated memory
1088 * input: Adapter instance soft state
1090 * This function is called from mrsas_detach() to free previously allocated
/*
 * mrsas_free_mem: tear down every DMA mapping/allocation/tag and every
 * host-memory allocation made during attach.  Each DMA resource follows
 * the same three-step pattern: bus_dmamap_unload() if a physical address
 * was loaded, bus_dmamem_free() if memory was allocated, then
 * bus_dma_tag_destroy() if the tag was created.
 *
 * NOTE(review): this listing is a partial extraction -- the embedded
 * original line numbers jump (1094 -> 1098, 1110 -> 1112, ...), so the
 * declarations of 'i'/'max_cmd', several closing braces, and parts of
 * block comments are not visible here.  Do not edit by eye alone.
 */
1094 mrsas_free_mem(struct mrsas_softc *sc)
1098 struct mrsas_mfi_cmd *mfi_cmd;
1099 struct mrsas_mpt_cmd *mpt_cmd;
1102 * Free RAID map memory
/* Two RAID-map copies (double-buffered) plus the driver-local maps. */
1104 for (i = 0; i < 2; i++) {
1105 if (sc->raidmap_phys_addr[i])
1106 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1107 if (sc->raidmap_mem[i] != NULL)
1108 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1109 if (sc->raidmap_tag[i] != NULL)
1110 bus_dma_tag_destroy(sc->raidmap_tag[i]);
1112 if (sc->ld_drv_map[i] != NULL)
1113 free(sc->ld_drv_map[i], M_MRSAS);
/* JBOD sequence-number map copies (see megasas_setup_jbod_map()). */
1115 for (i = 0; i < 2; i++) {
1116 if (sc->jbodmap_phys_addr[i])
1117 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1118 if (sc->jbodmap_mem[i] != NULL)
1119 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1120 if (sc->jbodmap_tag[i] != NULL)
1121 bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1124 * Free version buffer memroy
1126 if (sc->verbuf_phys_addr)
1127 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1128 if (sc->verbuf_mem != NULL)
1129 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1130 if (sc->verbuf_tag != NULL)
1131 bus_dma_tag_destroy(sc->verbuf_tag);
1135 * Free sense buffer memory
1137 if (sc->sense_phys_addr)
1138 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1139 if (sc->sense_mem != NULL)
1140 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1141 if (sc->sense_tag != NULL)
1142 bus_dma_tag_destroy(sc->sense_tag);
1145 * Free chain frame memory
1147 if (sc->chain_frame_phys_addr)
1148 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1149 if (sc->chain_frame_mem != NULL)
1150 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1151 if (sc->chain_frame_tag != NULL)
1152 bus_dma_tag_destroy(sc->chain_frame_tag);
1155 * Free IO Request memory
1157 if (sc->io_request_phys_addr)
1158 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1159 if (sc->io_request_mem != NULL)
1160 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1161 if (sc->io_request_tag != NULL)
1162 bus_dma_tag_destroy(sc->io_request_tag);
1165 * Free Reply Descriptor memory
1167 if (sc->reply_desc_phys_addr)
1168 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1169 if (sc->reply_desc_mem != NULL)
1170 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1171 if (sc->reply_desc_tag != NULL)
1172 bus_dma_tag_destroy(sc->reply_desc_tag);
1175 * Free event detail memory
1177 if (sc->evt_detail_phys_addr)
1178 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1179 if (sc->evt_detail_mem != NULL)
1180 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1181 if (sc->evt_detail_tag != NULL)
1182 bus_dma_tag_destroy(sc->evt_detail_tag);
/*
 * MFI frames are released per-command first; the mfi_cmd_list array
 * itself is freed in the second mfi loop further below.
 */
1187 if (sc->mfi_cmd_list) {
1188 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1189 mfi_cmd = sc->mfi_cmd_list[i];
1190 mrsas_free_frame(sc, mfi_cmd);
1193 if (sc->mficmd_frame_tag != NULL)
1194 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1197 * Free MPT internal command list
1199 max_cmd = sc->max_fw_cmds;
1200 if (sc->mpt_cmd_list) {
1201 for (i = 0; i < max_cmd; i++) {
1202 mpt_cmd = sc->mpt_cmd_list[i];
1203 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1204 free(sc->mpt_cmd_list[i], M_MRSAS);
1206 free(sc->mpt_cmd_list, M_MRSAS);
1207 sc->mpt_cmd_list = NULL;
1210 * Free MFI internal command list
1213 if (sc->mfi_cmd_list) {
1214 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1215 free(sc->mfi_cmd_list[i], M_MRSAS);
1217 free(sc->mfi_cmd_list, M_MRSAS);
1218 sc->mfi_cmd_list = NULL;
1221 * Free request descriptor memory
1223 free(sc->req_desc, M_MRSAS);
1224 sc->req_desc = NULL;
1227 * Destroy parent tag
/* Parent tag last: all child tags above were created from it. */
1229 if (sc->mrsas_parent_tag != NULL)
1230 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1233 * Free ctrl_info memory
1235 if (sc->ctrl_info != NULL)
1236 free(sc->ctrl_info, M_MRSAS);
1240 * mrsas_teardown_intr: Teardown interrupt
1241 * input: Adapter instance soft state
1243 * This function is called from mrsas_detach() to teardown and release bus
1244 * interrupt resourse.
/*
 * mrsas_teardown_intr: detach interrupt handlers and release IRQ bus
 * resources.  Legacy (INTx) uses only slot 0; MSI-X loops over all
 * allocated vectors and then releases the MSI resources from the PCI bus.
 * NOTE(review): embedded line numbers jump (1253 -> 1254 -> 1257), so the
 * 'else' branch brace and the declaration of 'i' are elided in this view.
 */
1247 mrsas_teardown_intr(struct mrsas_softc *sc)
1251 if (!sc->msix_enable) {
1252 if (sc->intr_handle[0])
1253 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1254 if (sc->mrsas_irq[0] != NULL)
1255 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1256 sc->irq_id[0], sc->mrsas_irq[0]);
/* Clear the handle so a repeated teardown is a no-op. */
1257 sc->intr_handle[0] = NULL;
1259 for (i = 0; i < sc->msix_vectors; i++) {
1260 if (sc->intr_handle[i])
1261 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1262 sc->intr_handle[i]);
1264 if (sc->mrsas_irq[i] != NULL)
1265 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1266 sc->irq_id[i], sc->mrsas_irq[i]);
1268 sc->intr_handle[i] = NULL;
/* Give the MSI/MSI-X vectors back to the PCI layer. */
1270 pci_release_msi(sc->mrsas_dev);
1276 * mrsas_suspend: Suspend entry point
1277 * input: Device struct pointer
1279 * This function is the entry point for system suspend from the OS.
/*
 * mrsas_suspend: system-suspend entry point (stub).
 * NOTE(review): the return statement is elided from this listing;
 * presumably returns 0 -- confirm against the full source.
 */
1282 mrsas_suspend(device_t dev)
1284 /* This will be filled when the driver will have hibernation support */
1289 * mrsas_resume: Resume entry point
1290 * input: Device struct pointer
1292 * This function is the entry point for system resume from the OS.
/*
 * mrsas_resume: system-resume entry point (stub).
 * NOTE(review): the return statement is elided from this listing;
 * presumably returns 0 -- confirm against the full source.
 */
1295 mrsas_resume(device_t dev)
1297 /* This will be filled when the driver will have hibernation support */
1302 * mrsas_get_softc_instance: Find softc instance based on cmd type
1304 * This function will return softc instance based on cmd type.
1305 * In some case, application fire ioctl on required management instance and
1306 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1307 * case, else get the softc instance from host_no provided by application in
/*
 * mrsas_get_softc_instance: resolve the softc for an ioctl.  For
 * MRSAS_IOC_GET_PCI_INFO the instance comes from cdev private data
 * (branch body elided in this listing); otherwise the application's
 * host_no indexes the global mrsas_mgmt_info table.
 *
 * NOTE(review): sc_ptr[] is indexed with user_ioc->host_no (line 1324)
 * BEFORE the max_index bounds check that appears below it (line 1328) --
 * verify ordering against the full source; as shown this reads out of
 * bounds for an invalid host_no.
 */
1311 static struct mrsas_softc *
1312 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1314 struct mrsas_softc *sc = NULL;
1315 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1317 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1321 * get the Host number & the softc from data sent by the
1324 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1326 printf("There is no Controller number %d\n",
1328 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1329 mrsas_dprint(sc, MRSAS_FAULT,
1330 "Invalid Controller number %d\n", user_ioc->host_no);
1337 * mrsas_ioctl: IOCtl commands entry point.
1339 * This function is the entry point for IOCtls from the OS. It calls the
1340 * appropriate function for processing depending on the command received.
/*
 * mrsas_ioctl: character-device ioctl entry point.
 * Rejects requests while a detach/shutdown is in progress or the HW is in
 * critical error; otherwise waits for any ongoing OCR (online controller
 * reset) before dispatching on 'cmd' (switch header elided in listing):
 * firmware pass-through (throttled by a counting semaphore), bus rescan,
 * and PCI-info query.
 * NOTE(review): declarations of 'ret'/'i', the switch statement, 'break's
 * and several braces are elided (line numbers jump); read alongside the
 * full source.
 */
1343 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1345 struct mrsas_softc *sc;
1347 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1349 sc = mrsas_get_softc_instance(dev, cmd, arg);
1353 if (sc->remove_in_progress ||
1354 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
1355 mrsas_dprint(sc, MRSAS_INFO,
1356 "Either driver remove or shutdown called or "
1357 "HW is in unrecoverable critical error state.\n");
/* Spin lock only guards the reset_in_progress test; dropped before sleeping. */
1360 mtx_lock_spin(&sc->ioctl_lock);
1361 if (!sc->reset_in_progress) {
1362 mtx_unlock_spin(&sc->ioctl_lock);
1365 mtx_unlock_spin(&sc->ioctl_lock);
/* Poll once per second until OCR completes, logging periodically. */
1366 while (sc->reset_in_progress) {
1368 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1369 mrsas_dprint(sc, MRSAS_INFO,
1370 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1372 pause("mr_ioctl", hz);
1377 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1378 #ifdef COMPAT_FREEBSD32
1379 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1382 * Decrement the Ioctl counting Semaphore before getting an
/* Blocks when MRSAS_MAX_IOCTL_CMDS pass-through cmds are already in flight. */
1385 sema_wait(&sc->ioctl_count_sema);
1387 ret = mrsas_passthru(sc, (void *)arg, cmd);
1389 /* Increment the Ioctl counting semaphore value */
1390 sema_post(&sc->ioctl_count_sema);
1393 case MRSAS_IOC_SCAN_BUS:
1394 ret = mrsas_bus_scan(sc);
1397 case MRSAS_IOC_GET_PCI_INFO:
/* Fill caller's buffer with this controller's PCI location. */
1398 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1399 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1400 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1401 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1402 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1403 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1404 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1405 "pci device no: %d, pci function no: %d,"
1406 "pci domain ID: %d\n",
1407 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1408 pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1413 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1421 * mrsas_poll: poll entry point for mrsas driver fd
1423 * This function is the entry point for poll from the OS. It waits for some AEN
1424 * events to be triggered from the controller and notifies back.
/*
 * mrsas_poll: poll(2) entry point for the management device.  Reports
 * readable when an AEN (asynchronous event notification) has been
 * triggered; otherwise records the thread via selrecord() under aen_lock
 * so it is woken when an event arrives.
 * NOTE(review): the fetch of 'sc' from the cdev and the declaration of
 * 'revents' are elided in this listing.
 */
1427 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1429 struct mrsas_softc *sc;
1434 if (poll_events & (POLLIN | POLLRDNORM)) {
1435 if (sc->mrsas_aen_triggered) {
1436 revents |= poll_events & (POLLIN | POLLRDNORM);
/* No event pending: register for wakeup instead of returning ready. */
1440 if (poll_events & (POLLIN | POLLRDNORM)) {
1441 mtx_lock(&sc->aen_lock);
1442 sc->mrsas_poll_waiting = 1;
1443 selrecord(td, &sc->mrsas_select);
1444 mtx_unlock(&sc->aen_lock);
1451 * mrsas_setup_irq: Set up interrupt
1452 * input: Adapter instance soft state
1454 * This function sets up interrupts as a bus resource, with flags indicating
1455 * resource permitting contemporaneous sharing and for resource to activate
/*
 * mrsas_setup_irq: set up interrupts.  Tries MSI-X first when enabled;
 * on failure falls back to a single shared legacy INTx vector, allocating
 * the IRQ resource and wiring mrsas_isr as the handler.
 * NOTE(review): the else-branch brace, irq_id[0] initialization, and the
 * return statements are elided in this listing (line numbers jump).
 */
1459 mrsas_setup_irq(struct mrsas_softc *sc)
1461 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1462 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1465 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1466 sc->irq_context[0].sc = sc;
1467 sc->irq_context[0].MSIxIndex = 0;
1469 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1470 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1471 if (sc->mrsas_irq[0] == NULL) {
1472 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
/* Filter is NULL: mrsas_isr runs as an ithread handler (INTR_MPSAFE). */
1476 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1477 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1478 &sc->irq_context[0], &sc->intr_handle[0])) {
1479 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1488 * mrsas_isr: ISR entry point
1489 * input: argument pointer
1491 * This function is the interrupt service routine entry point. There are two
1492 * types of interrupts, state change interrupt and response interrupt. If an
1493 * interrupt is not ours, we just return.
/*
 * mrsas_isr: interrupt service routine.  Bails out while interrupts are
 * masked or an OCR is in flight; for legacy (non-MSI-X) interrupts it
 * first checks/clears the interrupt status to see whether the interrupt
 * is ours.  Completed replies are then drained via mrsas_complete_cmd().
 * NOTE(review): the 'status' declaration and the early-return lines are
 * elided in this listing (line numbers jump 1506 -> 1510).
 */
1496 mrsas_isr(void *arg)
1498 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1499 struct mrsas_softc *sc = irq_context->sc;
1502 if (sc->mask_interrupts)
1505 if (!sc->msix_vectors) {
1506 status = mrsas_clear_intr(sc);
1510 /* If we are resetting, bail */
1511 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1512 printf(" Entered into ISR when OCR is going active. \n");
1513 mrsas_clear_intr(sc);
1516 /* Process for reply request and clear response interrupt */
1517 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1518 mrsas_clear_intr(sc);
1524 * mrsas_complete_cmd: Process reply request
1525 * input: Adapter instance soft state
1527 * This function is called from mrsas_isr() to process reply request and clear
1528 * response interrupt. Processing of the reply request entails walking
1529 * through the reply descriptor array for the command request pended from
1530 * Firmware. We look at the Function field to determine the command type and
1531 * perform the appropriate action. Before we return, we clear the response
/*
 * mrsas_complete_cmd: drain this MSI-x vector's reply-descriptor ring.
 * Starting at last_reply_idx[MSIxIndex], walks descriptors until an
 * unused (all-0xFF) entry is found, completing each command according to
 * its MPI2 Function type, consuming the slot (set back to ~0), and
 * periodically (every THRESHOLD_REPLY_COUNT) updating the reply post
 * host index register so firmware can reuse ring entries.
 *
 * NOTE(review): several elided lines here (num_completed increment, some
 * braces/breaks, the return) -- the embedded line numbers jump; read
 * alongside the full source before modifying.
 */
1535 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1537 Mpi2ReplyDescriptorsUnion_t *desc;
1538 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1539 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1540 struct mrsas_mpt_cmd *cmd_mpt;
1541 struct mrsas_mfi_cmd *cmd_mfi;
1542 u_int8_t reply_descript_type;
1543 u_int16_t smid, num_completed;
1544 u_int8_t status, extStatus;
1545 union desc_value desc_val;
1546 PLD_LOAD_BALANCE_INFO lbinfo;
1547 u_int32_t device_id;
1548 int threshold_reply_count = 0;
1550 MR_TASK_MANAGE_REQUEST *mr_tm_req;
1551 MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
1554 /* If we have a hardware error, not need to continue */
1555 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Position at this vector's segment of the ring, then the current index. */
1558 desc = sc->reply_desc_mem;
1559 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1560 + sc->last_reply_idx[MSIxIndex];
1562 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1564 desc_val.word = desc->Words;
1567 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1569 /* Find our reply descriptor for the command and process */
/* All-0xFF words mark an unconsumed slot; anything else is a valid reply. */
1570 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
1571 smid = reply_desc->SMID;
/* SMIDs are 1-based; mpt_cmd_list is 0-based. */
1572 cmd_mpt = sc->mpt_cmd_list[smid - 1];
1573 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1575 status = scsi_io_req->RaidContext.status;
1576 extStatus = scsi_io_req->RaidContext.exStatus;
1578 switch (scsi_io_req->Function) {
1579 case MPI2_FUNCTION_SCSI_TASK_MGMT:
1581 mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
1582 mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
1583 &mr_tm_req->TmRequest;
1584 device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
1585 "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
/* Wake the thread sleeping on the task-management completion. */
1587 wakeup_one((void *)&sc->ocr_chan);
1589 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
1590 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1591 lbinfo = &sc->load_balance_info[device_id];
/* Undo the load-balance accounting done at submit time, if any. */
1592 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1593 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1594 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1596 /* Fall thru and complete IO */
1597 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1598 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1599 mrsas_cmd_done(sc, cmd_mpt);
1600 scsi_io_req->RaidContext.status = 0;
1601 scsi_io_req->RaidContext.exStatus = 0;
1602 mrsas_atomic_dec(&sc->fw_outstanding);
1604 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
1605 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1607 * Make sure NOT TO release the mfi command from the called
1608 * function's context if it is fired with issue_polled call.
1609 * And also make sure that the issue_polled call should only be
1610 * used if INTERRUPT IS DISABLED.
1612 if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
1613 mrsas_release_mfi_cmd(cmd_mfi);
1615 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
/* Advance (and wrap) the per-vector consumer index. */
1619 sc->last_reply_idx[MSIxIndex]++;
1620 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1621 sc->last_reply_idx[MSIxIndex] = 0;
1623 desc->Words = ~((uint64_t)0x00); /* set it back to all
1626 threshold_reply_count++;
1628 /* Get the next reply descriptor */
/* On wrap, re-derive the base of this vector's ring segment. */
1629 if (!sc->last_reply_idx[MSIxIndex]) {
1630 desc = sc->reply_desc_mem;
1631 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1635 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1636 desc_val.word = desc->Words;
1638 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1640 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1644 * Write to reply post index after completing threshold reply
1645 * count and still there are more replies in reply queue
1646 * pending to be completed.
1648 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1649 if (sc->msix_enable) {
/* Gen3 controllers use per-group index registers; index 0 otherwise. */
1650 if (sc->mrsas_gen3_ctrl)
1651 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1652 ((MSIxIndex & 0x7) << 24) |
1653 sc->last_reply_idx[MSIxIndex]);
1655 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1656 sc->last_reply_idx[MSIxIndex]);
1658 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1659 reply_post_host_index), sc->last_reply_idx[0]);
1661 threshold_reply_count = 0;
1665 /* No match, just return */
1666 if (num_completed == 0)
1669 /* Clear response interrupt */
/* Final host-index update for whatever the loop left unreported. */
1670 if (sc->msix_enable) {
1671 if (sc->mrsas_gen3_ctrl) {
1672 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1673 ((MSIxIndex & 0x7) << 24) |
1674 sc->last_reply_idx[MSIxIndex]);
1676 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1677 sc->last_reply_idx[MSIxIndex]);
1679 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1680 reply_post_host_index), sc->last_reply_idx[0]);
1686 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1687 * input: Adapter instance soft state
1689 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1690 * It checks the command status and maps the appropriate CAM status for the
/*
 * mrsas_map_mpt_cmd_status: translate a firmware/MFI completion status
 * into the corresponding CAM ccb status for LD and FastPath IO.
 * NOTE(review): the switch(status) header, MFI_STAT_OK case label and
 * break statements are elided in this listing (line numbers jump).
 */
1694 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1696 struct mrsas_softc *sc = cmd->sc;
1697 u_int8_t *sense_data;
1701 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1703 case MFI_STAT_SCSI_IO_FAILED:
1704 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1705 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1706 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1708 /* For now just copy 18 bytes back */
/* 18 bytes = fixed-format SCSI sense; extra sense data is dropped. */
1709 memcpy(sense_data, cmd->sense, 18);
1710 cmd->ccb_ptr->csio.sense_len = 18;
1711 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1714 case MFI_STAT_LD_OFFLINE:
1715 case MFI_STAT_DEVICE_NOT_FOUND:
/* Non-zero LUN on a missing device maps to invalid LUN, else no device. */
1716 if (cmd->ccb_ptr->ccb_h.target_lun)
1717 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1719 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1721 case MFI_STAT_CONFIG_SEQ_MISMATCH:
/* Stale map sequence number: ask CAM to requeue and retry. */
1722 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1725 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1726 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1727 cmd->ccb_ptr->csio.scsi_status = status;
1733 * mrsas_alloc_mem: Allocate DMAable memory
1734 * input: Adapter instance soft state
1736 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1737 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1738 * Kernel virtual address. Callback argument is physical memory address.
/*
 * mrsas_alloc_mem: create the parent DMA tag and allocate/map all fixed
 * DMA regions: version buffer, IO request frames, chain frames, reply
 * descriptor ring (one segment per MSI-x vector), sense buffers, event
 * detail buffer, and finally the per-IO data buffer tag.  Each region
 * follows the same sequence: bus_dma_tag_create -> bus_dmamem_alloc ->
 * bzero -> bus_dmamap_load with mrsas_addr_cb saving the bus address.
 *
 * NOTE(review): many tag-parameter lines (alignment, maxsize, nsegments,
 * error-return paths) are elided in this listing -- the embedded line
 * numbers jump; consult the full source before changing any constraint.
 * Child tags below use BUS_SPACE_MAXADDR_32BIT, i.e. these regions are
 * kept below 4GB.
 */
1741 mrsas_alloc_mem(struct mrsas_softc *sc)
1743 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1744 chain_frame_size, evt_detail_size, count;
1747 * Allocate parent DMA tag
1749 if (bus_dma_tag_create(NULL, /* parent */
1752 BUS_SPACE_MAXADDR, /* lowaddr */
1753 BUS_SPACE_MAXADDR, /* highaddr */
1754 NULL, NULL, /* filter, filterarg */
1755 MAXPHYS, /* maxsize */
1756 sc->max_num_sge, /* nsegments */
1757 MAXPHYS, /* maxsegsize */
1759 NULL, NULL, /* lockfunc, lockarg */
1760 &sc->mrsas_parent_tag /* tag */
1762 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1766 * Allocate for version buffer
1768 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1769 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1771 BUS_SPACE_MAXADDR_32BIT,
1780 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1783 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1784 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1785 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1788 bzero(sc->verbuf_mem, verbuf_size);
1789 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1790 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1792 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1796 * Allocate IO Request Frames
1798 io_req_size = sc->io_frames_alloc_sz;
1799 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1801 BUS_SPACE_MAXADDR_32BIT,
1809 &sc->io_request_tag)) {
1810 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1813 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1814 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1815 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1818 bzero(sc->io_request_mem, io_req_size);
1819 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1820 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1821 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1822 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1826 * Allocate Chain Frames
1828 chain_frame_size = sc->chain_frames_alloc_sz;
1829 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1831 BUS_SPACE_MAXADDR_32BIT,
1839 &sc->chain_frame_tag)) {
1840 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1843 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1844 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1845 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1848 bzero(sc->chain_frame_mem, chain_frame_size);
1849 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1850 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1851 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1852 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
/* One reply-ring segment per MSI-x vector (at least one). */
1855 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1857 * Allocate Reply Descriptor Array
1859 reply_desc_size = sc->reply_alloc_sz * count;
1860 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1862 BUS_SPACE_MAXADDR_32BIT,
1870 &sc->reply_desc_tag)) {
1871 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1874 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1875 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1876 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1879 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1880 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1881 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1882 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1886 * Allocate Sense Buffer Array. Keep in lower 4GB
1888 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1889 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1891 BUS_SPACE_MAXADDR_32BIT,
1900 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1903 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1904 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1905 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1908 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1909 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1911 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1915 * Allocate for Event detail structure
1917 evt_detail_size = sizeof(struct mrsas_evt_detail);
1918 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1920 BUS_SPACE_MAXADDR_32BIT,
1928 &sc->evt_detail_tag)) {
1929 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1932 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1933 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1934 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1937 bzero(sc->evt_detail_mem, evt_detail_size);
1938 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1939 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1940 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1941 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1945 * Create a dma tag for data buffers; size will be the maximum
1946 * possible I/O size (280kB).
/* Tag only -- per-command maps are created in mrsas_alloc_mpt_cmds(). */
1948 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1955 sc->max_num_sge, /* nsegments */
1961 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1968 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1969 * input: callback argument, machine dependent type
1970 * that describes DMA segments, number of segments, error code
1972 * This function is for the driver to receive mapping information resultant of
1973 * the bus_dmamap_load(). The information is actually not being used, but the
1974 * address is saved anyway.
/*
 * mrsas_addr_cb: bus_dmamap_load() callback; stores the bus address of
 * the first DMA segment into the caller-supplied bus_addr_t (passed via
 * 'arg').  NOTE(review): the declaration casting 'arg' to 'addr' is
 * elided in this listing (line numbers jump 1977 -> 1982).
 */
1977 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1982 *addr = segs[0].ds_addr;
1986 * mrsas_setup_raidmap: Set up RAID map.
1987 * input: Adapter instance soft state
1989 * Allocate DMA memory for the RAID maps and perform setup.
/*
 * mrsas_setup_raidmap: allocate the two driver-local LD map buffers and
 * the two DMA-able firmware RAID map buffers (double-buffered), then
 * fetch the map from firmware (mrsas_get_map_info) and start map sync
 * (mrsas_sync_map_info) on success.
 * NOTE(review): error-return lines, closing braces, and the tag
 * alignment/size parameters are elided in this listing; the second loop
 * shadows the outer 'i' with a loop-local 'int i'.
 */
1992 mrsas_setup_raidmap(struct mrsas_softc *sc)
1996 for (i = 0; i < 2; i++) {
1998 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1999 /* Do Error handling */
2000 if (!sc->ld_drv_map[i]) {
2001 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
/* On second-buffer failure, release the first before aborting. */
2004 free(sc->ld_drv_map[0], M_MRSAS);
2005 /* ABORT driver initialization */
2010 for (int i = 0; i < 2; i++) {
2011 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2013 BUS_SPACE_MAXADDR_32BIT,
2021 &sc->raidmap_tag[i])) {
2022 device_printf(sc->mrsas_dev,
2023 "Cannot allocate raid map tag.\n");
2026 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2027 (void **)&sc->raidmap_mem[i],
2028 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2029 device_printf(sc->mrsas_dev,
2030 "Cannot allocate raidmap memory.\n");
2033 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2035 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2036 sc->raidmap_mem[i], sc->max_map_sz,
2037 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2039 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
/* NOTE(review): redundant as shown -- bus_dmamem_alloc success above
 * implies raidmap_mem[i] != NULL; confirm against the full source. */
2042 if (!sc->raidmap_mem[i]) {
2043 device_printf(sc->mrsas_dev,
2044 "Cannot allocate memory for raid map.\n");
2049 if (!mrsas_get_map_info(sc))
2050 mrsas_sync_map_info(sc);
2059 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2060 * @sc: Adapter soft state
2062 * Return 0 on success.
/*
 * megasas_setup_jbod_map: allocate the double-buffered PD sequence-number
 * map used for JBOD FastPath, when the controller advertises
 * useSeqNumJbodFP.  Skips allocation if already done; enables
 * use_seqnum_jbod_fp only after both sync directions succeed.
 * NOTE(review): declarations, returns, and several braces are elided in
 * this listing (line numbers jump).
 */
2065 megasas_setup_jbod_map(struct mrsas_softc *sc)
2068 uint32_t pd_seq_map_sz;
/* Header plus (MAX_PHYSICAL_DEVICES - 1) extra MR_PD_CFG_SEQ entries
 * beyond the one embedded in MR_PD_CFG_SEQ_NUM_SYNC. */
2070 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2071 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2073 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2074 sc->use_seqnum_jbod_fp = 0;
/* Already allocated (e.g. on re-init): nothing to do. */
2077 if (sc->jbodmap_mem[0])
2080 for (i = 0; i < 2; i++) {
2081 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2083 BUS_SPACE_MAXADDR_32BIT,
2091 &sc->jbodmap_tag[i])) {
2092 device_printf(sc->mrsas_dev,
2093 "Cannot allocate jbod map tag.\n");
2096 if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2097 (void **)&sc->jbodmap_mem[i],
2098 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2099 device_printf(sc->mrsas_dev,
2100 "Cannot allocate jbod map memory.\n");
2103 bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2105 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2106 sc->jbodmap_mem[i], pd_seq_map_sz,
2107 mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2109 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
/* NOTE(review): redundant as shown -- alloc success above implies
 * jbodmap_mem[i] != NULL; confirm against the full source. */
2112 if (!sc->jbodmap_mem[i]) {
2113 device_printf(sc->mrsas_dev,
2114 "Cannot allocate memory for jbod map.\n");
2115 sc->use_seqnum_jbod_fp = 0;
/* Enable JBOD FastPath only if both sync passes (read then write) work. */
2121 if (!megasas_sync_pd_seq_num(sc, false) &&
2122 !megasas_sync_pd_seq_num(sc, true))
2123 sc->use_seqnum_jbod_fp = 1;
2125 sc->use_seqnum_jbod_fp = 0;
2127 device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2131 * mrsas_init_fw: Initialize Firmware
2132 * input: Adapter soft state
2134 * Calls transition_to_ready() to make sure Firmware is in operational state and
2135 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2136 * issues internal commands to get the controller info after the IOC_INIT
2137 * command response is received by Firmware. Note: code relating to
2138 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2139 * is left here as placeholder.
/*
 * mrsas_init_fw: bring firmware to operational state and configure the
 * driver from it.  Sequence: transition_to_ready, MSI-X capability probe
 * and vector allocation, mrsas_init_adapter (IOC_INIT), MFI command pool,
 * controller-info fetch, RAID/JBOD map setup, PD/LD list fetch, and
 * max-sectors / feature-flag derivation.
 * NOTE(review): this listing elides returns, braces and some
 * declarations ('i', attach-level error paths) -- line numbers jump;
 * read alongside the full source.
 */
2142 mrsas_init_fw(struct mrsas_softc *sc)
2145 int ret, loop, ocr = 0;
2146 u_int32_t max_sectors_1;
2147 u_int32_t max_sectors_2;
2148 u_int32_t tmp_sectors;
2149 u_int32_t scratch_pad_2;
2150 int msix_enable = 0;
2151 int fw_msix_count = 0;
2153 /* Make sure Firmware is ready */
2154 ret = mrsas_transition_to_ready(sc, ocr);
2155 if (ret != SUCCESS) {
2158 /* MSI-x index 0- reply post host index register */
2159 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2160 /* Check if MSI-X is supported while in ready state */
/* Bit 26 of outbound scratch pad advertises MSI-X capability. */
2161 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2164 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2165 outbound_scratch_pad_2));
2167 /* Check max MSI-X vectors */
2168 if (sc->device_id == MRSAS_TBOLT) {
2169 sc->msix_vectors = (scratch_pad_2
2170 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2171 fw_msix_count = sc->msix_vectors;
2173 /* Invader/Fury supports 96 MSI-X vectors */
2174 sc->msix_vectors = ((scratch_pad_2
2175 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2176 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2177 fw_msix_count = sc->msix_vectors;
/* Non-TBOLT parts use supplemental per-group host index registers. */
2179 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2181 sc->msix_reg_offset[loop] =
2182 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2187 /* Don't bother allocating more MSI-X vectors than cpus */
2188 sc->msix_vectors = min(sc->msix_vectors,
2191 /* Allocate MSI-x vectors */
2192 if (mrsas_allocate_msix(sc) == SUCCESS)
2193 sc->msix_enable = 1;
2195 sc->msix_enable = 0;
2197 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2198 "Online CPU %d Current MSIX <%d>\n",
2199 fw_msix_count, mp_ncpus, sc->msix_vectors);
2201 if (mrsas_init_adapter(sc) != SUCCESS) {
2202 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2205 /* Allocate internal commands for pass-thru */
2206 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2207 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2210 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2211 if (!sc->ctrl_info) {
2212 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2216 * Get the controller info from FW, so that the MAX VD support
2217 * availability can be decided.
2219 if (mrsas_get_ctrl_info(sc)) {
2220 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2223 sc->secure_jbod_support =
2224 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2226 if (sc->secure_jbod_support)
2227 device_printf(sc->mrsas_dev, "FW supports SED \n");
2229 if (sc->use_seqnum_jbod_fp)
2230 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2232 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2233 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2234 "There seems to be some problem in the controller\n"
2235 "Please contact to the SUPPORT TEAM if the problem persists\n");
2237 megasas_setup_jbod_map(sc);
2239 /* For pass-thru, get PD/LD list and controller info */
2240 memset(sc->pd_list, 0,
2241 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2242 if (mrsas_get_pd_list(sc) != SUCCESS) {
2243 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2246 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2247 if (mrsas_get_ld_list(sc) != SUCCESS) {
2248 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2252 * Compute the max allowed sectors per IO: The controller info has
2253 * two limits on max sectors. Driver should use the minimum of these
2256 * 1 << stripe_sz_ops.min = max sectors per strip
2258 * Note that older firmwares ( < FW ver 30) didn't report information to
2259 * calculate max_sectors_1. So the number ended up as zero always.
2262 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2263 sc->ctrl_info->max_strips_per_io;
2264 max_sectors_2 = sc->ctrl_info->max_request_size;
2265 tmp_sectors = min(max_sectors_1, max_sectors_2);
/* Start from the SGE-derived ceiling; clamp by the FW limits if set. */
2266 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2268 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2269 sc->max_sectors_per_req = tmp_sectors;
2271 sc->disableOnlineCtrlReset =
2272 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2273 sc->UnevenSpanSupport =
2274 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2275 if (sc->UnevenSpanSupport) {
2276 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2277 sc->UnevenSpanSupport);
/* FastPath IO only if the RAID map validates. */
2279 if (MR_ValidateMapInfo(sc))
2280 sc->fast_path_io = 1;
2282 sc->fast_path_io = 0;
2288 * mrsas_init_adapter: Initializes the adapter/controller
2289 * input: Adapter soft state
2291 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2292 * ROC/controller. The FW register is read to determined the number of
2293 * commands that is supported. All memory allocations for IO is based on
2294 * max_cmd. Appropriate calculations are performed in this function.
/*
 * mrsas_init_adapter: size and allocate all IO resources, then issue
 * IOC_INIT.  Reads the max command count from the FW status scratch pad,
 * derives queue depths and frame/chain sizes (extended chain frames on
 * newer FW), resets per-vector reply indices, then allocates DMA memory,
 * MPT command pool, and runs mrsas_ioc_init().
 * NOTE(review): declarations of 'ret'/'i'/'status', returns, and some
 * braces are elided in this listing (line numbers jump).
 */
2297 mrsas_init_adapter(struct mrsas_softc *sc)
2300 u_int32_t max_cmd, scratch_pad_2;
2304 /* Read FW status register */
2305 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2307 /* Get operational params from status register */
2308 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2310 /* Decrement the max supported by 1, to correlate with FW */
2311 sc->max_fw_cmds = sc->max_fw_cmds - 1;
2312 max_cmd = sc->max_fw_cmds;
2314 /* Determine allocation size of command frames */
/* Reply queue: (max_cmd + 1) rounded up to a multiple of 16, doubled. */
2315 sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2316 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2317 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2318 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2319 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2320 outbound_scratch_pad_2));
2322 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2323 * Firmware support extended IO chain frame which is 4 time more
2324 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2325 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
2327 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2328 sc->max_chain_frame_sz =
2329 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2332 sc->max_chain_frame_sz =
2333 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2336 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
/* SGEs that fit in the main frame after the fixed request header. */
2337 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2338 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2340 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
/* -2: reserve entries for the chain element / end-of-list handling. */
2341 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2343 mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
2344 sc->max_num_sge, sc->max_chain_frame_sz);
2346 /* Used for pass thru MFI frame (DCMD) */
2347 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2349 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2350 sizeof(MPI2_SGE_IO_UNION)) / 16;
2352 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2354 for (i = 0; i < count; i++)
2355 sc->last_reply_idx[i] = 0;
2357 ret = mrsas_alloc_mem(sc);
2361 ret = mrsas_alloc_mpt_cmds(sc);
2365 ret = mrsas_ioc_init(sc);
2373 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2374 * input: Adapter soft state
2376 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2379 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2383 /* Allocate IOC INIT command */
/* First 1K holds the legacy MFI init frame; the MPI2 IOC INIT request is
 * placed at offset 1024 (see mrsas_ioc_init). */
2384 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2385 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2387 BUS_SPACE_MAXADDR_32BIT,
2395 &sc->ioc_init_tag)) {
2396 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2399 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2400 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2401 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2404 bzero(sc->ioc_init_mem, ioc_init_size);
/* mrsas_addr_cb stores the mapped bus address into ioc_init_phys_mem. */
2405 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2406 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2407 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2408 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2415 * mrsas_free_ioc_cmd: Frees DMA memory of the IOC Init command
2416 * input: Adapter soft state
2418 * Deallocates memory of the IOC Init cmd.
2421 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
/* A non-zero bus address implies the map was loaded; unload before free. */
2423 if (sc->ioc_init_phys_mem)
2424 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2425 if (sc->ioc_init_mem != NULL)
2426 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2427 if (sc->ioc_init_tag != NULL)
2428 bus_dma_tag_destroy(sc->ioc_init_tag);
2432 * mrsas_ioc_init: Sends IOC Init command to FW
2433 * input: Adapter soft state
2435 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2438 mrsas_ioc_init(struct mrsas_softc *sc)
2440 struct mrsas_init_frame *init_frame;
2441 pMpi2IOCInitRequest_t IOCInitMsg;
2442 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
2443 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
2444 bus_addr_t phys_addr;
2446 u_int32_t scratch_pad_2;
2448 /* Allocate memory for the IOC INIT command */
2449 if (mrsas_alloc_ioc_cmd(sc)) {
2450 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/* Probe scratch pad 2 for SYNC_CACHE support unless the tunable blocks it. */
2454 if (!sc->block_sync_cache) {
2455 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2456 outbound_scratch_pad_2));
2457 sc->fw_sync_cache_support = (scratch_pad_2 &
2458 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
/* MPI2 IOC INIT request sits at offset 1024 inside ioc_init_mem; the first
 * 1K holds the MFI init frame that points at it (see phys_addr below). */
2461 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
2462 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
2463 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2464 IOCInitMsg->MsgVersion = MPI2_VERSION;
2465 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
2466 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
2467 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
2468 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
2469 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
2470 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
2472 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
2473 init_frame->cmd = MFI_CMD_INIT;
/* 0xFF marks "no response yet"; FW overwrites it on completion (polled below). */
2474 init_frame->cmd_status = 0xFF;
2475 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2477 /* driver support Extended MSIX */
2478 if (sc->mrsas_gen3_ctrl) {
2479 init_frame->driver_operations.
2480 mfi_capabilities.support_additional_msix = 1;
2482 if (sc->verbuf_mem) {
2483 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
/* NOTE(review): driver_ver_lo is 32-bit; the bus_addr_t value is truncated
 * here -- presumably verbuf comes from a 32-bit-bounded DMA tag. Confirm. */
2485 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
2486 init_frame->driver_ver_hi = 0;
2488 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
2489 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
2490 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
2491 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
2492 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
/* Point the MFI frame at the embedded MPI2 IOC INIT request (+1024). */
2493 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
2494 init_frame->queue_info_new_phys_addr_lo = phys_addr;
2495 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
2497 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
2498 req_desc.MFAIo.RequestFlags =
2499 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2501 mrsas_disable_intr(sc);
2502 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
2503 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
2506 * Poll response timer to wait for Firmware response. While this
2507 * timer with the DELAY call could block CPU, the time interval for
2508 * this is only 1 millisecond.
/* Each poll iteration waits ~1ms, bounding the total wait to max_wait seconds. */
2510 if (init_frame->cmd_status == 0xFF) {
2511 for (i = 0; i < (max_wait * 1000); i++) {
2512 if (init_frame->cmd_status == 0xFF)
2518 if (init_frame->cmd_status == 0)
2519 mrsas_dprint(sc, MRSAS_OCR,
2520 "IOC INIT response received from FW.\n");
2522 if (init_frame->cmd_status == 0xFF)
2523 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
2525 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
/* The IOC INIT frame is one-shot; free its DMA memory regardless of outcome. */
2529 mrsas_free_ioc_cmd(sc);
2534 * mrsas_alloc_mpt_cmds: Allocates the command packets
2535 * input: Adapter instance soft state
2537 * This function allocates the internal commands for IOs. Each command that is
2538 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2539 * array is allocated with mrsas_mpt_cmd context. The free commands are
2540 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2544 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2547 u_int32_t max_cmd, count;
2548 struct mrsas_mpt_cmd *cmd;
2549 pMpi2ReplyDescriptorsUnion_t reply_desc;
2550 u_int32_t offset, chain_offset, sense_offset;
2551 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2552 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2554 max_cmd = sc->max_fw_cmds;
2556 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2557 if (!sc->req_desc) {
2558 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2561 memset(sc->req_desc, 0, sc->request_alloc_sz);
2564 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2565 * Allocate the dynamic array first and then allocate individual
2568 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2569 if (!sc->mpt_cmd_list) {
2570 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2573 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2574 for (i = 0; i < max_cmd; i++) {
2575 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
/* On a partial failure, unwind every element allocated so far. */
2577 if (!sc->mpt_cmd_list[i]) {
2578 for (j = 0; j < i; j++)
2579 free(sc->mpt_cmd_list[j], M_MRSAS);
2580 free(sc->mpt_cmd_list, M_MRSAS);
2581 sc->mpt_cmd_list = NULL;
/* Skip the first IO frame: frame 0 corresponds to the reserved SMID 0. */
2586 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2587 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2588 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2589 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2590 sense_base = (u_int8_t *)sc->sense_mem;
2591 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
/* Carve per-command slices (IO frame, chain frame, sense buffer) out of the
 * shared DMA regions and thread each command onto the free pool. */
2592 for (i = 0; i < max_cmd; i++) {
2593 cmd = sc->mpt_cmd_list[i];
2594 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2595 chain_offset = sc->max_chain_frame_sz * i;
2596 sense_offset = MRSAS_SENSE_LEN * i;
2597 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2599 cmd->ccb_ptr = NULL;
2600 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
/* MRSAS_ULONG_MAX in sync_cmd_idx means "no MFI command attached". */
2601 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2603 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2604 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2605 cmd->io_request_phys_addr = io_req_base_phys + offset;
2606 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2607 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2608 cmd->sense = sense_base + sense_offset;
2609 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2610 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2613 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2616 /* Initialize reply descriptor array to 0xFFFFFFFF */
2617 reply_desc = sc->reply_desc_mem;
/* The descriptor area holds reply_q_depth entries per MSI-x queue. */
2618 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2619 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2620 reply_desc->Words = MRSAS_ULONG_MAX;
2626 * mrsas_fire_cmd: Sends command to FW
2627 * input: Adapter softstate
2628 * request descriptor address low
2629 * request descriptor address high
2631 * This functions fires the command to Firmware by writing to the
2632 * inbound_low_queue_port and inbound_high_queue_port.
2635 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2636 u_int32_t req_desc_hi)
/* pci_lock keeps the two 32-bit halves of the descriptor write atomic with
 * respect to other submitters. */
2638 mtx_lock(&sc->pci_lock);
2639 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2641 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2643 mtx_unlock(&sc->pci_lock);
2647 * mrsas_transition_to_ready: Move FW to Ready state input:
2648 * Adapter instance soft state
2650 * During the initialization, FW passes can potentially be in any one of several
2651 * possible states. If the FW in operational, waiting-for-handshake states,
2652 * driver must take steps to bring it to ready state. Otherwise, it has to
2653 * wait for the ready state.
2656 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2660 u_int32_t val, fw_state;
2661 u_int32_t cur_state;
2662 u_int32_t abs_state, curr_abs_state;
2664 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2665 fw_state = val & MFI_STATE_MASK;
2666 max_wait = MRSAS_RESET_WAIT_TIME;
2668 if (fw_state != MFI_STATE_READY)
2669 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
/* Drive the FW state machine: for each observed state, perform the action
 * that nudges FW toward READY, then wait (below) for the state to change. */
2671 while (fw_state != MFI_STATE_READY) {
2672 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2674 case MFI_STATE_FAULT:
2675 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2677 cur_state = MFI_STATE_FAULT;
2681 case MFI_STATE_WAIT_HANDSHAKE:
2682 /* Set the CLR bit in inbound doorbell */
2683 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2684 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
2685 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2687 case MFI_STATE_BOOT_MESSAGE_PENDING:
2688 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2690 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2692 case MFI_STATE_OPERATIONAL:
2694 * Bring it to READY state; assuming max wait 10
/* Issue an MFI reset and poll the doorbell ack bit (bit 0) to clear. */
2697 mrsas_disable_intr(sc);
2698 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
2699 for (i = 0; i < max_wait * 1000; i++) {
2700 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2705 cur_state = MFI_STATE_OPERATIONAL;
2707 case MFI_STATE_UNDEFINED:
2709 * This state should not last for more than 2
2712 cur_state = MFI_STATE_UNDEFINED;
/* The remaining states are transient boot phases: just record and wait. */
2714 case MFI_STATE_BB_INIT:
2715 cur_state = MFI_STATE_BB_INIT;
2717 case MFI_STATE_FW_INIT:
2718 cur_state = MFI_STATE_FW_INIT;
2720 case MFI_STATE_FW_INIT_2:
2721 cur_state = MFI_STATE_FW_INIT_2;
2723 case MFI_STATE_DEVICE_SCAN:
2724 cur_state = MFI_STATE_DEVICE_SCAN;
2726 case MFI_STATE_FLUSH_CACHE:
2727 cur_state = MFI_STATE_FLUSH_CACHE;
2730 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2735 * The cur_state should not last for more than max_wait secs
/* Poll once per iteration until the full scratch-pad value changes. */
2737 for (i = 0; i < (max_wait * 1000); i++) {
2738 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2739 outbound_scratch_pad)) & MFI_STATE_MASK);
2740 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2741 outbound_scratch_pad));
2742 if (abs_state == curr_abs_state)
2749 * Return error if fw_state hasn't changed after max_wait
2751 if (curr_abs_state == abs_state) {
2752 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2753 "in %d secs\n", fw_state, max_wait);
2757 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2762 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2763 * input: Adapter soft state
2765 * This function removes an MFI command from the command list.
2767 struct mrsas_mfi_cmd *
2768 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
/* Returns NULL when the free pool is empty; callers must check. */
2770 struct mrsas_mfi_cmd *cmd = NULL;
2772 mtx_lock(&sc->mfi_cmd_pool_lock);
2773 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2774 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2775 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2777 mtx_unlock(&sc->mfi_cmd_pool_lock);
2783 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
2784 * input: Adapter Context.
2786 * This function will check FW status register and flag do_timeout_reset flag.
2787 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2791 mrsas_ocr_thread(void *arg)
2793 struct mrsas_softc *sc;
2794 u_int32_t fw_status, fw_state;
2795 u_int8_t tm_target_reset_failed = 0;
2797 sc = (struct mrsas_softc *)arg;
2799 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2801 sc->ocr_thread_active = 1;
2802 mtx_lock(&sc->sim_lock);
2804 /* Sleep for 1 second and check the queue status */
/* msleep also wakes early if someone signals ocr_chan (e.g. on detach). */
2805 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2806 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2807 if (sc->remove_in_progress ||
2808 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2809 mrsas_dprint(sc, MRSAS_OCR,
2810 "Exit due to %s from %s\n",
2811 sc->remove_in_progress ? "Shutdown" :
2812 "Hardware critical error", __func__);
2815 fw_status = mrsas_read_reg(sc,
2816 offsetof(mrsas_reg_set, outbound_scratch_pad));
2817 fw_state = fw_status & MFI_STATE_MASK;
/* Recovery is needed on FW fault, a DCMD timeout, or pending target resets. */
2818 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
2819 mrsas_atomic_read(&sc->target_reset_outstanding)) {
2821 /* First, freeze further IOs to come to the SIM */
2822 mrsas_xpt_freeze(sc);
2824 /* If this is an IO timeout then go for target reset */
2825 if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
2826 device_printf(sc->mrsas_dev, "Initiating Target RESET "
2827 "because of SCSI IO timeout!\n");
2829 /* Let the remaining IOs to complete */
2830 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2831 "mrsas_reset_targets", 5 * hz);
2833 /* Try to reset the target device */
2834 if (mrsas_reset_targets(sc) == FAIL)
2835 tm_target_reset_failed = 1;
2838 /* If this is a DCMD timeout or FW fault,
2839 * then go for controller reset
2841 if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
2842 (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
/* NOTE(review): "Initiaiting" typo in the two log strings below; a doc-only
 * change cannot touch runtime strings -- fix in a code change. */
2843 if (tm_target_reset_failed)
2844 device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
2847 device_printf(sc->mrsas_dev, "Initiaiting OCR "
2848 "because of %s!\n", sc->do_timedout_reset ?
2849 "DCMD IO Timeout" : "FW fault");
/* ioctl_lock is a spin mutex: only flag flips under it, no sleeping. */
2851 mtx_lock_spin(&sc->ioctl_lock);
2852 sc->reset_in_progress = 1;
2853 mtx_unlock_spin(&sc->ioctl_lock);
2857 * Wait for the AEN task to be completed if it is running.
/* Drop sim_lock across the drain to avoid deadlocking with the AEN task. */
2859 mtx_unlock(&sc->sim_lock);
2860 taskqueue_drain(sc->ev_tq, &sc->ev_task);
2861 mtx_lock(&sc->sim_lock);
2863 taskqueue_block(sc->ev_tq);
2864 /* Try to reset the controller */
2865 mrsas_reset_ctrl(sc, sc->do_timedout_reset);
2867 sc->do_timedout_reset = 0;
2868 sc->reset_in_progress = 0;
2869 tm_target_reset_failed = 0;
2870 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
2871 memset(sc->target_reset_pool, 0,
2872 sizeof(sc->target_reset_pool));
2873 taskqueue_unblock(sc->ev_tq);
2876 /* Now allow IOs to come to the SIM */
2877 mrsas_xpt_release(sc);
2880 mtx_unlock(&sc->sim_lock);
2881 sc->ocr_thread_active = 0;
2882 mrsas_kproc_exit(0);
2886 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
2887 * input: Adapter Context.
2889 * This function will clear reply descriptor so that post OCR driver and FW will
2893 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2896 pMpi2ReplyDescriptorsUnion_t reply_desc;
2898 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2899 for (i = 0; i < count; i++)
2900 sc->last_reply_idx[i] = 0;
2902 reply_desc = sc->reply_desc_mem;
2903 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2904 reply_desc->Words = MRSAS_ULONG_MAX;
2909 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
2910 * input: Adapter Context.
2912 * This function will run from thread context so that it can sleep. 1. Do not
2913 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2914 * to complete for 180 seconds. 3. If #2 does not find any outstanding
2915 * command Controller is in working state, so skip OCR. Otherwise, do
2916 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2917 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2918 * OCR, Re-fire Managment command and move Controller to Operation state.
2921 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
2923 int retval = SUCCESS, i, j, retry = 0;
2924 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2926 struct mrsas_mfi_cmd *mfi_cmd;
2927 struct mrsas_mpt_cmd *mpt_cmd;
2928 union mrsas_evt_class_locale class_locale;
2929 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2931 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2932 device_printf(sc->mrsas_dev,
2933 "mrsas: Hardware critical error, returning FAIL.\n");
2936 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2937 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2938 mrsas_disable_intr(sc);
2939 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
2940 sc->mrsas_fw_fault_check_delay * hz);
2942 /* First try waiting for commands to complete */
2943 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
2944 mrsas_dprint(sc, MRSAS_OCR,
2945 "resetting adapter from %s.\n",
2947 /* Now return commands back to the CAM layer */
/* Fail every in-flight CCB with BUS_RESET so CAM can retry after the OCR. */
2948 mtx_unlock(&sc->sim_lock);
2949 for (i = 0; i < sc->max_fw_cmds; i++) {
2950 mpt_cmd = sc->mpt_cmd_list[i];
2951 if (mpt_cmd->ccb_ptr) {
2952 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2953 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2954 mrsas_cmd_done(sc, mpt_cmd);
2955 mrsas_atomic_dec(&sc->fw_outstanding);
2958 mtx_lock(&sc->sim_lock);
2960 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2961 outbound_scratch_pad));
2962 abs_state = status_reg & MFI_STATE_MASK;
2963 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2964 if (sc->disableOnlineCtrlReset ||
2965 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2966 /* Reset not supported, kill adapter */
2967 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2972 /* Now try to reset the chip */
/* Unlock the fusion diag register by writing the 6-key magic sequence. */
2973 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2974 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2975 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2976 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2977 MPI2_WRSEQ_1ST_KEY_VALUE);
2978 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2979 MPI2_WRSEQ_2ND_KEY_VALUE);
2980 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2981 MPI2_WRSEQ_3RD_KEY_VALUE);
2982 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2983 MPI2_WRSEQ_4TH_KEY_VALUE);
2984 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2985 MPI2_WRSEQ_5TH_KEY_VALUE);
2986 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2987 MPI2_WRSEQ_6TH_KEY_VALUE);
2989 /* Check that the diag write enable (DRWE) bit is on */
2990 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2993 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2995 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2997 if (retry++ == 100) {
2998 mrsas_dprint(sc, MRSAS_OCR,
2999 "Host diag unlock failed!\n");
3003 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3006 /* Send chip reset command */
3007 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3008 host_diag | HOST_DIAG_RESET_ADAPTER);
3011 /* Make sure reset adapter bit is cleared */
3012 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3015 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3017 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3019 if (retry++ == 1000) {
3020 mrsas_dprint(sc, MRSAS_OCR,
3021 "Diag reset adapter never cleared!\n");
3025 if (host_diag & HOST_DIAG_RESET_ADAPTER)
/* Wait for FW to progress past its early boot states. */
3028 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3029 outbound_scratch_pad)) & MFI_STATE_MASK;
3032 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3034 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3035 outbound_scratch_pad)) & MFI_STATE_MASK;
3037 if (abs_state <= MFI_STATE_FW_INIT) {
3038 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3039 " state = 0x%x\n", abs_state);
3042 /* Wait for FW to become ready */
3043 if (mrsas_transition_to_ready(sc, 1)) {
3044 mrsas_dprint(sc, MRSAS_OCR,
3045 "mrsas: Failed to transition controller to ready.\n");
3048 mrsas_reset_reply_desc(sc);
3049 if (mrsas_ioc_init(sc)) {
3050 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
/* Walk the MPT pool: release plain internal MFI commands, but re-fire
 * user-originated (sync_cmd) IOCTL commands that were in flight. */
3053 for (j = 0; j < sc->max_fw_cmds; j++) {
3054 mpt_cmd = sc->mpt_cmd_list[j];
3055 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3056 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3057 /* If not an IOCTL then release the command else re-fire */
3058 if (!mfi_cmd->sync_cmd) {
3059 mrsas_release_mfi_cmd(mfi_cmd);
3061 req_desc = mrsas_get_request_desc(sc,
3062 mfi_cmd->cmd_id.context.smid - 1);
3063 mrsas_dprint(sc, MRSAS_OCR,
3064 "Re-fire command DCMD opcode 0x%x index %d\n ",
3065 mfi_cmd->frame->dcmd.opcode, j);
3067 device_printf(sc->mrsas_dev,
3068 "Cannot build MPT cmd.\n");
3070 mrsas_fire_cmd(sc, req_desc->addr.u.low,
3071 req_desc->addr.u.high);
3076 /* Reset load balance info */
3077 memset(sc->load_balance_info, 0,
3078 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3080 if (mrsas_get_ctrl_info(sc)) {
3085 if (!mrsas_get_map_info(sc))
3086 mrsas_sync_map_info(sc);
3088 megasas_setup_jbod_map(sc);
3090 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3091 mrsas_enable_intr(sc);
3092 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3094 /* Register AEN with FW for last sequence number */
3095 class_locale.members.reserved = 0;
3096 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3097 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3099 mtx_unlock(&sc->sim_lock);
3100 if (mrsas_register_aen(sc, sc->last_seq_num,
3101 class_locale.word)) {
3102 device_printf(sc->mrsas_dev,
3103 "ERROR: AEN registration FAILED from OCR !!! "
3104 "Further events from the controller cannot be notified."
3105 "Either there is some problem in the controller"
3106 "or the controller does not support AEN.\n"
3107 "Please contact to the SUPPORT TEAM if the problem persists\n");
3109 mtx_lock(&sc->sim_lock);
3111 /* Adapter reset completed successfully */
3112 device_printf(sc->mrsas_dev, "Reset successful\n");
3116 /* Reset failed, kill the adapter */
3117 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3121 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3122 mrsas_enable_intr(sc);
3123 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3126 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3127 mrsas_dprint(sc, MRSAS_OCR,
3128 "Reset Exit with %d.\n", retval);
3133 * mrsas_kill_hba: Kill HBA when OCR is not supported
3134 * input: Adapter Context.
3136 * This function will kill HBA when OCR is not supported.
3139 mrsas_kill_hba(struct mrsas_softc *sc)
3141 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
3143 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
3144 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
/* Read back the doorbell to flush the posted write before completing IOCTLs. */
3147 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
3148 mrsas_complete_outstanding_ioctls(sc);
3152 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3153 * input: Controller softc
3158 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3161 struct mrsas_mpt_cmd *cmd_mpt;
3162 struct mrsas_mfi_cmd *cmd_mfi;
3163 u_int32_t count, MSIxIndex;
3165 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* Scan the whole MPT pool for commands wrapping a user IOCTL (sync_cmd)
 * and complete them so blocked ioctl callers wake up after kill_hba. */
3166 for (i = 0; i < sc->max_fw_cmds; i++) {
3167 cmd_mpt = sc->mpt_cmd_list[i];
3169 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3170 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3171 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3172 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3173 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3174 cmd_mpt->io_request->RaidContext.status);
3181 * mrsas_wait_for_outstanding: Wait for outstanding commands
3182 * input: Adapter Context.
3184 * This function will wait for 180 seconds for outstanding commands to be
3188 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3190 int i, outstanding, retval = 0;
3191 u_int32_t fw_state, count, MSIxIndex;
/* One iteration per second, up to MRSAS_RESET_WAIT_TIME seconds. */
3194 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
3195 if (sc->remove_in_progress) {
3196 mrsas_dprint(sc, MRSAS_OCR,
3197 "Driver remove or shutdown called.\n");
3201 /* Check if firmware is in fault state */
3202 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3203 outbound_scratch_pad)) & MFI_STATE_MASK;
3204 if (fw_state == MFI_STATE_FAULT) {
3205 mrsas_dprint(sc, MRSAS_OCR,
3206 "Found FW in FAULT state, will reset adapter.\n");
3207 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* Drain completions without sim_lock; mrsas_complete_cmd may call CAM. */
3208 mtx_unlock(&sc->sim_lock);
3209 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3210 mrsas_complete_cmd(sc, MSIxIndex);
3211 mtx_lock(&sc->sim_lock);
3215 if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3216 mrsas_dprint(sc, MRSAS_OCR,
3217 "DCMD IO TIMEOUT detected, will reset adapter.\n");
3221 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
/* Periodically log progress and kick the completion path along. */
3225 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3226 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3227 "commands to complete\n", i, outstanding);
3228 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3229 mtx_unlock(&sc->sim_lock);
3230 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3231 mrsas_complete_cmd(sc, MSIxIndex);
3232 mtx_lock(&sc->sim_lock);
3237 if (mrsas_atomic_read(&sc->fw_outstanding)) {
3238 mrsas_dprint(sc, MRSAS_OCR,
3239 " pending commands remain after waiting,"
3240 " will reset adapter.\n");
3248 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3249 * input: Command packet for return to free cmd pool
3251 * This function returns the MFI & MPT command to the command list.
3254 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
3256 struct mrsas_softc *sc = cmd_mfi->sc;
3257 struct mrsas_mpt_cmd *cmd_mpt;
3260 mtx_lock(&sc->mfi_cmd_pool_lock);
3262 * Release the mpt command (if at all it is allocated
3263 * associated with the mfi command
/* A zero SMID means no MPT command was ever attached (SMIDs start at 1). */
3265 if (cmd_mfi->cmd_id.context.smid) {
3266 mtx_lock(&sc->mpt_cmd_pool_lock);
3267 /* Get the mpt cmd from mfi cmd frame's smid value */
3268 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
/* Detach the MFI command and return the MPT command to its pool. */
3270 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
3271 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
3272 mtx_unlock(&sc->mpt_cmd_pool_lock);
3274 /* Release the mfi command */
3275 cmd_mfi->ccb_ptr = NULL;
3276 cmd_mfi->cmd_id.frame_count = 0;
3277 TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
3278 mtx_unlock(&sc->mfi_cmd_pool_lock);
3284 * mrsas_get_controller_info: Returns FW's controller structure
3285 * input: Adapter soft state
3286 * Controller information structure
3288 * Issues an internal command (DCMD) to get the FW's controller structure. This
3289 * information is mainly used to find out the maximum IO transfer per command
3290 * supported by the FW.
3293 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3296 u_int8_t do_ocr = 1;
3297 struct mrsas_mfi_cmd *cmd;
3298 struct mrsas_dcmd_frame *dcmd;
3300 cmd = mrsas_get_mfi_cmd(sc);
3303 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3306 dcmd = &cmd->frame->dcmd;
3308 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3309 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3310 mrsas_release_mfi_cmd(cmd);
3313 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Build the MR_DCMD_CTRL_GET_INFO frame with a single read SGE pointing at
 * the DMA buffer allocated above. */
3315 dcmd->cmd = MFI_CMD_DCMD;
3316 dcmd->cmd_status = 0xFF;
3317 dcmd->sge_count = 1;
3318 dcmd->flags = MFI_FRAME_DIR_READ;
3321 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
3322 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
3323 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
3324 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
/* With interrupts masked (early attach / OCR) the command must be polled. */
3326 if (!sc->mask_interrupts)
3327 retcode = mrsas_issue_blocked_cmd(sc, cmd);
3329 retcode = mrsas_issue_polled(sc, cmd);
3331 if (retcode == ETIMEDOUT)
3334 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3337 mrsas_update_ext_vd_details(sc);
3339 sc->use_seqnum_jbod_fp =
3340 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3341 sc->disableOnlineCtrlReset =
3342 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
3345 mrsas_free_ctlr_info_cmd(sc);
/* On timeout, schedule a DCMD-timeout OCR instead of releasing the command
 * (the OCR path will re-fire or reclaim it). */
3348 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3350 if (!sc->mask_interrupts)
3351 mrsas_release_mfi_cmd(cmd);
3357 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3359 * sc - Controller's softc
3362 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3364 sc->max256vdSupport =
3365 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3366 /* Below is additional check to address future FW enhancement */
3367 if (sc->ctrl_info->max_lds > 64)
3368 sc->max256vdSupport = 1;
/* Driver-side limits are fixed by channel topology constants. */
3370 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3371 * MRSAS_MAX_DEV_PER_CHANNEL;
3372 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3373 * MRSAS_MAX_DEV_PER_CHANNEL;
/* FW-side limits depend on whether extended (256) VDs are supported. */
3374 if (sc->max256vdSupport) {
3375 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3376 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3378 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3379 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
/* Map sizes: the structs embed one MR_LD_SPAN_MAP, hence "count - 1". */
3382 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3383 (sizeof(MR_LD_SPAN_MAP) *
3384 (sc->fw_supported_vd_count - 1));
3385 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3386 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3387 (sizeof(MR_LD_SPAN_MAP) *
3388 (sc->drv_supported_vd_count - 1));
3390 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3392 if (sc->max256vdSupport)
3393 sc->current_map_sz = sc->new_map_sz;
3395 sc->current_map_sz = sc->old_map_sz;
3399 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3400 * input: Adapter soft state
3402 * Allocates DMAable memory for the controller info internal command.
3405 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3409 /* Allocate get controller info command */
3410 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3411 if (bus_dma_tag_create(sc->mrsas_parent_tag,
3413 BUS_SPACE_MAXADDR_32BIT,
3421 &sc->ctlr_info_tag)) {
3422 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3425 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3426 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3427 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
/* mrsas_addr_cb stores the mapped bus address into ctlr_info_phys_addr. */
3430 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3431 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3432 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3433 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3436 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3441 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3442 * input: Adapter soft state
3444 * Deallocates memory of the get controller info cmd.
3447 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
/*
 * Tears down the ctlr-info DMA resources in reverse order of
 * mrsas_alloc_ctlr_info_cmd: unload map, free memory, destroy tag.
 * Each step is guarded so partial allocations are freed safely.
 */
3449 if (sc->ctlr_info_phys_addr)
3450 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3451 if (sc->ctlr_info_mem != NULL)
3452 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3453 if (sc->ctlr_info_tag != NULL)
3454 bus_dma_tag_destroy(sc->ctlr_info_tag);
3458 * mrsas_issue_polled: Issues a polling command
3459 * inputs: Adapter soft state
3460 * Command packet to be issued
3462 * This function is for posting of internal commands to Firmware. MFI requires
3463 * the cmd_status to be set to 0xFF before posting. The maximun wait time of
3464 * the poll response timer is 180 seconds.
3467 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3469 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3470 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3471 int i, retcode = SUCCESS;
/* 0xFF is the "not yet completed" sentinel; FW overwrites it on completion. */
3473 frame_hdr->cmd_status = 0xFF;
/* Completion is detected by polling cmd_status, not via the reply queue. */
3474 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3476 /* Issue the frame using inbound queue port */
3477 if (mrsas_issue_dcmd(sc, cmd)) {
3478 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3482 * Poll response timer to wait for Firmware response. While this
3483 * timer with the DELAY call could block CPU, the time interval for
3484 * this is only 1 millisecond.
/* Busy-poll in 1 ms steps, up to max_wait seconds total. */
3486 if (frame_hdr->cmd_status == 0xFF) {
3487 for (i = 0; i < (max_wait * 1000); i++) {
3488 if (frame_hdr->cmd_status == 0xFF)
/* Still 0xFF after the full window: report the timeout and the opcode. */
3494 if (frame_hdr->cmd_status == 0xFF) {
3495 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3496 "seconds from %s\n", max_wait, __func__);
3497 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3498 cmd->frame->dcmd.opcode);
3499 retcode = ETIMEDOUT;
3505 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3506 * input: Adapter soft state mfi cmd pointer
3508 * This function is called by mrsas_issued_blocked_cmd() and
3509 * mrsas_issued_polled(), to build the MPT command and then fire the command
3513 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3515 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
/* Wrap the MFI frame in an MPT passthru request, then post its descriptor. */
3517 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3519 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3522 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3528 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3529 * input: Adapter soft state mfi cmd to build
3531 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3532 * command and prepares the MPT command to send to Firmware.
3534 MRSAS_REQUEST_DESCRIPTOR_UNION *
3535 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3537 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3540 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3541 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
/* SMID was stored by mrsas_build_mptmfi_passthru(); descriptors are 0-based. */
3544 index = cmd->cmd_id.context.smid;
3546 req_desc = mrsas_get_request_desc(sc, index - 1);
3550 req_desc->addr.Words = 0;
3551 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
/* SMID in the descriptor itself stays 1-based. */
3553 req_desc->SCSIIO.SMID = index;
3559 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3560 * input: Adapter soft state mfi cmd pointer
3562 * The MPT command and the io_request are setup as a passthru command. The SGE
3563 * chain address is set to frame_phys_addr of the MFI command.
3566 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3568 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3569 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3570 struct mrsas_mpt_cmd *mpt_cmd;
3571 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3573 mpt_cmd = mrsas_get_mpt_cmd(sc);
3577 /* Save the smid. To be used for returning the cmd */
3578 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
/* Cross-link MPT slot back to the MFI command for completion routing. */
3580 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3583 * For cmds where the flag is set, store the flag and check on
3584 * completion. For cmds with this flag, don't call
3585 * mrsas_complete_cmd.
3588 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3589 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3591 io_req = mpt_cmd->io_request;
/* Gen3 controllers require the last main-frame SGE flags cleared. */
3593 if (sc->mrsas_gen3_ctrl) {
3594 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3596 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3597 sgl_ptr_end->Flags = 0;
3599 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3601 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
/* SGLOffset0 is expressed in 32-bit-word units per the MPI2 spec. */
3602 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3603 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
/* The chain element points at the original MFI frame in DMA memory. */
3605 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3607 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3608 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3610 mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
3616 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3617 * input: Adapter soft state Command to be issued
3619 * This function waits on an event for the command to be returned from the ISR.
3620 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3621 * internal and ioctl commands.
3624 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3626 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3627 unsigned long total_time = 0;
3628 int retcode = SUCCESS;
3630 /* Initialize cmd_status */
/* 0xFF = not completed; the completion path overwrites it and wakes us. */
3631 cmd->cmd_status = 0xFF;
3633 /* Build MPT-MFI command for issue to FW */
3634 if (mrsas_issue_dcmd(sc, cmd)) {
3635 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
/* Sleep channel shared with mrsas_wakeup(); woken by the ISR path. */
3638 sc->chan = (void *)&cmd;
3641 if (cmd->cmd_status == 0xFF) {
3642 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3646 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL
3649 if (total_time >= max_wait) {
3650 device_printf(sc->mrsas_dev,
3651 "Internal command timed out after %d seconds.\n", max_wait);
/* Never woken/completed: report timeout with the failing DCMD opcode. */
3658 if (cmd->cmd_status == 0xFF) {
3659 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3660 "seconds from %s\n", max_wait, __func__);
3661 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3662 cmd->frame->dcmd.opcode);
3663 retcode = ETIMEDOUT;
3669 * mrsas_complete_mptmfi_passthru: Completes a command
3670 * input: @sc: Adapter soft state
3671 * @cmd: Command to be completed
3672 * @status: cmd completion status
3674 * This function is called from mrsas_complete_cmd() after an interrupt is
3675 * received from Firmware, and io_request->Function is
3676 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3679 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
3682 struct mrsas_header *hdr = &cmd->frame->hdr;
3683 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
3685 /* Reset the retry counter for future re-tries */
3686 cmd->retry_for_fw_reset = 0;
3689 cmd->ccb_ptr = NULL;
/* Dispatch on the MFI command type in the frame header. */
3692 case MFI_CMD_INVALID:
3693 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
3695 case MFI_CMD_PD_SCSI_IO:
3696 case MFI_CMD_LD_SCSI_IO:
3698 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3699 * issued either through an IO path or an IOCTL path. If it
3700 * was via IOCTL, we will send it to internal completion.
3702 if (cmd->sync_cmd) {
3704 mrsas_wakeup(sc, cmd);
3710 /* Check for LD map update */
/* mbox.b[1] == 1 marks the pending (async) map-update registration. */
3711 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
3712 (cmd->frame->dcmd.mbox.b[1] == 1)) {
3713 sc->fast_path_io = 0;
/* raidmap_lock serializes map state against new map-update issuance. */
3714 mtx_lock(&sc->raidmap_lock);
3715 sc->map_update_cmd = NULL;
3716 if (cmd_status != 0) {
3717 if (cmd_status != MFI_STAT_NOT_FOUND)
3718 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
3720 mrsas_release_mfi_cmd(cmd);
3721 mtx_unlock(&sc->raidmap_lock);
3726 mrsas_release_mfi_cmd(cmd);
/* Successful update: validate the new map and re-arm the sync command. */
3727 if (MR_ValidateMapInfo(sc))
3728 sc->fast_path_io = 0;
3730 sc->fast_path_io = 1;
3731 mrsas_sync_map_info(sc);
3732 mtx_unlock(&sc->raidmap_lock);
3735 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3736 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
3737 sc->mrsas_aen_triggered = 0;
3739 /* FW has an updated PD sequence */
3740 if ((cmd->frame->dcmd.opcode ==
3741 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3742 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3744 mtx_lock(&sc->raidmap_lock);
3745 sc->jbod_seq_cmd = NULL;
3746 mrsas_release_mfi_cmd(cmd);
3748 if (cmd_status == MFI_STAT_OK) {
3749 sc->pd_seq_map_id++;
3750 /* Re-register a pd sync seq num cmd */
3751 if (megasas_sync_pd_seq_num(sc, true))
3752 sc->use_seqnum_jbod_fp = 0;
/* Any failure disables the JBOD sequence-number fast path. */
3754 sc->use_seqnum_jbod_fp = 0;
3755 device_printf(sc->mrsas_dev,
3756 "Jbod map sync failed, status=%x\n", cmd_status);
3758 mtx_unlock(&sc->raidmap_lock);
3761 /* See if got an event notification */
3762 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
3763 mrsas_complete_aen(sc, cmd);
/* Plain blocking DCMD: wake the sleeper in mrsas_issue_blocked_cmd(). */
3765 mrsas_wakeup(sc, cmd);
3768 /* Command issued to abort another cmd return */
3769 mrsas_complete_abort(sc, cmd);
3772 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
3778 * mrsas_wakeup: Completes an internal command
3779 * input: Adapter soft state
3780 * Command to be completed
3782 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3783 * timer is started. This function is called from
3784 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3785 * from the command wait.
3788 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/* Propagate FW status; normalize the 0xFF "pending" sentinel to success. */
3790 cmd->cmd_status = cmd->frame->io.cmd_status;
3792 if (cmd->cmd_status == 0xFF)
3793 cmd->cmd_status = 0;
/* Same channel address the blocked issuer sleeps on. */
3795 sc->chan = (void *)&cmd;
3796 wakeup_one((void *)&sc->chan);
3801 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
3802 * Adapter soft state Shutdown/Hibernate
3804 * This function issues a DCMD internal command to Firmware to initiate shutdown
3805 * of the controller.
3808 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3810 struct mrsas_mfi_cmd *cmd;
3811 struct mrsas_dcmd_frame *dcmd;
/* Nothing to do if the adapter is already dead. */
3813 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3816 cmd = mrsas_get_mfi_cmd(sc);
3818 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
/* Abort any long-lived outstanding internal commands before shutdown. */
3822 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3823 if (sc->map_update_cmd)
3824 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3825 if (sc->jbod_seq_cmd)
3826 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
/* Build a zero-payload DCMD carrying the shutdown/hibernate opcode. */
3828 dcmd = &cmd->frame->dcmd;
3829 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3831 dcmd->cmd = MFI_CMD_DCMD;
3832 dcmd->cmd_status = 0x0;
3833 dcmd->sge_count = 0;
3834 dcmd->flags = MFI_FRAME_DIR_NONE;
3837 dcmd->data_xfer_len = 0;
3838 dcmd->opcode = opcode;
3840 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3842 mrsas_issue_blocked_cmd(sc, cmd);
3843 mrsas_release_mfi_cmd(cmd);
3849 * mrsas_flush_cache: Requests FW to flush all its caches input:
3850 * Adapter soft state
3852 * This function is issues a DCMD internal command to Firmware to initiate
3853 * flushing of all caches.
3856 mrsas_flush_cache(struct mrsas_softc *sc)
3858 struct mrsas_mfi_cmd *cmd;
3859 struct mrsas_dcmd_frame *dcmd;
3861 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3864 cmd = mrsas_get_mfi_cmd(sc);
3866 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
/* Zero-payload DCMD; mbox selects both controller and disk caches. */
3869 dcmd = &cmd->frame->dcmd;
3870 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3872 dcmd->cmd = MFI_CMD_DCMD;
3873 dcmd->cmd_status = 0x0;
3874 dcmd->sge_count = 0;
3875 dcmd->flags = MFI_FRAME_DIR_NONE;
3878 dcmd->data_xfer_len = 0;
3879 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3880 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3882 mrsas_issue_blocked_cmd(sc, cmd);
3883 mrsas_release_mfi_cmd(cmd);
/*
 * megasas_sync_pd_seq_num: Fetch (or pend on) the FW's JBOD PD sequence map.
 * When 'pend' is true the DCMD is registered asynchronously (FW completes it
 * when the map changes, see MRSAS_DCMD_MBOX_PEND_FLAG); otherwise it is
 * issued polled and validated immediately.
 */
3889 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
3892 u_int8_t do_ocr = 1;
3893 struct mrsas_mfi_cmd *cmd;
3894 struct mrsas_dcmd_frame *dcmd;
3895 uint32_t pd_seq_map_sz;
3896 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3897 bus_addr_t pd_seq_h;
/* Struct embeds one MR_PD_CFG_SEQ, hence MAX_PHYSICAL_DEVICES - 1 extra. */
3899 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
3900 (sizeof(struct MR_PD_CFG_SEQ) *
3901 (MAX_PHYSICAL_DEVICES - 1));
3903 cmd = mrsas_get_mfi_cmd(sc);
3905 device_printf(sc->mrsas_dev,
3906 "Cannot alloc for ld map info cmd.\n");
3909 dcmd = &cmd->frame->dcmd;
/* Double-buffered JBOD maps; parity of pd_seq_map_id picks the slot. */
3911 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
3912 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
3914 device_printf(sc->mrsas_dev,
3915 "Failed to alloc mem for jbod map info.\n");
3916 mrsas_release_mfi_cmd(cmd);
3919 memset(pd_sync, 0, pd_seq_map_sz);
3920 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3921 dcmd->cmd = MFI_CMD_DCMD;
3922 dcmd->cmd_status = 0xFF;
3923 dcmd->sge_count = 1;
3926 dcmd->data_xfer_len = (pd_seq_map_sz);
3927 dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
3928 dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
3929 dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
/* Pending registration: fire-and-forget, completion handled in ISR path. */
3932 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
3933 dcmd->flags = (MFI_FRAME_DIR_WRITE);
3934 sc->jbod_seq_cmd = cmd;
3935 if (mrsas_issue_dcmd(sc, cmd)) {
3936 device_printf(sc->mrsas_dev,
3937 "Fail to send sync map info command.\n");
/* Non-pending path: synchronous polled read of the current map. */
3942 dcmd->flags = MFI_FRAME_DIR_READ;
3944 retcode = mrsas_issue_polled(sc, cmd);
3945 if (retcode == ETIMEDOUT)
/* FW reporting more devices than the driver supports is a hard error. */
3948 if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
3949 device_printf(sc->mrsas_dev,
3950 "driver supports max %d JBOD, but FW reports %d\n",
3951 MAX_PHYSICAL_DEVICES, pd_sync->count);
3955 sc->pd_seq_map_id++;
/* Timed-out DCMD escalates to online controller reset. */
3960 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3966 * mrsas_get_map_info: Load and validate RAID map input:
3967 * Adapter instance soft state
3969 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3970 * and validate RAID map. It returns 0 if successful, 1 other- wise.
3973 mrsas_get_map_info(struct mrsas_softc *sc)
3975 uint8_t retcode = 0;
/* fast_path_io is only enabled after a fetched map validates cleanly. */
3977 sc->fast_path_io = 0;
3978 if (!mrsas_get_ld_map_info(sc)) {
3979 retcode = MR_ValidateMapInfo(sc);
3981 sc->fast_path_io = 1;
3989 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
3990 * Adapter instance soft state
3992 * Issues an internal command (DCMD) to get the FW's controller PD list
3996 mrsas_get_ld_map_info(struct mrsas_softc *sc)
3999 struct mrsas_mfi_cmd *cmd;
4000 struct mrsas_dcmd_frame *dcmd;
4002 bus_addr_t map_phys_addr = 0;
4004 cmd = mrsas_get_mfi_cmd(sc);
4006 device_printf(sc->mrsas_dev,
4007 "Cannot alloc for ld map info cmd.\n");
4010 dcmd = &cmd->frame->dcmd;
/* Double-buffered RAID maps; parity of map_id picks the slot to fill. */
4012 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4013 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4015 device_printf(sc->mrsas_dev,
4016 "Failed to alloc mem for ld map info.\n");
4017 mrsas_release_mfi_cmd(cmd);
/*
 * Zero the whole map buffer.  The buffer was allocated at
 * sc->max_map_sz bytes; the previous sizeof(sc->max_map_sz) only
 * cleared sizeof(uint32_t) bytes, leaving stale map data behind.
 */
4020 memset(map, 0, sc->max_map_sz);
4021 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Read DCMD: FW DMA-writes current_map_sz bytes of map into our buffer. */
4023 dcmd->cmd = MFI_CMD_DCMD;
4024 dcmd->cmd_status = 0xFF;
4025 dcmd->sge_count = 1;
4026 dcmd->flags = MFI_FRAME_DIR_READ;
4029 dcmd->data_xfer_len = sc->current_map_sz;
4030 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4031 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4032 dcmd->sgl.sge32[0].length = sc->current_map_sz;
4034 retcode = mrsas_issue_polled(sc, cmd);
/* Timed-out DCMD escalates to online controller reset. */
4035 if (retcode == ETIMEDOUT)
4036 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4042 * mrsas_sync_map_info: Get FW's ld_map structure input:
4043 * Adapter instance soft state
4045 * Issues an internal command (DCMD) to get the FW's controller PD list
4049 mrsas_sync_map_info(struct mrsas_softc *sc)
4052 struct mrsas_mfi_cmd *cmd;
4053 struct mrsas_dcmd_frame *dcmd;
4054 uint32_t size_sync_info, num_lds;
4055 MR_LD_TARGET_SYNC *target_map = NULL;
4056 MR_DRV_RAID_MAP_ALL *map;
4058 MR_LD_TARGET_SYNC *ld_sync;
4059 bus_addr_t map_phys_addr = 0;
4061 cmd = mrsas_get_mfi_cmd(sc);
4063 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
/* Current driver map supplies the LD list to echo back to FW. */
4066 map = sc->ld_drv_map[sc->map_id & 1];
4067 num_lds = map->raidMap.ldCount;
4069 dcmd = &cmd->frame->dcmd;
4070 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
4071 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Reuse the *other* raidmap buffer (map_id - 1 parity) as scratch. */
4073 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
4074 memset(target_map, 0, sc->max_map_sz);
4076 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
4078 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
/* One target-id/seq-num entry per LD from the validated map. */
4080 for (i = 0; i < num_lds; i++, ld_sync++) {
4081 raid = MR_LdRaidGet(i, map);
4082 ld_sync->targetId = MR_GetLDTgtId(i, map);
4083 ld_sync->seqNum = raid->seqNum;
/* Write DCMD registered as pending: FW completes it when the map changes. */
4086 dcmd->cmd = MFI_CMD_DCMD;
4087 dcmd->cmd_status = 0xFF;
4088 dcmd->sge_count = 1;
4089 dcmd->flags = MFI_FRAME_DIR_WRITE;
4092 dcmd->data_xfer_len = sc->current_map_sz;
4093 dcmd->mbox.b[0] = num_lds;
4094 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4095 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4096 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4097 dcmd->sgl.sge32[0].length = sc->current_map_sz;
/* Tracked so shutdown/abort paths can cancel the pending registration. */
4099 sc->map_update_cmd = cmd;
4100 if (mrsas_issue_dcmd(sc, cmd)) {
4101 device_printf(sc->mrsas_dev,
4102 "Fail to send sync map info command.\n");
4109 * mrsas_get_pd_list: Returns FW's PD list structure input:
4110 * Adapter soft state
4112 * Issues an internal command (DCMD) to get the FW's controller PD list
4113 * structure. This information is mainly used to find out about system
4114 * supported by Firmware.
4117 mrsas_get_pd_list(struct mrsas_softc *sc)
4119 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4120 u_int8_t do_ocr = 1;
4121 struct mrsas_mfi_cmd *cmd;
4122 struct mrsas_dcmd_frame *dcmd;
4123 struct MR_PD_LIST *pd_list_mem;
4124 struct MR_PD_ADDRESS *pd_addr;
4125 bus_addr_t pd_list_phys_addr = 0;
4126 struct mrsas_tmp_dcmd *tcmd;
4128 cmd = mrsas_get_mfi_cmd(sc);
4130 device_printf(sc->mrsas_dev,
4131 "Cannot alloc for get PD list cmd\n");
4134 dcmd = &cmd->frame->dcmd;
/* NOTE(review): tcmd is M_NOWAIT-allocated but not NULL-checked before
 * mrsas_alloc_tmp_dcmd() dereferences it — confirm and add a check. */
4136 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4137 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4138 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4139 device_printf(sc->mrsas_dev,
4140 "Cannot alloc dmamap for get PD list cmd\n");
4141 mrsas_release_mfi_cmd(cmd);
4142 mrsas_free_tmp_dcmd(tcmd);
4143 free(tcmd, M_MRSAS);
4146 pd_list_mem = tcmd->tmp_dcmd_mem;
4147 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4149 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Query only PDs currently exposed to the host. */
4151 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4152 dcmd->mbox.b[1] = 0;
4153 dcmd->cmd = MFI_CMD_DCMD;
4154 dcmd->cmd_status = 0xFF;
4155 dcmd->sge_count = 1;
4156 dcmd->flags = MFI_FRAME_DIR_READ;
4159 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4160 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4161 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4162 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
/* With interrupts masked (e.g. during OCR) fall back to polled issue. */
4164 if (!sc->mask_interrupts)
4165 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4167 retcode = mrsas_issue_polled(sc, cmd);
4169 if (retcode == ETIMEDOUT)
4172 /* Get the instance PD list */
4173 pd_count = MRSAS_MAX_PD;
4174 pd_addr = pd_list_mem->addr;
/* Copy into a deviceId-indexed local table, then publish to sc->pd_list. */
4175 if (pd_list_mem->count < pd_count) {
4176 memset(sc->local_pd_list, 0,
4177 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4178 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4179 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4180 sc->local_pd_list[pd_addr->deviceId].driveType =
4181 pd_addr->scsiDevType;
4182 sc->local_pd_list[pd_addr->deviceId].driveState =
4187 * Use mutext/spinlock if pd_list component size increase more than
4190 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4194 mrsas_free_tmp_dcmd(tcmd);
4195 free(tcmd, M_MRSAS);
/* Timed-out DCMD escalates to online controller reset. */
4198 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4200 if (!sc->mask_interrupts)
4201 mrsas_release_mfi_cmd(cmd);
4207 * mrsas_get_ld_list: Returns FW's LD list structure input:
4208 * Adapter soft state
4210 * Issues an internal command (DCMD) to get the FW's controller PD list
4211 * structure. This information is mainly used to find out about supported by
4215 mrsas_get_ld_list(struct mrsas_softc *sc)
4217 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4218 u_int8_t do_ocr = 1;
4219 struct mrsas_mfi_cmd *cmd;
4220 struct mrsas_dcmd_frame *dcmd;
4221 struct MR_LD_LIST *ld_list_mem;
4222 bus_addr_t ld_list_phys_addr = 0;
4223 struct mrsas_tmp_dcmd *tcmd;
4225 cmd = mrsas_get_mfi_cmd(sc);
4227 device_printf(sc->mrsas_dev,
4228 "Cannot alloc for get LD list cmd\n");
4231 dcmd = &cmd->frame->dcmd;
/* NOTE(review): tcmd is M_NOWAIT-allocated but not NULL-checked before
 * mrsas_alloc_tmp_dcmd() dereferences it — confirm and add a check. */
4233 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4234 ld_list_size = sizeof(struct MR_LD_LIST);
4235 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4236 device_printf(sc->mrsas_dev,
4237 "Cannot alloc dmamap for get LD list cmd\n");
4238 mrsas_release_mfi_cmd(cmd);
4239 mrsas_free_tmp_dcmd(tcmd);
4240 free(tcmd, M_MRSAS);
4243 ld_list_mem = tcmd->tmp_dcmd_mem;
4244 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4246 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* mbox.b[0] = 1 asks FW for the extended (256-VD) list format. */
4248 if (sc->max256vdSupport)
4249 dcmd->mbox.b[0] = 1;
4251 dcmd->cmd = MFI_CMD_DCMD;
4252 dcmd->cmd_status = 0xFF;
4253 dcmd->sge_count = 1;
4254 dcmd->flags = MFI_FRAME_DIR_READ;
4256 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4257 dcmd->opcode = MR_DCMD_LD_GET_LIST;
4258 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4259 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
/* With interrupts masked (e.g. during OCR) fall back to polled issue. */
4262 if (!sc->mask_interrupts)
4263 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4265 retcode = mrsas_issue_polled(sc, cmd);
4267 if (retcode == ETIMEDOUT)
4271 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4274 /* Get the instance LD list */
/* Record target IDs of non-zero-state LDs into sc->ld_ids (0xff = unused). */
4275 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4276 sc->CurLdCount = ld_list_mem->ldCount;
4277 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4278 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4279 if (ld_list_mem->ldList[ld_index].state != 0) {
4280 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4281 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4287 mrsas_free_tmp_dcmd(tcmd);
4288 free(tcmd, M_MRSAS);
/* Timed-out DCMD escalates to online controller reset. */
4291 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4292 if (!sc->mask_interrupts)
4293 mrsas_release_mfi_cmd(cmd);
4299 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
4300 * Adapter soft state Temp command Size of alloction
4302 * Allocates DMAable memory for a temporary internal command. The allocated
4303 * memory is initialized to all zeros upon successful loading of the dma
4307 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4308 struct mrsas_tmp_dcmd *tcmd, int size)
/*
 * Creates tag/buffer/map for a 'size'-byte DMA scratch area below 4 GiB
 * (32-bit SGE addresses) and records the bus address via mrsas_addr_cb.
 */
4310 if (bus_dma_tag_create(sc->mrsas_parent_tag,
4312 BUS_SPACE_MAXADDR_32BIT,
4320 &tcmd->tmp_dcmd_tag)) {
4321 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4324 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4325 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4326 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4329 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4330 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4331 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4332 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4335 memset(tcmd->tmp_dcmd_mem, 0, size);
4340 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
4341 * temporary dcmd pointer
4343 * Deallocates memory of the temporary command for use in the construction of
4344 * the internal DCMD.
4347 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
/* Reverse of mrsas_alloc_tmp_dcmd; each step guarded for partial setups. */
4349 if (tmp->tmp_dcmd_phys_addr)
4350 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4351 if (tmp->tmp_dcmd_mem != NULL)
4352 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4353 if (tmp->tmp_dcmd_tag != NULL)
4354 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4358 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
4359 * Adapter soft state Previously issued cmd to be aborted
4361 * This function is used to abort previously issued commands, such as AEN and
4362 * RAID map sync map commands. The abort command is sent as a DCMD internal
4363 * command and subsequently the driver will wait for a return status. The
4364 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4367 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4368 struct mrsas_mfi_cmd *cmd_to_abort)
4370 struct mrsas_mfi_cmd *cmd;
4371 struct mrsas_abort_frame *abort_fr;
4372 u_int8_t retcode = 0;
4373 unsigned long total_time = 0;
4374 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4376 cmd = mrsas_get_mfi_cmd(sc);
4378 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4381 abort_fr = &cmd->frame->abort;
4383 /* Prepare and issue the abort frame */
/* Target is identified by its index (context) and MFI frame address. */
4384 abort_fr->cmd = MFI_CMD_ABORT;
4385 abort_fr->cmd_status = 0xFF;
4386 abort_fr->flags = 0;
4387 abort_fr->abort_context = cmd_to_abort->index;
4388 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4389 abort_fr->abort_mfi_phys_addr_hi = 0;
4392 cmd->cmd_status = 0xFF;
4394 if (mrsas_issue_dcmd(sc, cmd)) {
4395 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4398 /* Wait for this cmd to complete */
/* Sleep on sc->chan; mrsas_complete_abort() issues the wakeup. */
4399 sc->chan = (void *)&cmd;
4401 if (cmd->cmd_status == 0xFF) {
4402 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4406 if (total_time >= max_wait) {
4407 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4414 mrsas_release_mfi_cmd(cmd);
4419 * mrsas_complete_abort: Completes aborting a command input:
4420 * Adapter soft state Cmd that was issued to abort another cmd
4422 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4423 * change after sending the command. This function is called from
4424 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4427 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/* Only synchronous aborts have a sleeper; mark done and wake it. */
4429 if (cmd->sync_cmd) {
4431 cmd->cmd_status = 0;
4432 sc->chan = (void *)&cmd;
4433 wakeup_one((void *)&sc->chan);
4439 * mrsas_aen_handler: AEN processing callback function from thread context
4440 * input: Adapter soft state
4442 * Asynchronous event handler
4445 mrsas_aen_handler(struct mrsas_softc *sc)
4447 union mrsas_evt_class_locale class_locale;
4450 int error, fail_aen = 0;
4453 printf("invalid instance!\n");
/* Skip event processing entirely while detach or reset is in flight. */
4456 if (sc->remove_in_progress || sc->reset_in_progress) {
4457 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
4458 __func__, __LINE__);
/* Dispatch on the event code delivered in evt_detail_mem. */
4461 if (sc->evt_detail_mem) {
4462 switch (sc->evt_detail_mem->code) {
4463 case MR_EVT_PD_INSERTED:
/* PD hotplug: refresh PD list, then rescan the physical SIM (sim_1). */
4464 fail_aen = mrsas_get_pd_list(sc);
4466 mrsas_bus_scan_sim(sc, sc->sim_1);
4468 goto skip_register_aen;
4470 case MR_EVT_PD_REMOVED:
4471 fail_aen = mrsas_get_pd_list(sc);
4473 mrsas_bus_scan_sim(sc, sc->sim_1);
4475 goto skip_register_aen;
4477 case MR_EVT_LD_OFFLINE:
4478 case MR_EVT_CFG_CLEARED:
4479 case MR_EVT_LD_DELETED:
/* LD-side changes rescan the logical SIM (sim_0). */
4480 mrsas_bus_scan_sim(sc, sc->sim_0);
4482 case MR_EVT_LD_CREATED:
4483 fail_aen = mrsas_get_ld_list(sc);
4485 mrsas_bus_scan_sim(sc, sc->sim_0);
4487 goto skip_register_aen;
4489 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4490 case MR_EVT_FOREIGN_CFG_IMPORTED:
4491 case MR_EVT_LD_STATE_CHANGE:
4494 case MR_EVT_CTRL_PROP_CHANGED:
4495 fail_aen = mrsas_get_ctrl_info(sc);
4497 goto skip_register_aen;
4503 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4507 fail_aen = mrsas_get_pd_list(sc);
4509 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4510 mrsas_bus_scan_sim(sc, sc->sim_1);
4512 goto skip_register_aen;
4514 fail_aen = mrsas_get_ld_list(sc);
4516 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4517 mrsas_bus_scan_sim(sc, sc->sim_0);
4519 goto skip_register_aen;
/* Re-arm: register for the next event after the one just handled. */
4521 seq_num = sc->evt_detail_mem->seq_num + 1;
4523 /* Register AEN with FW for latest sequence number plus 1 */
4524 class_locale.members.reserved = 0;
4525 class_locale.members.locale = MR_EVT_LOCALE_ALL;
4526 class_locale.members.class = MR_EVT_CLASS_DEBUG;
/* An AEN is already outstanding; don't double-register. */
4528 if (sc->aen_cmd != NULL)
4531 mtx_lock(&sc->aen_lock);
4532 error = mrsas_register_aen(sc, seq_num,
4534 mtx_unlock(&sc->aen_lock);
4537 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4546 * mrsas_complete_aen: Completes AEN command
4547 * input: Adapter soft state
4548 * Cmd that was issued to abort another cmd
4550 * This function will be called from ISR and will continue event processing from
4551 * thread context by enqueuing task in ev_tq (callback function
4552 * "mrsas_aen_handler").
4555 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4558 * Don't signal app if it is just an aborted previously registered
/* Genuine event (not an abort during teardown): notify poll/select waiters. */
4561 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4562 sc->mrsas_aen_triggered = 1;
4563 mtx_lock(&sc->aen_lock);
4564 if (sc->mrsas_poll_waiting) {
4565 sc->mrsas_poll_waiting = 0;
4566 selwakeup(&sc->mrsas_select);
4568 mtx_unlock(&sc->aen_lock);
4573 mrsas_release_mfi_cmd(cmd);
/* Defer the heavy lifting to thread context via the event taskqueue. */
4575 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
/* newbus glue: device method table, driver descriptor, and module hooks. */
4580 static device_method_t mrsas_methods[] = {
4581 DEVMETHOD(device_probe, mrsas_probe),
4582 DEVMETHOD(device_attach, mrsas_attach),
4583 DEVMETHOD(device_detach, mrsas_detach),
4584 DEVMETHOD(device_suspend, mrsas_suspend),
4585 DEVMETHOD(device_resume, mrsas_resume),
4586 DEVMETHOD(bus_print_child, bus_generic_print_child),
4587 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
4591 static driver_t mrsas_driver = {
4594 sizeof(struct mrsas_softc)
4597 static devclass_t mrsas_devclass;
/* Attach to the PCI bus; requires the CAM transport layer. */
4599 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
4600 MODULE_DEPEND(mrsas, cam, 1, 1, 1);