2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
47 #include <cam/cam_ccb.h>
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92 struct mrsas_mfi_cmd *cmd_to_abort);
93 static struct mrsas_softc *
94 mrsas_get_softc_instance(struct cdev *dev,
95 u_long cmd, caddr_t arg);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99 struct mrsas_mfi_cmd *mfi_cmd);
100 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int mrsas_init_adapter(struct mrsas_softc *sc);
103 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int mrsas_ioc_init(struct mrsas_softc *sc);
107 int mrsas_bus_scan(struct mrsas_softc *sc);
108 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int mrsas_reset_targets(struct mrsas_softc *sc);
115 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
116 struct mrsas_mfi_cmd *cmd);
118 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
120 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
121 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void mrsas_disable_intr(struct mrsas_softc *sc);
125 void mrsas_enable_intr(struct mrsas_softc *sc);
126 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
127 void mrsas_free_mem(struct mrsas_softc *sc);
128 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
129 void mrsas_isr(void *arg);
130 void mrsas_teardown_intr(struct mrsas_softc *sc);
131 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
132 void mrsas_kill_hba(struct mrsas_softc *sc);
133 void mrsas_aen_handler(struct mrsas_softc *sc);
135 mrsas_write_reg(struct mrsas_softc *sc, int offset,
138 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
139 u_int32_t req_desc_hi);
140 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
142 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
143 struct mrsas_mfi_cmd *cmd, u_int8_t status);
145 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
147 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
149 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
150 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
152 extern int mrsas_cam_attach(struct mrsas_softc *sc);
153 extern void mrsas_cam_detach(struct mrsas_softc *sc);
154 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
155 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
156 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163 extern void mrsas_xpt_release(struct mrsas_softc *sc);
164 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165 mrsas_get_request_desc(struct mrsas_softc *sc,
167 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
171 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
174 * PCI device struct and table
177 typedef struct mrsas_ident {
185 MRSAS_CTLR_ID device_table[] = {
186 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
187 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
188 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
189 {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
190 {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
191 {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
192 {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
197 * Character device entry points
200 static struct cdevsw mrsas_cdevsw = {
201 .d_version = D_VERSION,
202 .d_open = mrsas_open,
203 .d_close = mrsas_close,
204 .d_read = mrsas_read,
205 .d_write = mrsas_write,
206 .d_ioctl = mrsas_ioctl,
207 .d_poll = mrsas_poll,
211 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
214 * In the cdevsw routines, we find our softc by using the si_drv1 member of
215 * struct cdev. We set this variable to point to our softc in our attach
216 * routine when we create the /dev entry.
219 mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
221 struct mrsas_softc *sc;
228 mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
230 struct mrsas_softc *sc;
237 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
239 struct mrsas_softc *sc;
245 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
247 struct mrsas_softc *sc;
254 * Register Read/Write Functions
258 mrsas_write_reg(struct mrsas_softc *sc, int offset,
261 bus_space_tag_t bus_tag = sc->bus_tag;
262 bus_space_handle_t bus_handle = sc->bus_handle;
264 bus_space_write_4(bus_tag, bus_handle, offset, value);
268 mrsas_read_reg(struct mrsas_softc *sc, int offset)
270 bus_space_tag_t bus_tag = sc->bus_tag;
271 bus_space_handle_t bus_handle = sc->bus_handle;
273 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
278 * Interrupt Disable/Enable/Clear Functions
282 mrsas_disable_intr(struct mrsas_softc *sc)
284 u_int32_t mask = 0xFFFFFFFF;
287 sc->mask_interrupts = 1;
288 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
289 /* Dummy read to force pci flush */
290 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
294 mrsas_enable_intr(struct mrsas_softc *sc)
296 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
299 sc->mask_interrupts = 0;
300 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
301 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
303 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
304 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
308 mrsas_clear_intr(struct mrsas_softc *sc)
312 /* Read received interrupt */
313 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
315 /* Not our interrupt, so just return */
316 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
319 /* We got a reply interrupt */
324 * PCI Support Functions
327 static struct mrsas_ident *
328 mrsas_find_ident(device_t dev)
330 struct mrsas_ident *pci_device;
332 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
333 if ((pci_device->vendor == pci_get_vendor(dev)) &&
334 (pci_device->device == pci_get_device(dev)) &&
335 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
336 (pci_device->subvendor == 0xffff)) &&
337 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
338 (pci_device->subdevice == 0xffff)))
345 mrsas_probe(device_t dev)
347 static u_int8_t first_ctrl = 1;
348 struct mrsas_ident *id;
350 if ((id = mrsas_find_ident(dev)) != NULL) {
352 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
356 device_set_desc(dev, id->desc);
357 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
364 * mrsas_setup_sysctl: setup sysctl values for mrsas
365 * input: Adapter instance soft state
367 * Setup sysctl entries for mrsas driver.
370 mrsas_setup_sysctl(struct mrsas_softc *sc)
372 struct sysctl_ctx_list *sysctl_ctx = NULL;
373 struct sysctl_oid *sysctl_tree = NULL;
374 char tmpstr[80], tmpstr2[80];
377 * Setup the sysctl variable so the user can change the debug level
380 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
381 device_get_unit(sc->mrsas_dev));
382 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
384 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
385 if (sysctl_ctx != NULL)
386 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
388 if (sysctl_tree == NULL) {
389 sysctl_ctx_init(&sc->sysctl_ctx);
390 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
391 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
392 CTLFLAG_RD, 0, tmpstr);
393 if (sc->sysctl_tree == NULL)
395 sysctl_ctx = &sc->sysctl_ctx;
396 sysctl_tree = sc->sysctl_tree;
398 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
399 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
400 "Disable the use of OCR");
402 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
403 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
404 strlen(MRSAS_VERSION), "driver version");
406 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
407 OID_AUTO, "reset_count", CTLFLAG_RD,
408 &sc->reset_count, 0, "number of ocr from start of the day");
410 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
411 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
412 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
414 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
415 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
416 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
418 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
419 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
420 "Driver debug level");
422 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
423 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
424 0, "Driver IO timeout value in mili-second.");
426 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
427 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
428 &sc->mrsas_fw_fault_check_delay,
429 0, "FW fault check thread delay in seconds. <default is 1 sec>");
431 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
432 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
433 &sc->reset_in_progress, 0, "ocr in progress status");
435 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
436 OID_AUTO, "block_sync_cache", CTLFLAG_RW,
437 &sc->block_sync_cache, 0,
438 "Block SYNC CACHE at driver. <default: 0, send it to FW>");
443 * mrsas_get_tunables: get tunable parameters.
444 * input: Adapter instance soft state
446 * Get tunable parameters. This will help to debug driver at boot time.
449 mrsas_get_tunables(struct mrsas_softc *sc)
453 /* XXX default to some debugging for now */
454 sc->mrsas_debug = MRSAS_FAULT;
455 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
456 sc->mrsas_fw_fault_check_delay = 1;
458 sc->reset_in_progress = 0;
459 sc->block_sync_cache = 0;
462 * Grab the global variables.
464 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
467 * Grab the global variables.
469 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
471 /* Grab the unit-instance variables */
472 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
473 device_get_unit(sc->mrsas_dev));
474 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
478 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
479 * Used to get sequence number at driver load time.
480 * input: Adapter soft state
482 * Allocates DMAable memory for the event log info internal command.
485 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
489 /* Allocate get event log info command */
490 el_info_size = sizeof(struct mrsas_evt_log_info);
491 if (bus_dma_tag_create(sc->mrsas_parent_tag,
493 BUS_SPACE_MAXADDR_32BIT,
502 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
505 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
506 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
507 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
510 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
511 sc->el_info_mem, el_info_size, mrsas_addr_cb,
512 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
513 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
516 memset(sc->el_info_mem, 0, el_info_size);
521 * mrsas_free_evt_info_cmd: Free memory for Event log info command
522 * input: Adapter soft state
524 * Deallocates memory for the event log info internal command.
527 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
529 if (sc->el_info_phys_addr)
530 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
531 if (sc->el_info_mem != NULL)
532 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
533 if (sc->el_info_tag != NULL)
534 bus_dma_tag_destroy(sc->el_info_tag);
538 * mrsas_get_seq_num: Get latest event sequence number
539 * @sc: Adapter soft state
540 * @eli: Firmware event log sequence number information.
542 * Firmware maintains a log of all events in a non-volatile area.
543 * Driver get the sequence number using DCMD
544 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
548 mrsas_get_seq_num(struct mrsas_softc *sc,
549 struct mrsas_evt_log_info *eli)
551 struct mrsas_mfi_cmd *cmd;
552 struct mrsas_dcmd_frame *dcmd;
553 u_int8_t do_ocr = 1, retcode = 0;
555 cmd = mrsas_get_mfi_cmd(sc);
558 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
561 dcmd = &cmd->frame->dcmd;
563 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
564 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
565 mrsas_release_mfi_cmd(cmd);
568 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
570 dcmd->cmd = MFI_CMD_DCMD;
571 dcmd->cmd_status = 0x0;
573 dcmd->flags = MFI_FRAME_DIR_READ;
576 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
577 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
578 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
579 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
581 retcode = mrsas_issue_blocked_cmd(sc, cmd);
582 if (retcode == ETIMEDOUT)
587 * Copy the data back into callers buffer
589 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
590 mrsas_free_evt_log_info_cmd(sc);
594 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
596 mrsas_release_mfi_cmd(cmd);
603 * mrsas_register_aen: Register for asynchronous event notification
604 * @sc: Adapter soft state
605 * @seq_num: Starting sequence number
606 * @class_locale: Class of the event
608 * This function subscribes for events beyond the @seq_num
609 * and type @class_locale.
613 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
614 u_int32_t class_locale_word)
617 struct mrsas_mfi_cmd *cmd;
618 struct mrsas_dcmd_frame *dcmd;
619 union mrsas_evt_class_locale curr_aen;
620 union mrsas_evt_class_locale prev_aen;
623 * If there an AEN pending already (aen_cmd), check if the
624 * class_locale of that pending AEN is inclusive of the new AEN
625 * request we currently have. If it is, then we don't have to do
626 * anything. In other words, whichever events the current AEN request
627 * is subscribing to, have already been subscribed to. If the old_cmd
628 * is _not_ inclusive, then we have to abort that command, form a
629 * class_locale that is superset of both old and current and re-issue
633 curr_aen.word = class_locale_word;
637 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
640 * A class whose enum value is smaller is inclusive of all
641 * higher values. If a PROGRESS (= -1) was previously
642 * registered, then a new registration requests for higher
643 * classes need not be sent to FW. They are automatically
644 * included. Locale numbers don't have such hierarchy. They
647 if ((prev_aen.members.class <= curr_aen.members.class) &&
648 !((prev_aen.members.locale & curr_aen.members.locale) ^
649 curr_aen.members.locale)) {
651 * Previously issued event registration includes
652 * current request. Nothing to do.
656 curr_aen.members.locale |= prev_aen.members.locale;
658 if (prev_aen.members.class < curr_aen.members.class)
659 curr_aen.members.class = prev_aen.members.class;
661 sc->aen_cmd->abort_aen = 1;
662 ret_val = mrsas_issue_blocked_abort_cmd(sc,
666 printf("mrsas: Failed to abort previous AEN command\n");
672 cmd = mrsas_get_mfi_cmd(sc);
676 dcmd = &cmd->frame->dcmd;
678 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
681 * Prepare DCMD for aen registration
683 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
685 dcmd->cmd = MFI_CMD_DCMD;
686 dcmd->cmd_status = 0x0;
688 dcmd->flags = MFI_FRAME_DIR_READ;
691 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
692 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
693 dcmd->mbox.w[0] = seq_num;
694 sc->last_seq_num = seq_num;
695 dcmd->mbox.w[1] = curr_aen.word;
696 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
697 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
699 if (sc->aen_cmd != NULL) {
700 mrsas_release_mfi_cmd(cmd);
704 * Store reference to the cmd used to register for AEN. When an
705 * application wants us to register for AEN, we have to abort this
706 * cmd and re-register with a new EVENT LOCALE supplied by that app
711 * Issue the aen registration frame
713 if (mrsas_issue_dcmd(sc, cmd)) {
714 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
721 * mrsas_start_aen: Subscribes to AEN during driver load time
722 * @instance: Adapter soft state
725 mrsas_start_aen(struct mrsas_softc *sc)
727 struct mrsas_evt_log_info eli;
728 union mrsas_evt_class_locale class_locale;
731 /* Get the latest sequence number from FW */
733 memset(&eli, 0, sizeof(eli));
735 if (mrsas_get_seq_num(sc, &eli))
738 /* Register AEN with FW for latest sequence number plus 1 */
739 class_locale.members.reserved = 0;
740 class_locale.members.locale = MR_EVT_LOCALE_ALL;
741 class_locale.members.class = MR_EVT_CLASS_DEBUG;
743 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
749 * mrsas_setup_msix: Allocate MSI-x vectors
750 * @sc: adapter soft state
753 mrsas_setup_msix(struct mrsas_softc *sc)
757 for (i = 0; i < sc->msix_vectors; i++) {
758 sc->irq_context[i].sc = sc;
759 sc->irq_context[i].MSIxIndex = i;
760 sc->irq_id[i] = i + 1;
761 sc->mrsas_irq[i] = bus_alloc_resource_any
762 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
764 if (sc->mrsas_irq[i] == NULL) {
765 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
766 goto irq_alloc_failed;
768 if (bus_setup_intr(sc->mrsas_dev,
770 INTR_MPSAFE | INTR_TYPE_CAM,
771 NULL, mrsas_isr, &sc->irq_context[i],
772 &sc->intr_handle[i])) {
773 device_printf(sc->mrsas_dev,
774 "Cannot set up MSI-x interrupt handler\n");
775 goto irq_alloc_failed;
781 mrsas_teardown_intr(sc);
786 * mrsas_allocate_msix: Setup MSI-x vectors
787 * @sc: adapter soft state
790 mrsas_allocate_msix(struct mrsas_softc *sc)
792 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
793 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
794 " of vectors\n", sc->msix_vectors);
796 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
797 goto irq_alloc_failed;
802 mrsas_teardown_intr(sc);
807 * mrsas_attach: PCI entry point
808 * input: pointer to device struct
810 * Performs setup of PCI and registers, initializes mutexes and linked lists,
811 * registers interrupts and CAM, and initializes the adapter/controller to
815 mrsas_attach(device_t dev)
817 struct mrsas_softc *sc = device_get_softc(dev);
818 uint32_t cmd, bar, error;
820 memset(sc, 0, sizeof(struct mrsas_softc));
822 /* Look up our softc and initialize its fields. */
824 sc->device_id = pci_get_device(dev);
826 if ((sc->device_id == MRSAS_INVADER) ||
827 (sc->device_id == MRSAS_FURY) ||
828 (sc->device_id == MRSAS_INTRUDER) ||
829 (sc->device_id == MRSAS_INTRUDER_24) ||
830 (sc->device_id == MRSAS_CUTLASS_52) ||
831 (sc->device_id == MRSAS_CUTLASS_53)) {
832 sc->mrsas_gen3_ctrl = 1;
835 mrsas_get_tunables(sc);
838 * Set up PCI and registers
840 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
841 if ((cmd & PCIM_CMD_PORTEN) == 0) {
844 /* Force the busmaster enable bit on. */
845 cmd |= PCIM_CMD_BUSMASTEREN;
846 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
848 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
850 sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
851 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
852 &(sc->reg_res_id), RF_ACTIVE))
854 device_printf(dev, "Cannot allocate PCI registers\n");
857 sc->bus_tag = rman_get_bustag(sc->reg_res);
858 sc->bus_handle = rman_get_bushandle(sc->reg_res);
860 /* Intialize mutexes */
861 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
862 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
863 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
864 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
865 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
866 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
867 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
868 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
870 /* Intialize linked list */
871 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
872 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
874 mrsas_atomic_set(&sc->fw_outstanding, 0);
875 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
877 sc->io_cmds_highwater = 0;
879 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
880 sc->UnevenSpanSupport = 0;
884 /* Initialize Firmware */
885 if (mrsas_init_fw(sc) != SUCCESS) {
888 /* Register mrsas to CAM layer */
889 if ((mrsas_cam_attach(sc) != SUCCESS)) {
890 goto attach_fail_cam;
893 if (mrsas_setup_irq(sc) != SUCCESS) {
894 goto attach_fail_irq;
896 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
897 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
898 device_get_unit(sc->mrsas_dev));
900 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
901 goto attach_fail_ocr_thread;
904 * After FW initialization and OCR thread creation
905 * we will defer the cdev creation, AEN setup on ICH callback
907 sc->mrsas_ich.ich_func = mrsas_ich_startup;
908 sc->mrsas_ich.ich_arg = sc;
909 if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
910 device_printf(sc->mrsas_dev, "Config hook is already established\n");
912 mrsas_setup_sysctl(sc);
915 attach_fail_ocr_thread:
916 if (sc->ocr_thread_active)
917 wakeup(&sc->ocr_chan);
919 mrsas_teardown_intr(sc);
921 mrsas_cam_detach(sc);
923 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
924 if (sc->msix_enable == 1)
925 pci_release_msi(sc->mrsas_dev);
927 mtx_destroy(&sc->sim_lock);
928 mtx_destroy(&sc->aen_lock);
929 mtx_destroy(&sc->pci_lock);
930 mtx_destroy(&sc->io_lock);
931 mtx_destroy(&sc->ioctl_lock);
932 mtx_destroy(&sc->mpt_cmd_pool_lock);
933 mtx_destroy(&sc->mfi_cmd_pool_lock);
934 mtx_destroy(&sc->raidmap_lock);
937 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
938 sc->reg_res_id, sc->reg_res);
944 * Interrupt config hook
947 mrsas_ich_startup(void *arg)
949 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
952 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
954 sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
955 IOCTL_SEMA_DESCRIPTION);
957 /* Create a /dev entry for mrsas controller. */
958 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
959 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
960 device_get_unit(sc->mrsas_dev));
962 if (device_get_unit(sc->mrsas_dev) == 0) {
963 make_dev_alias_p(MAKEDEV_CHECKNAME,
964 &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
965 "megaraid_sas_ioctl_node");
968 sc->mrsas_cdev->si_drv1 = sc;
971 * Add this controller to mrsas_mgmt_info structure so that it can be
972 * exported to management applications
974 if (device_get_unit(sc->mrsas_dev) == 0)
975 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
977 mrsas_mgmt_info.count++;
978 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
979 mrsas_mgmt_info.max_index++;
981 /* Enable Interrupts */
982 mrsas_enable_intr(sc);
984 /* Initiate AEN (Asynchronous Event Notification) */
985 if (mrsas_start_aen(sc)) {
986 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
987 "Further events from the controller will not be communicated.\n"
988 "Either there is some problem in the controller"
989 "or the controller does not support AEN.\n"
990 "Please contact to the SUPPORT TEAM if the problem persists\n");
992 if (sc->mrsas_ich.ich_arg != NULL) {
993 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
994 config_intrhook_disestablish(&sc->mrsas_ich);
995 sc->mrsas_ich.ich_arg = NULL;
1000 * mrsas_detach: De-allocates and teardown resources
1001 * input: pointer to device struct
1003 * This function is the entry point for device disconnect and detach.
1004 * It performs memory de-allocations, shutdown of the controller and various
1005 * teardown and destroy resource functions.
1008 mrsas_detach(device_t dev)
1010 struct mrsas_softc *sc;
1013 sc = device_get_softc(dev);
1014 sc->remove_in_progress = 1;
1016 /* Destroy the character device so no other IOCTL will be handled */
1017 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1018 destroy_dev(sc->mrsas_linux_emulator_cdev);
1019 destroy_dev(sc->mrsas_cdev);
1022 * Take the instance off the instance array. Note that we will not
1023 * decrement the max_index. We let this array be sparse array
1025 for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1026 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1027 mrsas_mgmt_info.count--;
1028 mrsas_mgmt_info.sc_ptr[i] = NULL;
1033 if (sc->ocr_thread_active)
1034 wakeup(&sc->ocr_chan);
1035 while (sc->reset_in_progress) {
1037 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1038 mrsas_dprint(sc, MRSAS_INFO,
1039 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1041 pause("mr_shutdown", hz);
1044 while (sc->ocr_thread_active) {
1046 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1047 mrsas_dprint(sc, MRSAS_INFO,
1049 "mrsas_ocr thread to quit ocr %d\n", i,
1050 sc->ocr_thread_active);
1052 pause("mr_shutdown", hz);
1054 mrsas_flush_cache(sc);
1055 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1056 mrsas_disable_intr(sc);
1057 mrsas_cam_detach(sc);
1058 mrsas_teardown_intr(sc);
1060 mtx_destroy(&sc->sim_lock);
1061 mtx_destroy(&sc->aen_lock);
1062 mtx_destroy(&sc->pci_lock);
1063 mtx_destroy(&sc->io_lock);
1064 mtx_destroy(&sc->ioctl_lock);
1065 mtx_destroy(&sc->mpt_cmd_pool_lock);
1066 mtx_destroy(&sc->mfi_cmd_pool_lock);
1067 mtx_destroy(&sc->raidmap_lock);
1069 /* Wait for all the semaphores to be released */
1070 while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
1071 pause("mr_shutdown", hz);
1073 /* Destroy the counting semaphore created for Ioctl */
1074 sema_destroy(&sc->ioctl_count_sema);
1077 bus_release_resource(sc->mrsas_dev,
1078 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1080 if (sc->sysctl_tree != NULL)
1081 sysctl_ctx_free(&sc->sysctl_ctx);
1087 * mrsas_free_mem: Frees allocated memory
1088 * input: Adapter instance soft state
1090 * This function is called from mrsas_detach() to free previously allocated
/*
 * mrsas_free_mem: tear down every DMA mapping/buffer/tag and every
 * driver-allocated list owned by the softc, in allocation-mirror order.
 * NOTE(review): this listing is missing intermediate lines (braces,
 * declarations of i/max_cmd, return type) from the original file;
 * the code lines below are preserved byte-identical.
 */
1094 mrsas_free_mem(struct mrsas_softc *sc)
1098 struct mrsas_mfi_cmd *mfi_cmd;
1099 struct mrsas_mpt_cmd *mpt_cmd;
/* Two RAID-map copies: unload DMA map, free DMA memory, destroy tag. */
1102 * Free RAID map memory
1104 for (i = 0; i < 2; i++) {
1105 if (sc->raidmap_phys_addr[i])
1106 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1107 if (sc->raidmap_mem[i] != NULL)
1108 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1109 if (sc->raidmap_tag[i] != NULL)
1110 bus_dma_tag_destroy(sc->raidmap_tag[i]);
/* ld_drv_map[] was malloc'd (see mrsas_setup_raidmap), so plain free(). */
1112 if (sc->ld_drv_map[i] != NULL)
1113 free(sc->ld_drv_map[i], M_MRSAS);
/* Same unload/free/destroy sequence for the two JBOD sequence maps. */
1115 for (i = 0; i < 2; i++) {
1116 if (sc->jbodmap_phys_addr[i])
1117 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1118 if (sc->jbodmap_mem[i] != NULL)
1119 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1120 if (sc->jbodmap_tag[i] != NULL)
1121 bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1124 * Free version buffer memory
1126 if (sc->verbuf_phys_addr)
1127 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1128 if (sc->verbuf_mem != NULL)
1129 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1130 if (sc->verbuf_tag != NULL)
1131 bus_dma_tag_destroy(sc->verbuf_tag);
1135 * Free sense buffer memory
1137 if (sc->sense_phys_addr)
1138 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1139 if (sc->sense_mem != NULL)
1140 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1141 if (sc->sense_tag != NULL)
1142 bus_dma_tag_destroy(sc->sense_tag);
1145 * Free chain frame memory
1147 if (sc->chain_frame_phys_addr)
1148 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1149 if (sc->chain_frame_mem != NULL)
1150 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1151 if (sc->chain_frame_tag != NULL)
1152 bus_dma_tag_destroy(sc->chain_frame_tag);
1155 * Free IO Request memory
1157 if (sc->io_request_phys_addr)
1158 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1159 if (sc->io_request_mem != NULL)
1160 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1161 if (sc->io_request_tag != NULL)
1162 bus_dma_tag_destroy(sc->io_request_tag);
1165 * Free Reply Descriptor memory
1167 if (sc->reply_desc_phys_addr)
1168 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1169 if (sc->reply_desc_mem != NULL)
1170 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1171 if (sc->reply_desc_tag != NULL)
1172 bus_dma_tag_destroy(sc->reply_desc_tag);
1175 * Free event detail memory
1177 if (sc->evt_detail_phys_addr)
1178 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1179 if (sc->evt_detail_mem != NULL)
1180 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1181 if (sc->evt_detail_tag != NULL)
1182 bus_dma_tag_destroy(sc->evt_detail_tag);
/* Release per-command MFI frames before destroying the frame tag. */
1187 if (sc->mfi_cmd_list) {
1188 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1189 mfi_cmd = sc->mfi_cmd_list[i];
1190 mrsas_free_frame(sc, mfi_cmd);
1193 if (sc->mficmd_frame_tag != NULL)
1194 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1197 * Free MPT internal command list
1199 max_cmd = sc->max_fw_cmds;
1200 if (sc->mpt_cmd_list) {
1201 for (i = 0; i < max_cmd; i++) {
1202 mpt_cmd = sc->mpt_cmd_list[i];
1203 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1204 free(sc->mpt_cmd_list[i], M_MRSAS);
1206 free(sc->mpt_cmd_list, M_MRSAS);
/* NULL the list pointers after freeing to guard against double-free. */
1207 sc->mpt_cmd_list = NULL;
1210 * Free MFI internal command list
1213 if (sc->mfi_cmd_list) {
1214 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1215 free(sc->mfi_cmd_list[i], M_MRSAS);
1217 free(sc->mfi_cmd_list, M_MRSAS);
1218 sc->mfi_cmd_list = NULL;
1221 * Free request descriptor memory
1223 free(sc->req_desc, M_MRSAS);
1224 sc->req_desc = NULL;
/* Parent tag last: all child DMA tags above must already be destroyed. */
1227 * Destroy parent tag
1229 if (sc->mrsas_parent_tag != NULL)
1230 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1233 * Free ctrl_info memory
1235 if (sc->ctrl_info != NULL)
1236 free(sc->ctrl_info, M_MRSAS);
1240 * mrsas_teardown_intr: Teardown interrupt
1241 * input: Adapter instance soft state
1243 * This function is called from mrsas_detach() to teardown and release bus
1244 * interrupt resource.
/*
 * mrsas_teardown_intr: detach interrupt handlers and release IRQ
 * resources.  Legacy (INTx) path handles only slot 0; the MSI-X path
 * iterates all allocated vectors and then releases the MSI resources
 * back to the PCI layer.  NOTE(review): intermediate lines (braces,
 * declaration of i, else branch) are missing from this listing.
 */
1247 mrsas_teardown_intr(struct mrsas_softc *sc)
1251 if (!sc->msix_enable) {
1252 if (sc->intr_handle[0])
1253 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1254 if (sc->mrsas_irq[0] != NULL)
1255 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1256 sc->irq_id[0], sc->mrsas_irq[0]);
1257 sc->intr_handle[0] = NULL;
/* MSI-X: tear down and release each vector's handler and IRQ resource. */
1259 for (i = 0; i < sc->msix_vectors; i++) {
1260 if (sc->intr_handle[i])
1261 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1262 sc->intr_handle[i]);
1264 if (sc->mrsas_irq[i] != NULL)
1265 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1266 sc->irq_id[i], sc->mrsas_irq[i]);
1268 sc->intr_handle[i] = NULL;
1270 pci_release_msi(sc->mrsas_dev);
1276 * mrsas_suspend: Suspend entry point
1277 * input: Device struct pointer
1279 * This function is the entry point for system suspend from the OS.
/* mrsas_suspend: stub suspend entry point; intentionally a no-op until
 * hibernation support is implemented. */
1282 mrsas_suspend(device_t dev)
1284 /* This will be filled when the driver will have hibernation support */
1289 * mrsas_resume: Resume entry point
1290 * input: Device struct pointer
1292 * This function is the entry point for system resume from the OS.
/* mrsas_resume: stub resume entry point; intentionally a no-op until
 * hibernation support is implemented. */
1295 mrsas_resume(device_t dev)
1297 /* This will be filled when the driver will have hibernation support */
1302 * mrsas_get_softc_instance: Find softc instance based on cmd type
1304 * This function will return softc instance based on cmd type.
1305 * In some cases, the application fires an ioctl on the required management instance and
1306 * does not provide host_no. Use cdev->si_drv1 to get the softc instance for those
1307 * cases; otherwise get the softc instance from the host_no provided by the application in
/*
 * mrsas_get_softc_instance: resolve the softc targeted by an ioctl.
 * For MRSAS_IOC_GET_PCI_INFO the instance presumably comes from
 * cdev->si_drv1 (that branch's body is missing from this listing —
 * TODO confirm); otherwise the application-supplied host_no indexes
 * the global mrsas_mgmt_info table.  NOTE(review): lines between the
 * lookup and the error reporting are missing here, so the exact
 * condition guarding the printf cannot be seen.
 */
1311 static struct mrsas_softc *
1312 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1314 struct mrsas_softc *sc = NULL;
1315 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1316 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1321 * get the Host number & the softc from data sent by the
/* Index the management-info array by the caller-provided host number. */
1324 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1326 printf("There is no Controller number %d\n",
1328 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1329 mrsas_dprint(sc, MRSAS_FAULT,
1330 "Invalid Controller number %d\n", user_ioc->host_no);
1337 * mrsas_ioctl: IOCtl commands entry point.
1339 * This function is the entry point for IOCtls from the OS. It calls the
1340 * appropriate function for processing depending on the command received.
/*
 * mrsas_ioctl: character-device ioctl entry point.  Resolves the softc,
 * refuses service while detach/shutdown or an unrecoverable HW error is
 * pending, waits out any in-flight OCR (online controller reset), then
 * dispatches on the command code.  NOTE(review): switch statement,
 * braces, returns, and the declaration of i/ret are missing lines in
 * this listing; the visible code is preserved byte-identical.
 */
1343 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
1346 struct mrsas_softc *sc;
1348 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1350 sc = mrsas_get_softc_instance(dev, cmd, arg);
/* Bail out early if the driver is going away or the HW is dead. */
1354 if (sc->remove_in_progress ||
1355 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
1356 mrsas_dprint(sc, MRSAS_INFO,
1357 "Either driver remove or shutdown called or "
1358 "HW is in unrecoverable critical error state.\n");
/* Spin lock only to sample reset_in_progress consistently. */
1361 mtx_lock_spin(&sc->ioctl_lock);
1362 if (!sc->reset_in_progress) {
1363 mtx_unlock_spin(&sc->ioctl_lock);
1366 mtx_unlock_spin(&sc->ioctl_lock);
/* Poll (1 s granularity) until OCR completes, logging periodically. */
1367 while (sc->reset_in_progress) {
1369 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1370 mrsas_dprint(sc, MRSAS_INFO,
1371 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1373 pause("mr_ioctl", hz);
1378 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1379 #ifdef COMPAT_FREEBSD32
1380 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
/* Counting semaphore throttles concurrent pass-through ioctls. */
1383 * Decrement the Ioctl counting Semaphore before getting an
1386 sema_wait(&sc->ioctl_count_sema);
1388 ret = mrsas_passthru(sc, (void *)arg, cmd);
1390 /* Increment the Ioctl counting semaphore value */
1391 sema_post(&sc->ioctl_count_sema);
1394 case MRSAS_IOC_SCAN_BUS:
1395 ret = mrsas_bus_scan(sc);
/* Report PCI topology (bus/slot/function/domain) to the caller. */
1398 case MRSAS_IOC_GET_PCI_INFO:
1399 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1400 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1401 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1402 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1403 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1404 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1405 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1406 "pci device no: %d, pci function no: %d,"
1407 "pci domain ID: %d\n",
1408 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1409 pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1414 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1422 * mrsas_poll: poll entry point for mrsas driver fd
1424 * This function is the entry point for poll from the OS. It waits for some AEN
1425 * events to be triggered from the controller and notifies back.
/*
 * mrsas_poll: poll(2) entry point.  Reports readability when an AEN has
 * already fired; otherwise registers the thread with selrecord() so it
 * is woken when the next AEN arrives.  NOTE(review): sc initialization,
 * revents declaration, and the else joining the two if-blocks are
 * missing lines in this listing.
 */
1428 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1430 struct mrsas_softc *sc;
1435 if (poll_events & (POLLIN | POLLRDNORM)) {
1436 if (sc->mrsas_aen_triggered) {
1437 revents |= poll_events & (POLLIN | POLLRDNORM);
/* No event pending: record interest under aen_lock for later wakeup. */
1441 if (poll_events & (POLLIN | POLLRDNORM)) {
1442 mtx_lock(&sc->aen_lock);
1443 sc->mrsas_poll_waiting = 1;
1444 selrecord(td, &sc->mrsas_select);
1445 mtx_unlock(&sc->aen_lock);
1452 * mrsas_setup_irq: Set up interrupt
1453 * input: Adapter instance soft state
1455 * This function sets up interrupts as a bus resource, with flags indicating
1456 * resource permitting contemporaneous sharing and for resource to activate
/*
 * mrsas_setup_irq: configure interrupt delivery.  Prefers MSI-X via
 * mrsas_setup_msix(); on failure (or when MSI-X is disabled) falls back
 * to a single shared legacy INTx line with mrsas_isr as the handler.
 * NOTE(review): the else branch, irq_id initialization, and error
 * returns are missing lines in this listing.  The "legcay" typo at
 * line 1473 is inside a runtime string and is deliberately untouched.
 */
1460 mrsas_setup_irq(struct mrsas_softc *sc)
1462 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1463 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1466 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1467 sc->irq_context[0].sc = sc;
1468 sc->irq_context[0].MSIxIndex = 0;
/* Shared, auto-activated IRQ resource for the legacy interrupt line. */
1470 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1471 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1472 if (sc->mrsas_irq[0] == NULL) {
1473 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1474 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1478 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1479 &sc->irq_context[0], &sc->intr_handle[0])) {
1480 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1489 * mrsas_isr: ISR entry point
1490 * input: argument pointer
1492 * This function is the interrupt service routine entry point. There are two
1493 * types of interrupts, state change interrupt and response interrupt. If an
1494 * interrupt is not ours, we just return.
/*
 * mrsas_isr: interrupt service routine.  Ignores interrupts while they
 * are masked, checks/acks ownership in legacy (non-MSI-X) mode, bails
 * out during an OCR, and otherwise completes pending replies for this
 * vector — clearing the response interrupt itself only if
 * mrsas_complete_cmd() did not.  NOTE(review): braces, returns, and
 * the status declaration are missing lines in this listing.
 */
1497 mrsas_isr(void *arg)
1499 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1500 struct mrsas_softc *sc = irq_context->sc;
1503 if (sc->mask_interrupts)
/* Legacy mode: read/clear the intr status to see if this IRQ is ours. */
1506 if (!sc->msix_vectors) {
1507 status = mrsas_clear_intr(sc);
1511 /* If we are resetting, bail */
1512 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1513 printf(" Entered into ISR when OCR is going active. \n");
1514 mrsas_clear_intr(sc);
1517 /* Process for reply request and clear response interrupt */
1518 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1519 mrsas_clear_intr(sc);
1525 * mrsas_complete_cmd: Process reply request
1526 * input: Adapter instance soft state
1528 * This function is called from mrsas_isr() to process reply request and clear
1529 * response interrupt. Processing of the reply request entails walking
1530 * through the reply descriptor array for the command request pended from
1531 * Firmware. We look at the Function field to determine the command type and
1532 * perform the appropriate action. Before we return, we clear the response
/*
 * mrsas_complete_cmd: drain this MSI-X vector's reply descriptor ring.
 * Walks descriptors starting at last_reply_idx[MSIxIndex], dispatching
 * each completed SMID by request Function (task management, fast-path
 * SCSI IO, LD IO, MFI pass-through), consuming the descriptor, and
 * periodically updating the reply-post host index register so firmware
 * can reuse ring entries.  NOTE(review): braces, break statements, and
 * num_completed updates are missing lines in this listing.
 */
1536 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1538 Mpi2ReplyDescriptorsUnion_t *desc;
1539 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1540 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1541 struct mrsas_mpt_cmd *cmd_mpt;
1542 struct mrsas_mfi_cmd *cmd_mfi;
1543 u_int8_t reply_descript_type;
1544 u_int16_t smid, num_completed;
1545 u_int8_t status, extStatus;
1546 union desc_value desc_val;
1547 PLD_LOAD_BALANCE_INFO lbinfo;
1548 u_int32_t device_id;
1549 int threshold_reply_count = 0;
1551 MR_TASK_MANAGE_REQUEST *mr_tm_req;
1552 MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
1555 /* If we have a hardware error, not need to continue */
1556 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Position at this vector's ring segment plus its saved read index. */
1559 desc = sc->reply_desc_mem;
1560 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1561 + sc->last_reply_idx[MSIxIndex];
1563 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1565 desc_val.word = desc->Words;
1568 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
/* An all-ones descriptor (0xFFFFFFFF/0xFFFFFFFF) marks an unused slot. */
1570 /* Find our reply descriptor for the command and process */
1571 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
/* SMIDs are 1-based; mpt_cmd_list is 0-based. */
1572 smid = reply_desc->SMID;
1573 cmd_mpt = sc->mpt_cmd_list[smid - 1];
1574 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1576 status = scsi_io_req->RaidContext.status;
1577 extStatus = scsi_io_req->RaidContext.exStatus;
1579 switch (scsi_io_req->Function) {
1580 case MPI2_FUNCTION_SCSI_TASK_MGMT:
1582 mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
1583 mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
1584 &mr_tm_req->TmRequest;
1585 device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
1586 "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
/* Wake the OCR/reset thread waiting on this TM completion. */
1588 wakeup_one((void *)&sc->ocr_chan);
1590 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
1591 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1592 lbinfo = &sc->load_balance_info[device_id];
/* Release the load-balance slot taken when the IO was issued. */
1593 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1594 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1595 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1597 /* Fall thru and complete IO */
1598 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1599 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1600 mrsas_cmd_done(sc, cmd_mpt);
1601 scsi_io_req->RaidContext.status = 0;
1602 scsi_io_req->RaidContext.exStatus = 0;
1603 mrsas_atomic_dec(&sc->fw_outstanding);
1605 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
1606 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1608 * Make sure NOT TO release the mfi command from the called
1609 * function's context if it is fired with issue_polled call.
1610 * And also make sure that the issue_polled call should only be
1611 * used if INTERRUPT IS DISABLED.
1613 if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
1614 mrsas_release_mfi_cmd(cmd_mfi);
1616 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
/* Advance and wrap this vector's read index. */
1620 sc->last_reply_idx[MSIxIndex]++;
1621 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1622 sc->last_reply_idx[MSIxIndex] = 0;
/* Mark the consumed descriptor as unused for the next pass. */
1624 desc->Words = ~((uint64_t)0x00); /* set it back to all
1627 threshold_reply_count++;
1629 /* Get the next reply descriptor */
1630 if (!sc->last_reply_idx[MSIxIndex]) {
1631 desc = sc->reply_desc_mem;
1632 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1636 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1637 desc_val.word = desc->Words;
1639 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1641 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
/* Intermediate host-index write keeps FW producing while we consume. */
1645 * Write to reply post index after completing threshold reply
1646 * count and still there are more replies in reply queue
1647 * pending to be completed.
1649 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1650 if (sc->msix_enable) {
1651 if (sc->mrsas_gen3_ctrl)
1652 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1653 ((MSIxIndex & 0x7) << 24) |
1654 sc->last_reply_idx[MSIxIndex]);
1656 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1657 sc->last_reply_idx[MSIxIndex]);
1659 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1660 reply_post_host_index), sc->last_reply_idx[0]);
1662 threshold_reply_count = 0;
1666 /* No match, just return */
1667 if (num_completed == 0)
/* Final host-index write acknowledges everything consumed above. */
1670 /* Clear response interrupt */
1671 if (sc->msix_enable) {
1672 if (sc->mrsas_gen3_ctrl) {
1673 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1674 ((MSIxIndex & 0x7) << 24) |
1675 sc->last_reply_idx[MSIxIndex]);
1677 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1678 sc->last_reply_idx[MSIxIndex]);
1680 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1681 reply_post_host_index), sc->last_reply_idx[0]);
1687 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1688 * input: Adapter instance soft state
1690 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1691 * It checks the command status and maps the appropriate CAM status for the
/*
 * mrsas_map_mpt_cmd_status: translate a firmware MFI completion status
 * into the corresponding CAM ccb status for LD/FastPath IO, copying
 * sense data back for check-condition results.  NOTE(review): the
 * switch statement, braces, and break lines are missing from this
 * listing; the summary comment at line 1687 ("Allocate DMAable
 * memory") appears to be a copy-paste error in the original header.
 */
1695 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1697 struct mrsas_softc *sc = cmd->sc;
1698 u_int8_t *sense_data;
1702 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1704 case MFI_STAT_SCSI_IO_FAILED:
1705 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1706 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1707 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1709 /* For now just copy 18 bytes back */
1710 memcpy(sense_data, cmd->sense, 18);
1711 cmd->ccb_ptr->csio.sense_len = 18;
1712 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
/* LUN 0 missing => device-not-there; nonzero LUN => invalid LUN. */
1715 case MFI_STAT_LD_OFFLINE:
1716 case MFI_STAT_DEVICE_NOT_FOUND:
1717 if (cmd->ccb_ptr->ccb_h.target_lun)
1718 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1720 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
/* Config changed under us: ask CAM to requeue and retry the IO. */
1722 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1723 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1726 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1727 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1728 cmd->ccb_ptr->csio.scsi_status = status;
1734 * mrsas_alloc_mem: Allocate DMAable memory
1735 * input: Adapter instance soft state
1737 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1738 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1739 * Kernel virtual address. Callback argument is physical memory address.
/*
 * mrsas_alloc_mem: create the parent bus_dma tag, then allocate, zero,
 * and load each controller-shared DMA region (version buffer, IO
 * request frames, chain frames, reply descriptors, sense buffers,
 * event detail) plus the per-IO data tag.  All child tags are 32-bit
 * constrained (BUS_SPACE_MAXADDR_32BIT) so firmware-visible buffers
 * stay below 4 GB.  Each load records the physical address via
 * mrsas_addr_cb.  NOTE(review): many tag-create parameter lines,
 * braces, and mrsas_free_mem error-path calls are missing from this
 * listing; visible code is preserved byte-identical.
 */
1742 mrsas_alloc_mem(struct mrsas_softc *sc)
1744 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1745 chain_frame_size, evt_detail_size, count;
1748 * Allocate parent DMA tag
1750 if (bus_dma_tag_create(NULL, /* parent */
1753 BUS_SPACE_MAXADDR, /* lowaddr */
1754 BUS_SPACE_MAXADDR, /* highaddr */
1755 NULL, NULL, /* filter, filterarg */
1756 MAXPHYS, /* maxsize */
1757 sc->max_num_sge, /* nsegments */
1758 MAXPHYS, /* maxsegsize */
1760 NULL, NULL, /* lockfunc, lockarg */
1761 &sc->mrsas_parent_tag /* tag */
1763 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1767 * Allocate for version buffer
1769 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1770 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1772 BUS_SPACE_MAXADDR_32BIT,
1781 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1784 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1785 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1786 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1789 bzero(sc->verbuf_mem, verbuf_size);
1790 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1791 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1793 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1797 * Allocate IO Request Frames
1799 io_req_size = sc->io_frames_alloc_sz;
1800 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1802 BUS_SPACE_MAXADDR_32BIT,
1810 &sc->io_request_tag)) {
1811 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1814 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1815 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1816 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1819 bzero(sc->io_request_mem, io_req_size);
1820 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1821 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1822 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1823 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1827 * Allocate Chain Frames
1829 chain_frame_size = sc->chain_frames_alloc_sz;
1830 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1832 BUS_SPACE_MAXADDR_32BIT,
1840 &sc->chain_frame_tag)) {
1841 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1844 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1845 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1846 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1849 bzero(sc->chain_frame_mem, chain_frame_size);
1850 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1851 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1852 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1853 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
/* One reply ring segment per MSI-X vector (at least one). */
1856 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1858 * Allocate Reply Descriptor Array
1860 reply_desc_size = sc->reply_alloc_sz * count;
1861 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1863 BUS_SPACE_MAXADDR_32BIT,
1871 &sc->reply_desc_tag)) {
1872 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1875 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1876 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1877 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1880 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1881 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1882 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1883 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1887 * Allocate Sense Buffer Array. Keep in lower 4GB
1889 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1890 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1892 BUS_SPACE_MAXADDR_32BIT,
1901 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1904 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1905 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1906 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1909 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1910 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1912 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1916 * Allocate for Event detail structure
1918 evt_detail_size = sizeof(struct mrsas_evt_detail);
1919 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1921 BUS_SPACE_MAXADDR_32BIT,
1929 &sc->evt_detail_tag)) {
1930 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1933 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1934 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1935 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1938 bzero(sc->evt_detail_mem, evt_detail_size);
1939 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1940 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1941 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1942 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1946 * Create a dma tag for data buffers; size will be the maximum
1947 * possible I/O size (280kB).
1949 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1956 sc->max_num_sge, /* nsegments */
1962 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1969 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1970 * input: callback argument, machine dependent type
1971 * that describes DMA segments, number of segments, error code
1973 * This function is for the driver to receive mapping information resultant of
1974 * the bus_dmamap_load(). The information is actually not being used, but the
1975 * address is saved anyway.
/*
 * mrsas_addr_cb: bus_dmamap_load() callback; stores the first segment's
 * bus address into the caller-supplied location (arg).  NOTE(review):
 * the local pointer declaration/assignment lines are missing from this
 * listing.
 */
1978 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1983 *addr = segs[0].ds_addr;
1987 * mrsas_setup_raidmap: Set up RAID map.
1988 * input: Adapter instance soft state
1990 * Allocate DMA memory for the RAID maps and perform setup.
/*
 * mrsas_setup_raidmap: allocate the two driver-local LD map copies
 * (plain malloc) and the two firmware-visible RAID map DMA buffers
 * (tag/alloc/zero/load), then fetch and sync the initial map from
 * firmware.  NOTE(review): braces, return statements, and several
 * tag-create parameter lines are missing from this listing; the
 * post-load NULL check at line 2043 appears redundant (alloc already
 * failed out earlier) — matches the upstream driver as written.
 */
1993 mrsas_setup_raidmap(struct mrsas_softc *sc)
1997 for (i = 0; i < 2; i++) {
1999 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2000 /* Do Error handling */
2001 if (!sc->ld_drv_map[i]) {
2002 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
/* On second-copy failure, release the first copy before aborting. */
2005 free(sc->ld_drv_map[0], M_MRSAS);
2006 /* ABORT driver initialization */
2011 for (int i = 0; i < 2; i++) {
2012 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2014 BUS_SPACE_MAXADDR_32BIT,
2022 &sc->raidmap_tag[i])) {
2023 device_printf(sc->mrsas_dev,
2024 "Cannot allocate raid map tag.\n");
2027 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2028 (void **)&sc->raidmap_mem[i],
2029 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2030 device_printf(sc->mrsas_dev,
2031 "Cannot allocate raidmap memory.\n");
2034 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2036 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2037 sc->raidmap_mem[i], sc->max_map_sz,
2038 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2040 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2043 if (!sc->raidmap_mem[i]) {
2044 device_printf(sc->mrsas_dev,
2045 "Cannot allocate memory for raid map.\n");
/* Pull the current map from FW; fall back to a sync if that fails. */
2050 if (!mrsas_get_map_info(sc))
2051 mrsas_sync_map_info(sc);
2060 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2061 * @sc: Adapter soft state
2063 * Return 0 on success.
/*
 * megasas_setup_jbod_map: allocate the two JBOD sequence-number map DMA
 * buffers used for fast-path IO to physical drives, then sync both
 * copies with firmware.  Skipped entirely (use_seqnum_jbod_fp = 0) if
 * the controller does not advertise useSeqNumJbodFP, and returns early
 * if the buffers already exist.  NOTE(review): braces, returns, and
 * some tag-create parameter lines are missing from this listing.
 */
2066 megasas_setup_jbod_map(struct mrsas_softc *sc)
2069 uint32_t pd_seq_map_sz;
/* Size covers the header plus MAX_PHYSICAL_DEVICES entries (one is
 * embedded in the struct). */
2071 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2072 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2074 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2075 sc->use_seqnum_jbod_fp = 0;
2078 if (sc->jbodmap_mem[0])
2081 for (i = 0; i < 2; i++) {
2082 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2084 BUS_SPACE_MAXADDR_32BIT,
2092 &sc->jbodmap_tag[i])) {
2093 device_printf(sc->mrsas_dev,
2094 "Cannot allocate jbod map tag.\n");
2097 if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2098 (void **)&sc->jbodmap_mem[i],
2099 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2100 device_printf(sc->mrsas_dev,
2101 "Cannot allocate jbod map memory.\n");
2104 bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2106 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2107 sc->jbodmap_mem[i], pd_seq_map_sz,
2108 mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2110 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2113 if (!sc->jbodmap_mem[i]) {
2114 device_printf(sc->mrsas_dev,
2115 "Cannot allocate memory for jbod map.\n");
2116 sc->use_seqnum_jbod_fp = 0;
/* Enable the FP path only if both sync directions succeed. */
2122 if (!megasas_sync_pd_seq_num(sc, false) &&
2123 !megasas_sync_pd_seq_num(sc, true))
2124 sc->use_seqnum_jbod_fp = 1;
2126 sc->use_seqnum_jbod_fp = 0;
2128 device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2132 * mrsas_init_fw: Initialize Firmware
2133 * input: Adapter soft state
2135 * Calls transition_to_ready() to make sure Firmware is in operational state and
2136 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2137 * issues internal commands to get the controller info after the IOC_INIT
2138 * command response is received by Firmware. Note: code relating to
2139 get_pdlist, get_ld_list and max_sectors is currently not being used; it
2140 is left here as a placeholder.
/*
 * mrsas_init_fw: bring firmware to operational state and configure the
 * driver around it — probe/allocate MSI-X vectors, run IOC init via
 * mrsas_init_adapter(), allocate MFI pass-through commands, fetch
 * controller info, set up RAID and JBOD maps, fetch PD/LD lists, and
 * compute max sectors per IO.  NOTE(review): braces, returns, the
 * "if (msix_enable)" guard, and error-path lines are missing from this
 * listing; the "lsit" typo at line 2249 is a runtime string and is
 * deliberately untouched.
 */
2143 mrsas_init_fw(struct mrsas_softc *sc)
2146 int ret, loop, ocr = 0;
2147 u_int32_t max_sectors_1;
2148 u_int32_t max_sectors_2;
2149 u_int32_t tmp_sectors;
2150 u_int32_t scratch_pad_2;
2151 int msix_enable = 0;
2152 int fw_msix_count = 0;
2154 /* Make sure Firmware is ready */
2155 ret = mrsas_transition_to_ready(sc, ocr);
2156 if (ret != SUCCESS) {
2159 /* MSI-x index 0- reply post host index register */
2160 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2161 /* Check if MSI-X is supported while in ready state */
/* Bit 26 of the scratch pad advertises MSI-X capability. */
2162 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2165 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2166 outbound_scratch_pad_2));
2168 /* Check max MSI-X vectors */
2169 if (sc->device_id == MRSAS_TBOLT) {
2170 sc->msix_vectors = (scratch_pad_2
2171 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2172 fw_msix_count = sc->msix_vectors;
2174 /* Invader/Fury supports 96 MSI-X vectors */
2175 sc->msix_vectors = ((scratch_pad_2
2176 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2177 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2178 fw_msix_count = sc->msix_vectors;
/* Vectors beyond the first use the supplemental host index registers. */
2180 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2182 sc->msix_reg_offset[loop] =
2183 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2188 /* Don't bother allocating more MSI-X vectors than cpus */
2189 sc->msix_vectors = min(sc->msix_vectors,
2192 /* Allocate MSI-x vectors */
2193 if (mrsas_allocate_msix(sc) == SUCCESS)
2194 sc->msix_enable = 1;
2196 sc->msix_enable = 0;
2198 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2199 "Online CPU %d Current MSIX <%d>\n",
2200 fw_msix_count, mp_ncpus, sc->msix_vectors);
2202 if (mrsas_init_adapter(sc) != SUCCESS) {
2203 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2206 /* Allocate internal commands for pass-thru */
2207 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2208 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2211 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2212 if (!sc->ctrl_info) {
2213 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2217 * Get the controller info from FW, so that the MAX VD support
2218 * availability can be decided.
2220 if (mrsas_get_ctrl_info(sc)) {
2221 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2224 sc->secure_jbod_support =
2225 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2227 if (sc->secure_jbod_support)
2228 device_printf(sc->mrsas_dev, "FW supports SED \n");
2230 if (sc->use_seqnum_jbod_fp)
2231 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2233 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2234 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2235 "There seems to be some problem in the controller\n"
2236 "Please contact to the SUPPORT TEAM if the problem persists\n");
2238 megasas_setup_jbod_map(sc);
2240 /* For pass-thru, get PD/LD list and controller info */
2241 memset(sc->pd_list, 0,
2242 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2243 if (mrsas_get_pd_list(sc) != SUCCESS) {
2244 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2247 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2248 if (mrsas_get_ld_list(sc) != SUCCESS) {
2249 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2253 * Compute the max allowed sectors per IO: The controller info has
2254 * two limits on max sectors. Driver should use the minimum of these
2257 * 1 << stripe_sz_ops.min = max sectors per strip
2259 * Note that older firmwares ( < FW ver 30) didn't report information to
2260 * calculate max_sectors_1. So the number ended up as zero always.
2263 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2264 sc->ctrl_info->max_strips_per_io;
2265 max_sectors_2 = sc->ctrl_info->max_request_size;
2266 tmp_sectors = min(max_sectors_1, max_sectors_2);
/* Driver default derived from SGE capacity; FW limit overrides below. */
2267 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2269 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2270 sc->max_sectors_per_req = tmp_sectors;
2272 sc->disableOnlineCtrlReset =
2273 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2274 sc->UnevenSpanSupport =
2275 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2276 if (sc->UnevenSpanSupport) {
2277 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2278 sc->UnevenSpanSupport);
/* Fast-path IO only when the RAID map validates. */
2280 if (MR_ValidateMapInfo(sc))
2281 sc->fast_path_io = 1;
2283 sc->fast_path_io = 0;
2289 * mrsas_init_adapter: Initializes the adapter/controller
2290 * input: Adapter soft state
2292 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2293 * ROC/controller. The FW register is read to determined the number of
2294 * commands that is supported. All memory allocations for IO is based on
2295 * max_cmd. Appropriate calculations are performed in this function.
/*
 * mrsas_init_adapter: read the firmware status register to size all
 * per-command structures (reply queue depth, request/reply/IO-frame and
 * chain-frame allocation sizes, SGE limits), reset per-vector reply
 * indices, then allocate DMA memory, MPT commands, and issue IOC init.
 * NOTE(review): braces, error checks after the mrsas_* calls, and the
 * declarations of i/ret/status are missing lines in this listing.
 */
2298 mrsas_init_adapter(struct mrsas_softc *sc)
2301 u_int32_t max_cmd, scratch_pad_2;
2305 /* Read FW status register */
2306 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2308 /* Get operational params from status register */
2309 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2311 /* Decrement the max supported by 1, to correlate with FW */
2312 sc->max_fw_cmds = sc->max_fw_cmds - 1;
2313 max_cmd = sc->max_fw_cmds;
2315 /* Determine allocation size of command frames */
/* Depth is (max_cmd+1) rounded up to a multiple of 16, then doubled. */
2316 sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2317 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2318 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2319 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2320 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2321 outbound_scratch_pad_2));
2323 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2324 * Firmware support extended IO chain frame which is 4 time more
2325 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2326 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
2328 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2329 sc->max_chain_frame_sz =
2330 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2333 sc->max_chain_frame_sz =
2334 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2337 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
/* Each SGE descriptor is 16 bytes; main-message SGEs fit after the
 * fixed header up to the SGL offset. */
2338 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2339 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2341 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
/* -2 reserves entries for the chain element(s) linking main to chain. */
2342 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2344 mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
2345 sc->max_num_sge, sc->max_chain_frame_sz);
2347 /* Used for pass thru MFI frame (DCMD) */
2348 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2350 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2351 sizeof(MPI2_SGE_IO_UNION)) / 16;
2353 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2355 for (i = 0; i < count; i++)
2356 sc->last_reply_idx[i] = 0;
2358 ret = mrsas_alloc_mem(sc);
2362 ret = mrsas_alloc_mpt_cmds(sc);
2366 ret = mrsas_ioc_init(sc);
2374 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2375 * input: Adapter soft state
2377 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
/*
 * Purpose: create a DMA tag, allocate and map a DMAable buffer for the
 * IOC INIT command (MFI init frame plus the MPI2 IOC INIT request that
 * lives 1024 bytes into the same buffer — see mrsas_ioc_init()).
 */
2380 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2384 /* Allocate IOC INIT command */
/* 1024 bytes for the MFI frame area + the MPI2 request that follows it. */
2385 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2386 if (bus_dma_tag_create(sc->mrsas_parent_tag,
/* Firmware requires this buffer below 4 GB. */
2388 BUS_SPACE_MAXADDR_32BIT,
2396 &sc->ioc_init_tag)) {
2397 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2400 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2401 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2402 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2405 bzero(sc->ioc_init_mem, ioc_init_size);
/* mrsas_addr_cb stores the bus address into ioc_init_phys_mem. */
2406 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2407 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2408 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2409 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2416 * mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
2417 * input: Adapter soft state
2419 * Deallocates memory of the IOC Init cmd.
/*
 * Purpose: tear down the IOC INIT DMA resources in reverse order of
 * allocation: unload the map, free the memory, destroy the tag.
 */
2422 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2424 if (sc->ioc_init_phys_mem)
2425 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2426 if (sc->ioc_init_mem != NULL)
2427 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2428 if (sc->ioc_init_tag != NULL)
2429 bus_dma_tag_destroy(sc->ioc_init_tag);
2433 * mrsas_ioc_init: Sends IOC Init command to FW
2434 * input: Adapter soft state
2436 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
/*
 * Purpose: build the MPI2 IOC INIT request and its MFI wrapper frame in
 * the DMA buffer from mrsas_alloc_ioc_cmd(), fire it at the controller,
 * and poll cmd_status (1 ms granularity) until the FW answers or the
 * wait time expires.  Frees the IOC INIT buffer on the way out.
 */
2439 mrsas_ioc_init(struct mrsas_softc *sc)
2441 struct mrsas_init_frame *init_frame;
2442 pMpi2IOCInitRequest_t IOCInitMsg;
2443 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
2444 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
2445 bus_addr_t phys_addr;
2447 u_int32_t scratch_pad_2;
2449 /* Allocate memory for the IOC INIT command */
2450 if (mrsas_alloc_ioc_cmd(sc)) {
2451 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/* Probe FW's SYNCHRONIZE_CACHE capability unless the tunable blocks it. */
2455 if (!sc->block_sync_cache) {
2456 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2457 outbound_scratch_pad_2));
2458 sc->fw_sync_cache_support = (scratch_pad_2 &
2459 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
/* MPI2 IOC INIT request sits 1024 bytes past the MFI frame (same buffer). */
2462 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
2463 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
2464 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2465 IOCInitMsg->MsgVersion = MPI2_VERSION;
2466 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
/* Frame size is expressed in 4-byte (DWORD) units per the MPI2 spec. */
2467 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
2468 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
2469 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
2470 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
2471 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
/* MFI wrapper frame at the start of the same DMA buffer. */
2473 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
2474 init_frame->cmd = MFI_CMD_INIT;
/* 0xFF = "no response yet"; FW overwrites it on completion. */
2475 init_frame->cmd_status = 0xFF;
2476 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2478 /* driver support Extended MSIX */
2479 if (sc->mrsas_gen3_ctrl) {
2480 init_frame->driver_operations.
2481 mfi_capabilities.support_additional_msix = 1;
/* Report the driver version string to FW if the version buffer exists. */
2483 if (sc->verbuf_mem) {
2484 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
2486 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
2487 init_frame->driver_ver_hi = 0;
/* Advertise driver capabilities so FW enables the matching features. */
2489 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
2490 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
2491 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
2492 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
2493 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
/* Point the MFI frame at the embedded MPI2 request (+1024 offset). */
2494 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
2495 init_frame->queue_info_new_phys_addr_lo = phys_addr;
2496 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
2498 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
2499 req_desc.MFAIo.RequestFlags =
2500 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
/* Polled command: keep interrupts off while waiting on cmd_status. */
2502 mrsas_disable_intr(sc);
2503 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
2504 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
2507 * Poll response timer to wait for Firmware response. While this
2508 * timer with the DELAY call could block CPU, the time interval for
2509 * this is only 1 millisecond.
2511 if (init_frame->cmd_status == 0xFF) {
2512 for (i = 0; i < (max_wait * 1000); i++) {
2513 if (init_frame->cmd_status == 0xFF)
2519 if (init_frame->cmd_status == 0)
2520 mrsas_dprint(sc, MRSAS_OCR,
2521 "IOC INIT response received from FW.\n");
/* Still 0xFF after the loop => FW never responded (timeout). */
2523 if (init_frame->cmd_status == 0xFF)
2524 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
2526 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
2530 mrsas_free_ioc_cmd(sc);
2535 * mrsas_alloc_mpt_cmds: Allocates the command packets
2536 * input: Adapter instance soft state
2538 * This function allocates the internal commands for IOs. Each command that is
2539 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2540 * array is allocated with mrsas_mpt_cmd context. The free commands are
2541 * maintained in a linked list (cmd pool). SMID value range is from 1 to
/*
 * Purpose: allocate the per-command bookkeeping for MPT I/O.  Builds the
 * request-descriptor array, the mrsas_mpt_cmd pointer array plus one
 * struct per command, carves each command's IO request / chain frame /
 * sense slices out of the shared DMA regions, and seeds the reply
 * descriptor queue with the 0xFFFFFFFF "empty" pattern.
 */
2545 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2548 u_int32_t max_cmd, count;
2549 struct mrsas_mpt_cmd *cmd;
2550 pMpi2ReplyDescriptorsUnion_t reply_desc;
2551 u_int32_t offset, chain_offset, sense_offset;
2552 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2553 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2555 max_cmd = sc->max_fw_cmds;
2557 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2558 if (!sc->req_desc) {
2559 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2562 memset(sc->req_desc, 0, sc->request_alloc_sz);
2565 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2566 * Allocate the dynamic array first and then allocate individual
2569 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2570 if (!sc->mpt_cmd_list) {
2571 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2574 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2575 for (i = 0; i < max_cmd; i++) {
2576 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
/* On partial failure, unwind every command allocated so far. */
2578 if (!sc->mpt_cmd_list[i]) {
2579 for (j = 0; j < i; j++)
2580 free(sc->mpt_cmd_list[j], M_MRSAS);
2581 free(sc->mpt_cmd_list, M_MRSAS);
2582 sc->mpt_cmd_list = NULL;
/* Skip frame 0: SMID 0 is reserved, usable frames start at index 1. */
2587 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2588 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2589 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2590 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2591 sense_base = (u_int8_t *)sc->sense_mem;
2592 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
/* Slice the shared DMA regions into fixed per-command offsets. */
2593 for (i = 0; i < max_cmd; i++) {
2594 cmd = sc->mpt_cmd_list[i];
2595 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2596 chain_offset = sc->max_chain_frame_sz * i;
2597 sense_offset = MRSAS_SENSE_LEN * i;
2598 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2600 cmd->ccb_ptr = NULL;
2601 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
/* MRSAS_ULONG_MAX marks "no MFI sync command attached". */
2602 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2604 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2605 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2606 cmd->io_request_phys_addr = io_req_base_phys + offset;
2607 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2608 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2609 cmd->sense = sense_base + sense_offset;
2610 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2611 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
/* Every command starts on the free list. */
2614 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2617 /* Initialize reply descriptor array to 0xFFFFFFFF */
2618 reply_desc = sc->reply_desc_mem;
2619 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2620 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2621 reply_desc->Words = MRSAS_ULONG_MAX;
2627 * mrsas_fire_cmd: Sends command to FW
2628 * input: Adapter softstate
2629 * request descriptor address low
2630 * request descriptor address high
2632 * This functions fires the command to Firmware by writing to the
2633 * inbound_low_queue_port and inbound_high_queue_port.
/*
 * Purpose: post a request descriptor to the controller.  The low/high
 * queue port writes must appear as one unit to the hardware, so both
 * register writes are serialized under pci_lock.
 */
2636 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2637 u_int32_t req_desc_hi)
2639 mtx_lock(&sc->pci_lock);
2640 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2642 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2644 mtx_unlock(&sc->pci_lock);
2648 * mrsas_transition_to_ready: Move FW to Ready state input:
2649 * Adapter instance soft state
2651 * During initialization, the FW can potentially be in any one of several
2652 * possible states. If the FW is in operational or waiting-for-handshake states,
2653 * driver must take steps to bring it to ready state. Otherwise, it has to
2654 * wait for the ready state.
/*
 * Purpose: drive the firmware state machine toward MFI_STATE_READY.
 * Each pass reads the current state, performs the doorbell action that
 * state requires, then polls (up to max_wait seconds) for the absolute
 * state to change before looping.  Fails if FW is faulted or stuck.
 */
2657 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2661 u_int32_t val, fw_state;
2662 u_int32_t cur_state;
2663 u_int32_t abs_state, curr_abs_state;
2665 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2666 fw_state = val & MFI_STATE_MASK;
2667 max_wait = MRSAS_RESET_WAIT_TIME;
2669 if (fw_state != MFI_STATE_READY)
2670 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2672 while (fw_state != MFI_STATE_READY) {
/* abs_state keeps the full register value so sub-state changes count. */
2673 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2675 case MFI_STATE_FAULT:
2676 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2678 cur_state = MFI_STATE_FAULT;
2682 case MFI_STATE_WAIT_HANDSHAKE:
2683 /* Set the CLR bit in inbound doorbell */
2684 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2685 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
2686 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2688 case MFI_STATE_BOOT_MESSAGE_PENDING:
/* Acknowledge the pending boot message via the doorbell. */
2689 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2691 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2693 case MFI_STATE_OPERATIONAL:
2695 * Bring it to READY state; assuming max wait 10
2698 mrsas_disable_intr(sc);
2699 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
/* Wait for the doorbell to be consumed (bit 0 clears on ack). */
2700 for (i = 0; i < max_wait * 1000; i++) {
2701 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2706 cur_state = MFI_STATE_OPERATIONAL;
2708 case MFI_STATE_UNDEFINED:
2710 * This state should not last for more than 2
2713 cur_state = MFI_STATE_UNDEFINED;
/* Transient boot states: no doorbell action, just wait them out. */
2715 case MFI_STATE_BB_INIT:
2716 cur_state = MFI_STATE_BB_INIT;
2718 case MFI_STATE_FW_INIT:
2719 cur_state = MFI_STATE_FW_INIT;
2721 case MFI_STATE_FW_INIT_2:
2722 cur_state = MFI_STATE_FW_INIT_2;
2724 case MFI_STATE_DEVICE_SCAN:
2725 cur_state = MFI_STATE_DEVICE_SCAN;
2727 case MFI_STATE_FLUSH_CACHE:
2728 cur_state = MFI_STATE_FLUSH_CACHE;
2731 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2736 * The cur_state should not last for more than max_wait secs
2738 for (i = 0; i < (max_wait * 1000); i++) {
2739 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2740 outbound_scratch_pad)) & MFI_STATE_MASK);
2741 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2742 outbound_scratch_pad));
2743 if (abs_state == curr_abs_state)
2750 * Return error if fw_state hasn't changed after max_wait
2752 if (curr_abs_state == abs_state) {
2753 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2754 "in %d secs\n", fw_state, max_wait);
2758 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2763 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2764 * input: Adapter soft state
2766 * This function removes an MFI command from the command list.
/*
 * Purpose: pop one MFI command from the free pool under the pool lock.
 * Returns NULL when the pool is exhausted (cmd stays NULL).
 */
struct mrsas_mfi_cmd *
2769 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2771 struct mrsas_mfi_cmd *cmd = NULL;
2773 mtx_lock(&sc->mfi_cmd_pool_lock);
2774 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2775 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2776 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2778 mtx_unlock(&sc->mfi_cmd_pool_lock);
2784 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
2785 * input: Adapter Context.
2787 * This function will check FW status register and flag do_timeout_reset flag.
2788 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
/*
 * Purpose: kernel thread that wakes every fw_fault_check_delay seconds
 * (or when signalled on ocr_chan), checks the FW state register, and
 * initiates target reset and/or Online Controller Reset (OCR) when FW
 * is faulted, a DCMD timed out, or SCSI I/O target resets are pending.
 * Runs until driver detach or a hardware-critical error.
 */
2792 mrsas_ocr_thread(void *arg)
2794 struct mrsas_softc *sc;
2795 u_int32_t fw_status, fw_state;
2796 u_int8_t tm_target_reset_failed = 0;
2798 sc = (struct mrsas_softc *)arg;
2800 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2802 sc->ocr_thread_active = 1;
2803 mtx_lock(&sc->sim_lock);
2805 /* Sleep for 1 second and check the queue status */
2806 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2807 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
/* Exit conditions: detach in progress or adapter declared dead. */
2808 if (sc->remove_in_progress ||
2809 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2810 mrsas_dprint(sc, MRSAS_OCR,
2811 "Exit due to %s from %s\n",
2812 sc->remove_in_progress ? "Shutdown" :
2813 "Hardware critical error", __func__);
2816 fw_status = mrsas_read_reg(sc,
2817 offsetof(mrsas_reg_set, outbound_scratch_pad));
2818 fw_state = fw_status & MFI_STATE_MASK;
2819 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
2820 mrsas_atomic_read(&sc->target_reset_outstanding)) {
2822 /* First, freeze further IOs to come to the SIM */
2823 mrsas_xpt_freeze(sc);
2825 /* If this is an IO timeout then go for target reset */
2826 if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
2827 device_printf(sc->mrsas_dev, "Initiating Target RESET "
2828 "because of SCSI IO timeout!\n");
2830 /* Let the remaining IOs to complete */
2831 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2832 "mrsas_reset_targets", 5 * hz);
2834 /* Try to reset the target device */
2835 if (mrsas_reset_targets(sc) == FAIL)
2836 tm_target_reset_failed = 1;
2839 /* If this is a DCMD timeout or FW fault,
2840 * then go for controller reset
2842 if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
2843 (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
2844 if (tm_target_reset_failed)
2845 device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
2848 device_printf(sc->mrsas_dev, "Initiaiting OCR "
2849 "because of %s!\n", sc->do_timedout_reset ?
2850 "DCMD IO Timeout" : "FW fault");
/* ioctl_lock is a spin mutex; flag blocks new ioctls during reset. */
2852 mtx_lock_spin(&sc->ioctl_lock);
2853 sc->reset_in_progress = 1;
2854 mtx_unlock_spin(&sc->ioctl_lock);
2858 * Wait for the AEN task to be completed if it is running.
/* Drop sim_lock while draining to avoid deadlock with the AEN task. */
2860 mtx_unlock(&sc->sim_lock);
2861 taskqueue_drain(sc->ev_tq, &sc->ev_task);
2862 mtx_lock(&sc->sim_lock);
2864 taskqueue_block(sc->ev_tq);
2865 /* Try to reset the controller */
2866 mrsas_reset_ctrl(sc, sc->do_timedout_reset);
/* Clear all reset bookkeeping regardless of OCR outcome. */
2868 sc->do_timedout_reset = 0;
2869 sc->reset_in_progress = 0;
2870 tm_target_reset_failed = 0;
2871 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
2872 memset(sc->target_reset_pool, 0,
2873 sizeof(sc->target_reset_pool));
2874 taskqueue_unblock(sc->ev_tq);
2877 /* Now allow IOs to come to the SIM */
2878 mrsas_xpt_release(sc);
2881 mtx_unlock(&sc->sim_lock);
2882 sc->ocr_thread_active = 0;
2883 mrsas_kproc_exit(0);
2887 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
2888 * input: Adapter Context.
2890 * This function will clear reply descriptor so that post OCR driver and FW will
/*
 * Purpose: after an OCR, rewind the per-vector reply indices to zero and
 * repaint the reply descriptor queue with the 0xFFFFFFFF "empty" pattern
 * so driver and FW agree on an empty queue.
 */
2894 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2897 pMpi2ReplyDescriptorsUnion_t reply_desc;
2899 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2900 for (i = 0; i < count; i++)
2901 sc->last_reply_idx[i] = 0;
2903 reply_desc = sc->reply_desc_mem;
2904 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2905 reply_desc->Words = MRSAS_ULONG_MAX;
2910 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
2911 * input: Adapter Context.
2913 * This function will run from thread context so that it can sleep. 1. Do not
2914 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2915 * to complete for 180 seconds. 3. If #2 does not find any outstanding
2916 * command Controller is in working state, so skip OCR. Otherwise, do
2917 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2918 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2919 * OCR, Re-fire Management command and move Controller to Operation state.
/*
 * Purpose: perform Online Controller Reset (OCR) or kill the adapter.
 * Sequence: wait for outstanding I/O; fail all stuck CCBs back to CAM;
 * issue the fusion diag-reset key sequence (up to MAX_RESET_TRIES);
 * bring FW back to READY; re-run IOC INIT; re-fire pending sync (ioctl)
 * MFI commands; refresh ctrl/map info and re-register AEN.  If OCR is
 * disabled or the chip won't reset, the adapter is killed instead.
 */
2922 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
2924 int retval = SUCCESS, i, j, retry = 0;
2925 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2927 struct mrsas_mfi_cmd *mfi_cmd;
2928 struct mrsas_mpt_cmd *mpt_cmd;
2929 union mrsas_evt_class_locale class_locale;
2930 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2932 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2933 device_printf(sc->mrsas_dev,
2934 "mrsas: Hardware critical error, returning FAIL.\n");
2937 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2938 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2939 mrsas_disable_intr(sc);
2940 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
2941 sc->mrsas_fw_fault_check_delay * hz);
2943 /* First try waiting for commands to complete */
2944 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
2945 mrsas_dprint(sc, MRSAS_OCR,
2946 "resetting adapter from %s.\n",
2948 /* Now return commands back to the CAM layer */
/* sim_lock dropped so mrsas_cmd_done can complete CCBs into CAM. */
2949 mtx_unlock(&sc->sim_lock);
2950 for (i = 0; i < sc->max_fw_cmds; i++) {
2951 mpt_cmd = sc->mpt_cmd_list[i];
2952 if (mpt_cmd->ccb_ptr) {
2953 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2954 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2955 mrsas_cmd_done(sc, mpt_cmd);
2956 mrsas_atomic_dec(&sc->fw_outstanding);
2959 mtx_lock(&sc->sim_lock);
2961 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2962 outbound_scratch_pad));
2963 abs_state = status_reg & MFI_STATE_MASK;
2964 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2965 if (sc->disableOnlineCtrlReset ||
2966 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2967 /* Reset not supported, kill adapter */
2968 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n")
2973 /* Now try to reset the chip */
2974 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
/* Magic 7-key unlock sequence required before host diag access. */
2975 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2976 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2977 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2978 MPI2_WRSEQ_1ST_KEY_VALUE);
2979 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2980 MPI2_WRSEQ_2ND_KEY_VALUE);
2981 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2982 MPI2_WRSEQ_3RD_KEY_VALUE);
2983 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2984 MPI2_WRSEQ_4TH_KEY_VALUE);
2985 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2986 MPI2_WRSEQ_5TH_KEY_VALUE);
2987 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2988 MPI2_WRSEQ_6TH_KEY_VALUE);
2990 /* Check that the diag write enable (DRWE) bit is on */
2991 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2994 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2996 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2998 if (retry++ == 100) {
2999 mrsas_dprint(sc, MRSAS_OCR,
3000 "Host diag unlock failed!\n");
3004 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3007 /* Send chip reset command */
3008 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3009 host_diag | HOST_DIAG_RESET_ADAPTER);
3012 /* Make sure reset adapter bit is cleared */
3013 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3016 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3018 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3020 if (retry++ == 1000) {
3021 mrsas_dprint(sc, MRSAS_OCR,
3022 "Diag reset adapter never cleared!\n");
3026 if (host_diag & HOST_DIAG_RESET_ADAPTER)
/* Wait for FW to leave its early boot states after the diag reset. */
3029 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3030 outbound_scratch_pad)) & MFI_STATE_MASK;
3033 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3035 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3036 outbound_scratch_pad)) & MFI_STATE_MASK;
3038 if (abs_state <= MFI_STATE_FW_INIT) {
3039 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3040 " state = 0x%x\n", abs_state);
3043 /* Wait for FW to become ready */
3044 if (mrsas_transition_to_ready(sc, 1)) {
3045 mrsas_dprint(sc, MRSAS_OCR,
3046 "mrsas: Failed to transition controller to ready.\n");
3049 mrsas_reset_reply_desc(sc);
3050 if (mrsas_ioc_init(sc)) {
3051 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
/* Re-fire sync-cmd (ioctl) MFI frames that were in flight at reset. */
3054 for (j = 0; j < sc->max_fw_cmds; j++) {
3055 mpt_cmd = sc->mpt_cmd_list[j];
3056 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3057 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3058 /* If not an IOCTL then release the command else re-fire */
3059 if (!mfi_cmd->sync_cmd) {
3060 mrsas_release_mfi_cmd(mfi_cmd);
3062 req_desc = mrsas_get_request_desc(sc,
3063 mfi_cmd->cmd_id.context.smid - 1);
3064 mrsas_dprint(sc, MRSAS_OCR,
3065 "Re-fire command DCMD opcode 0x%x index %d\n ",
3066 mfi_cmd->frame->dcmd.opcode, j);
3068 device_printf(sc->mrsas_dev,
3069 "Cannot build MPT cmd.\n");
3071 mrsas_fire_cmd(sc, req_desc->addr.u.low,
3072 req_desc->addr.u.high);
3077 /* Reset load balance info */
3078 memset(sc->load_balance_info, 0,
3079 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3081 if (mrsas_get_ctrl_info(sc)) {
/* Refresh RAID map; push the driver's copy back to FW if fetch worked. */
3086 if (!mrsas_get_map_info(sc))
3087 mrsas_sync_map_info(sc);
3089 megasas_setup_jbod_map(sc);
3091 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3092 mrsas_enable_intr(sc);
3093 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3095 /* Register AEN with FW for last sequence number */
3096 class_locale.members.reserved = 0;
3097 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3098 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3100 mtx_unlock(&sc->sim_lock);
3101 if (mrsas_register_aen(sc, sc->last_seq_num,
3102 class_locale.word)) {
3103 device_printf(sc->mrsas_dev,
3104 "ERROR: AEN registration FAILED from OCR !!! "
3105 "Further events from the controller cannot be notified."
3106 "Either there is some problem in the controller"
3107 "or the controller does not support AEN.\n"
3108 "Please contact to the SUPPORT TEAM if the problem persists\n");
3110 mtx_lock(&sc->sim_lock);
3112 /* Adapter reset completed successfully */
3113 device_printf(sc->mrsas_dev, "Reset successful\n");
3117 /* Reset failed, kill the adapter */
3118 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
/* No reset was needed: outstanding I/O drained, just resume. */
3122 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3123 mrsas_enable_intr(sc);
3124 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3127 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3128 mrsas_dprint(sc, MRSAS_OCR,
3129 "Reset Exit with %d.\n", retval);
3134 * mrsas_kill_hba: Kill HBA when OCR is not supported
3135 * input: Adapter Context.
3137 * This function will kill HBA when OCR is not supported.
/*
 * Purpose: permanently disable the adapter when OCR cannot be used.
 * Marks the softc dead, stops FW via the doorbell, and completes any
 * pending ioctl commands so their waiters are released.
 */
3140 mrsas_kill_hba(struct mrsas_softc *sc)
3142 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
3144 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
3145 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
/* Read back the doorbell to flush the posted write to the device. */
3148 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
3149 mrsas_complete_outstanding_ioctls(sc);
3153 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3154 * input: Controller softc
/*
 * Purpose: after kill_hba, walk every MPT command and force-complete any
 * attached synchronous (ioctl) MFI command so blocked callers wake up.
 * ABORT frames are skipped; completion status is taken from the MPT
 * command's RAID context.
 */
3159 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3162 struct mrsas_mpt_cmd *cmd_mpt;
3163 struct mrsas_mfi_cmd *cmd_mfi;
3164 u_int32_t count, MSIxIndex;
3166 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3167 for (i = 0; i < sc->max_fw_cmds; i++) {
3168 cmd_mpt = sc->mpt_cmd_list[i];
/* sync_cmd_idx != ULONG_MAX means an MFI sync command is attached. */
3170 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3171 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3172 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3173 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3174 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3175 cmd_mpt->io_request->RaidContext.status)
3182 * mrsas_wait_for_outstanding: Wait for outstanding commands
3183 * input: Adapter Context.
3185 * This function will wait for 180 seconds for outstanding commands to be
/*
 * Purpose: poll up to MRSAS_RESET_WAIT_TIME iterations for fw_outstanding
 * to drain before an OCR.  Bails out early (requesting reset) on FW
 * fault or DCMD timeout, and returns nonzero if commands remain.
 */
3189 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3191 int i, outstanding, retval = 0;
3192 u_int32_t fw_state, count, MSIxIndex;
3195 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
3196 if (sc->remove_in_progress) {
3197 mrsas_dprint(sc, MRSAS_OCR,
3198 "Driver remove or shutdown called.\n");
3202 /* Check if firmware is in fault state */
3203 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3204 outbound_scratch_pad)) & MFI_STATE_MASK;
3205 if (fw_state == MFI_STATE_FAULT) {
3206 mrsas_dprint(sc, MRSAS_OCR,
3207 "Found FW in FAULT state, will reset adapter.\n");
/* Drain every MSI-X reply queue so completions are not lost. */
3208 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3209 mtx_unlock(&sc->sim_lock);
3210 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3211 mrsas_complete_cmd(sc, MSIxIndex);
3212 mtx_lock(&sc->sim_lock);
3216 if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3217 mrsas_dprint(sc, MRSAS_OCR,
3218 "DCMD IO TIMEOUT detected, will reset adapter.\n");
3222 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
/* Periodic progress note; also drain reply queues while waiting. */
3226 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3227 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3228 "commands to complete\n", i, outstanding);
3229 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3230 mtx_unlock(&sc->sim_lock);
3231 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3232 mrsas_complete_cmd(sc, MSIxIndex);
3233 mtx_lock(&sc->sim_lock);
3238 if (mrsas_atomic_read(&sc->fw_outstanding)) {
3239 mrsas_dprint(sc, MRSAS_OCR,
3240 " pending commands remain after waiting,"
3241 " will reset adapter.\n");
3249 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3250 * input: Command packet for return to free cmd pool
3252 * This function returns the MFI & MPT command to the command list.
/*
 * Purpose: return an MFI command (and any MPT command bound to it via
 * its SMID) to the respective free pools.  Both pool locks are taken;
 * the MFI pool lock spans the whole operation.
 */
3255 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
3257 struct mrsas_softc *sc = cmd_mfi->sc;
3258 struct mrsas_mpt_cmd *cmd_mpt;
3261 mtx_lock(&sc->mfi_cmd_pool_lock);
3263 * Release the mpt command (if at all it is allocated
3264 * associated with the mfi command
/* A nonzero SMID means an MPT command was paired with this MFI cmd. */
3266 if (cmd_mfi->cmd_id.context.smid) {
3267 mtx_lock(&sc->mpt_cmd_pool_lock);
3268 /* Get the mpt cmd from mfi cmd frame's smid value */
/* SMIDs are 1-based; the command list index is SMID - 1. */
3269 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
3271 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
3272 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
3273 mtx_unlock(&sc->mpt_cmd_pool_lock);
3275 /* Release the mfi command */
3276 cmd_mfi->ccb_ptr = NULL;
3277 cmd_mfi->cmd_id.frame_count = 0;
3278 TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
3279 mtx_unlock(&sc->mfi_cmd_pool_lock);
3285 * mrsas_get_controller_info: Returns FW's controller structure
3286 * input: Adapter soft state
3287 * Controller information structure
3289 * Issues an internal command (DCMD) to get the FW's controller structure. This
3290 * information is mainly used to find out the maximum IO transfer per command
3291 * supported by the FW.
/*
 * Purpose: issue the MR_DCMD_CTRL_GET_INFO DCMD to fetch the firmware's
 * controller-info structure into a DMA buffer, copy it into
 * sc->ctrl_info, and refresh the derived VD/PD limits and OCR policy.
 * A DCMD timeout schedules an OCR instead of failing silently.
 */
3294 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3297 u_int8_t do_ocr = 1;
3298 struct mrsas_mfi_cmd *cmd;
3299 struct mrsas_dcmd_frame *dcmd;
3301 cmd = mrsas_get_mfi_cmd(sc);
3304 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3307 dcmd = &cmd->frame->dcmd;
3309 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3310 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3311 mrsas_release_mfi_cmd(cmd);
3314 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Build the DCMD frame: single-SGE read of the ctrl-info buffer. */
3316 dcmd->cmd = MFI_CMD_DCMD;
3317 dcmd->cmd_status = 0xFF;
3318 dcmd->sge_count = 1;
3319 dcmd->flags = MFI_FRAME_DIR_READ;
3322 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
3323 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
3324 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
3325 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
/* Blocked (interrupt-driven) normally; polled while interrupts masked. */
3327 if (!sc->mask_interrupts)
3328 retcode = mrsas_issue_blocked_cmd(sc, cmd);
3330 retcode = mrsas_issue_polled(sc, cmd);
3332 if (retcode == ETIMEDOUT)
3335 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3338 mrsas_update_ext_vd_details(sc);
3340 sc->use_seqnum_jbod_fp =
3341 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3342 sc->disableOnlineCtrlReset =
3343 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
3346 mrsas_free_ctlr_info_cmd(sc);
/* Timeout path: request a DCMD-timeout OCR from the OCR thread. */
3349 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3351 if (!sc->mask_interrupts)
3352 mrsas_release_mfi_cmd(cmd);
3358 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3360 * sc - Controller's softc
/*
 * Purpose: derive extended-VD support and the FW/driver VD & PD counts
 * from the just-fetched ctrl_info, then compute the legacy, extended,
 * and driver RAID map sizes used for map allocation and validation.
 */
3363 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3365 sc->max256vdSupport =
3366 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3367 /* Below is additional check to address future FW enhancement */
3368 if (sc->ctrl_info->max_lds > 64)
3369 sc->max256vdSupport = 1;
3371 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3372 * MRSAS_MAX_DEV_PER_CHANNEL;
3373 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3374 * MRSAS_MAX_DEV_PER_CHANNEL;
3375 if (sc->max256vdSupport) {
3376 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3377 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3379 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3380 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
/* Map structs embed one MR_LD_SPAN_MAP, hence the (count - 1) terms. */
3383 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3384 (sizeof(MR_LD_SPAN_MAP) *
3385 (sc->fw_supported_vd_count - 1));
3386 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3387 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3388 (sizeof(MR_LD_SPAN_MAP) *
3389 (sc->drv_supported_vd_count - 1));
3391 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
/* Pick the map size matching the FW's extended-VD capability. */
3393 if (sc->max256vdSupport)
3394 sc->current_map_sz = sc->new_map_sz;
3396 sc->current_map_sz = sc->old_map_sz;
3400 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3401 * input: Adapter soft state
3403 * Allocates DMAable memory for the controller info internal command.
/*
 * Purpose: create the DMA tag, allocate, load, and zero the buffer that
 * receives the firmware's mrsas_ctrl_info structure for the GET_INFO
 * DCMD.  Mirrors mrsas_alloc_ioc_cmd() in structure.
 */
3406 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3410 /* Allocate get controller info command */
3411 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3412 if (bus_dma_tag_create(sc->mrsas_parent_tag,
/* DCMD SGE uses a 32-bit address, so the buffer must be below 4 GB. */
3414 BUS_SPACE_MAXADDR_32BIT,
3422 &sc->ctlr_info_tag)) {
3423 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3426 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3427 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3428 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3431 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3432 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3433 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3434 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3437 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3442 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3443 * input: Adapter soft state
3445 * Deallocates memory of the get controller info cmd.
/*
 * Tear-down in strict reverse order of mrsas_alloc_ctlr_info_cmd():
 * unload the DMA map, free the DMA memory, destroy the tag.  Each step
 * is guarded so a partially-completed allocation can be cleaned up.
 */
3448 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3450 if (sc->ctlr_info_phys_addr)
3451 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3452 if (sc->ctlr_info_mem != NULL)
3453 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3454 if (sc->ctlr_info_tag != NULL)
3455 bus_dma_tag_destroy(sc->ctlr_info_tag);
3459 * mrsas_issue_polled: Issues a polling command
3460 * inputs: Adapter soft state
3461 * Command packet to be issued
3463 * This function is for posting of internal commands to Firmware. MFI requires
3464 * the cmd_status to be set to 0xFF before posting. The maximun wait time of
3465 * the poll response timer is 180 seconds.
/*
 * Returns SUCCESS, or ETIMEDOUT if cmd_status is still 0xFF after the
 * polling window expires.
 */
3468 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3470 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3471 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3472 int i, retcode = SUCCESS;
/* 0xFF means "no response yet"; firmware overwrites it on completion. */
3474 frame_hdr->cmd_status = 0xFF;
/* Completion is detected by polling, so keep it off the reply queue. */
3475 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3477 /* Issue the frame using inbound queue port */
3478 if (mrsas_issue_dcmd(sc, cmd)) {
3479 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3483 * Poll response timer to wait for Firmware response. While this
3484 * timer with the DELAY call could block CPU, the time interval for
3485 * this is only 1 millisecond.
/* Up to max_wait*1000 one-millisecond polls (DELAY call elided in listing). */
3487 if (frame_hdr->cmd_status == 0xFF) {
3488 for (i = 0; i < (max_wait * 1000); i++) {
3489 if (frame_hdr->cmd_status == 0xFF)
/* Still 0xFF after the loop: firmware never answered within max_wait secs. */
3495 if (frame_hdr->cmd_status == 0xFF) {
3496 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3497 "seconds from %s\n", max_wait, __func__);
3498 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3499 cmd->frame->dcmd.opcode);
3500 retcode = ETIMEDOUT;
3506 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3507 * input: Adapter soft state mfi cmd pointer
3509 * This function is called by mrsas_issued_blocked_cmd() and
3510 * mrsas_issued_polled(), to build the MPT command and then fire the command
/*
 * Wraps the MFI command in an MPT request descriptor via
 * mrsas_build_mpt_cmd() and posts it with mrsas_fire_cmd().
 * NOTE(review): the NULL-check branch for req_desc is elided in this listing.
 */
3514 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3516 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3518 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3520 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
/* Post low/high halves of the 64-bit descriptor to the inbound queue port. */
3523 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3529 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3530 * input: Adapter soft state mfi cmd to build
3532 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3533 * command and prepares the MPT command to send to Firmware.
/*
 * Returns the request descriptor for the passthru, or (presumably) NULL on
 * failure -- the failure-path return is elided in this listing.
 */
3535 MRSAS_REQUEST_DESCRIPTOR_UNION *
3536 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3538 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3541 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3542 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
/* SMIDs are 1-based; the request-descriptor array is 0-based, hence -1. */
3545 index = cmd->cmd_id.context.smid;
3547 req_desc = mrsas_get_request_desc(sc, index - 1);
3551 req_desc->addr.Words = 0;
3552 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3554 req_desc->SCSIIO.SMID = index;
3560 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3561 * input: Adapter soft state mfi cmd pointer
3563 * The MPT command and the io_request are setup as a passthru command. The SGE
3564 * chain address is set to frame_phys_addr of the MFI command.
/*
 * Pairs the MFI cmd with a fresh MPT cmd: records the SMID for completion
 * routing, optionally propagates the dont-post-in-reply-queue flag, and
 * points the IEEE SGE chain at the MFI frame's physical address.
 */
3567 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3569 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3570 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3571 struct mrsas_mpt_cmd *mpt_cmd;
3572 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3574 mpt_cmd = mrsas_get_mpt_cmd(sc);
3578 /* Save the smid. To be used for returning the cmd */
3579 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
/* Cross-link so the MPT completion can find its originating MFI cmd. */
3581 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3584 * For cmds where the flag is set, store the flag and check on
3585 * completion. For cmds with this flag, don't call
3586 * mrsas_complete_cmd.
3589 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3590 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3592 io_req = mpt_cmd->io_request;
/* Gen3 controllers: clear flags of the last SGE slot in the main message. */
3594 if (sc->mrsas_gen3_ctrl) {
3595 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3597 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3598 sgl_ptr_end->Flags = 0;
3600 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3602 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3603 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3604 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
/* Chain element points at the MFI frame itself (IOC PL BNTA addressing). */
3606 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3608 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3609 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3611 mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
3617 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3618 * input: Adapter soft state Command to be issued
3620 * This function waits on an event for the command to be returned from the ISR.
3621 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3622 * internal and ioctl commands.
/*
 * Sleeps in 1-second ticks on &sc->chan until the ISR path (mrsas_wakeup)
 * flips cmd_status away from the 0xFF sentinel, or until max_wait seconds
 * elapse.  Returns SUCCESS or ETIMEDOUT.
 */
3625 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3627 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3628 unsigned long total_time = 0;
3629 int retcode = SUCCESS;
3631 /* Initialize cmd_status */
3632 cmd->cmd_status = 0xFF;
3634 /* Build MPT-MFI command for issue to FW */
3635 if (mrsas_issue_dcmd(sc, cmd)) {
3636 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
/* NOTE(review): sc->chan stores &cmd (address of a local pointer) but the
 * sleep/wakeup channel actually used below is &sc->chan itself -- the
 * stored value appears unused; confirm against the full source. */
3639 sc->chan = (void *)&cmd;
3642 if (cmd->cmd_status == 0xFF) {
3643 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3647 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL
3650 if (total_time >= max_wait) {
3651 device_printf(sc->mrsas_dev,
3652 "Internal command timed out after %d seconds.\n", max_wait);
/* Final check: sentinel still present means firmware never completed it. */
3659 if (cmd->cmd_status == 0xFF) {
3660 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3661 "seconds from %s\n", max_wait, __func__);
3662 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3663 cmd->frame->dcmd.opcode);
3664 retcode = ETIMEDOUT;
3670 * mrsas_complete_mptmfi_passthru: Completes a command
3671 * input: @sc: Adapter soft state
3672 * @cmd: Command to be completed
3673 * @status: cmd completion status
3675 * This function is called from mrsas_complete_cmd() after an interrupt is
3676 * received from Firmware, and io_request->Function is
3677 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
/*
 * Dispatches on the MFI command opcode in hdr->cmd (switch header elided in
 * this listing): SCSI passthru IOCTLs wake their sleeper; DCMD completions
 * handle LD map updates, AEN events and JBOD sequence-map refreshes; ABORT
 * completions wake the aborting thread.
 */
3680 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
3683 struct mrsas_header *hdr = &cmd->frame->hdr;
3684 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
3686 /* Reset the retry counter for future re-tries */
3687 cmd->retry_for_fw_reset = 0;
3690 cmd->ccb_ptr = NULL;
3693 case MFI_CMD_INVALID:
3694 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
3696 case MFI_CMD_PD_SCSI_IO:
3697 case MFI_CMD_LD_SCSI_IO:
3699 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3700 * issued either through an IO path or an IOCTL path. If it
3701 * was via IOCTL, we will send it to internal completion.
3703 if (cmd->sync_cmd) {
3705 mrsas_wakeup(sc, cmd);
3711 /* Check for LD map update */
/* mbox.b[1] == 1 marks the pended map-update variant of this DCMD. */
3712 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
3713 (cmd->frame->dcmd.mbox.b[1] == 1)) {
3714 sc->fast_path_io = 0;
3715 mtx_lock(&sc->raidmap_lock);
3716 sc->map_update_cmd = NULL;
3717 if (cmd_status != 0) {
3718 if (cmd_status != MFI_STAT_NOT_FOUND)
3719 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
3721 mrsas_release_mfi_cmd(cmd);
3722 mtx_unlock(&sc->raidmap_lock);
/* Success path: validate the new map, then re-arm the sync-map command. */
3727 mrsas_release_mfi_cmd(cmd);
3728 if (MR_ValidateMapInfo(sc))
3729 sc->fast_path_io = 0;
3731 sc->fast_path_io = 1;
3732 mrsas_sync_map_info(sc);
3733 mtx_unlock(&sc->raidmap_lock);
3736 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3737 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
3738 sc->mrsas_aen_triggered = 0;
3740 /* FW has an updated PD sequence */
3741 if ((cmd->frame->dcmd.opcode ==
3742 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3743 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3745 mtx_lock(&sc->raidmap_lock);
3746 sc->jbod_seq_cmd = NULL;
3747 mrsas_release_mfi_cmd(cmd);
3749 if (cmd_status == MFI_STAT_OK) {
3750 sc->pd_seq_map_id++;
3751 /* Re-register a pd sync seq num cmd */
3752 if (megasas_sync_pd_seq_num(sc, true))
3753 sc->use_seqnum_jbod_fp = 0;
/* Failure: disable JBOD fast path rather than use a stale sequence map. */
3755 sc->use_seqnum_jbod_fp = 0;
3756 device_printf(sc->mrsas_dev,
3757 "Jbod map sync failed, status=%x\n", cmd_status);
3759 mtx_unlock(&sc->raidmap_lock);
3762 /* See if got an event notification */
3763 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
3764 mrsas_complete_aen(sc, cmd);
3766 mrsas_wakeup(sc, cmd);
3769 /* Command issued to abort another cmd return */
3770 mrsas_complete_abort(sc, cmd);
3773 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
3779 * mrsas_wakeup: Completes an internal command
3780 * input: Adapter soft state
3781 * Command to be completed
3783 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3784 * timer is started. This function is called from
3785 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3786 * from the command wait.
3789 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/* Copy the firmware status into the cmd; 0xFF would look like the
 * "still pending" sentinel to the sleeper, so normalize it to 0. */
3791 cmd->cmd_status = cmd->frame->io.cmd_status;
3793 if (cmd->cmd_status == 0xFF)
3794 cmd->cmd_status = 0;
/* Wake the tsleep() in mrsas_issue_blocked_cmd(), which sleeps on &sc->chan. */
3796 sc->chan = (void *)&cmd;
3797 wakeup_one((void *)&sc->chan);
3802 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
3803 * Adapter soft state Shutdown/Hibernate
3805 * This function issues a DCMD internal command to Firmware to initiate shutdown
3806 * of the controller.
/*
 * Aborts any outstanding long-lived commands (AEN, map update, JBOD seq)
 * first, then issues a zero-transfer DCMD with the caller-supplied opcode
 * (shutdown or hibernate) and waits for it synchronously.
 */
3809 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3811 struct mrsas_mfi_cmd *cmd;
3812 struct mrsas_dcmd_frame *dcmd;
/* Controller already dead: nothing to tell it. */
3814 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3817 cmd = mrsas_get_mfi_cmd(sc);
3819 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3823 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3824 if (sc->map_update_cmd)
3825 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3826 if (sc->jbod_seq_cmd)
3827 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
3829 dcmd = &cmd->frame->dcmd;
3830 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3832 dcmd->cmd = MFI_CMD_DCMD;
3833 dcmd->cmd_status = 0x0;
3834 dcmd->sge_count = 0;
3835 dcmd->flags = MFI_FRAME_DIR_NONE;
3838 dcmd->data_xfer_len = 0;
3839 dcmd->opcode = opcode;
3841 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3843 mrsas_issue_blocked_cmd(sc, cmd);
3844 mrsas_release_mfi_cmd(cmd);
3850 * mrsas_flush_cache: Requests FW to flush all its caches input:
3851 * Adapter soft state
3853 * This function is issues a DCMD internal command to Firmware to initiate
3854 * flushing of all caches.
/*
 * Zero-transfer DCMD; mbox.b[0] selects both controller and disk caches.
 * Issued synchronously; no-op if the adapter is in critical error state.
 */
3857 mrsas_flush_cache(struct mrsas_softc *sc)
3859 struct mrsas_mfi_cmd *cmd;
3860 struct mrsas_dcmd_frame *dcmd;
3862 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3865 cmd = mrsas_get_mfi_cmd(sc);
3867 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3870 dcmd = &cmd->frame->dcmd;
3871 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3873 dcmd->cmd = MFI_CMD_DCMD;
3874 dcmd->cmd_status = 0x0;
3875 dcmd->sge_count = 0;
3876 dcmd->flags = MFI_FRAME_DIR_NONE;
3879 dcmd->data_xfer_len = 0;
3880 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3881 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3883 mrsas_issue_blocked_cmd(sc, cmd);
3884 mrsas_release_mfi_cmd(cmd);
/*
 * megasas_sync_pd_seq_num: Fetch (or pend on) the firmware's JBOD / system-PD
 * sequence-number map.
 *
 * pend == false: one-shot polled read of the current map into the
 * jbodmap_mem slot selected by (pd_seq_map_id & 1).
 * pend == true: register a pended (MRSAS_DCMD_MBOX_PEND_FLAG) command the
 * firmware completes when the map changes; completion is handled in
 * mrsas_complete_mptmfi_passthru().
 * NOTE(review): several control-flow lines (returns, else arms, the do_ocr
 * timeout label) are elided in this listing.
 */
3890 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
3893 u_int8_t do_ocr = 1;
3894 struct mrsas_mfi_cmd *cmd;
3895 struct mrsas_dcmd_frame *dcmd;
3896 uint32_t pd_seq_map_sz;
3897 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3898 bus_addr_t pd_seq_h;
/* Struct embeds one MR_PD_CFG_SEQ, hence MAX_PHYSICAL_DEVICES - 1 extras. */
3900 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
3901 (sizeof(struct MR_PD_CFG_SEQ) *
3902 (MAX_PHYSICAL_DEVICES - 1));
3904 cmd = mrsas_get_mfi_cmd(sc);
3906 device_printf(sc->mrsas_dev,
3907 "Cannot alloc for ld map info cmd.\n");
3910 dcmd = &cmd->frame->dcmd;
/* Double-buffered: alternate slot chosen by the low bit of pd_seq_map_id. */
3912 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
3913 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
3915 device_printf(sc->mrsas_dev,
3916 "Failed to alloc mem for jbod map info.\n");
3917 mrsas_release_mfi_cmd(cmd);
3920 memset(pd_sync, 0, pd_seq_map_sz);
3921 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3922 dcmd->cmd = MFI_CMD_DCMD;
3923 dcmd->cmd_status = 0xFF;
3924 dcmd->sge_count = 1;
3927 dcmd->data_xfer_len = (pd_seq_map_sz);
3928 dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
3929 dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
3930 dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
/* Pended registration: fire-and-forget; ISR completes it on map change. */
3933 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
3934 dcmd->flags = (MFI_FRAME_DIR_WRITE);
3935 sc->jbod_seq_cmd = cmd;
3936 if (mrsas_issue_dcmd(sc, cmd)) {
3937 device_printf(sc->mrsas_dev,
3938 "Fail to send sync map info command.\n");
/* Non-pended path: polled read of the current map. */
3943 dcmd->flags = MFI_FRAME_DIR_READ;
3945 retcode = mrsas_issue_polled(sc, cmd);
3946 if (retcode == ETIMEDOUT)
/* Sanity-check the firmware's reported device count against our limit. */
3949 if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
3950 device_printf(sc->mrsas_dev,
3951 "driver supports max %d JBOD, but FW reports %d\n",
3952 MAX_PHYSICAL_DEVICES, pd_sync->count);
3956 sc->pd_seq_map_id++;
/* Timeout path: request an online controller reset. */
3961 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3967 * mrsas_get_map_info: Load and validate RAID map input:
3968 * Adapter instance soft state
3970 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3971 * and validate RAID map. It returns 0 if successful, 1 other- wise.
/* fast_path_io is enabled only after the freshly loaded map validates. */
3974 mrsas_get_map_info(struct mrsas_softc *sc)
3976 uint8_t retcode = 0;
3978 sc->fast_path_io = 0;
3979 if (!mrsas_get_ld_map_info(sc)) {
3980 retcode = MR_ValidateMapInfo(sc);
3982 sc->fast_path_io = 1;
3990 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
3991 * Adapter instance soft state
3993 * Issues an internal command (DCMD) to get the FW's controller PD list
/*
 * Reads the current RAID map from firmware into the raidmap_mem slot
 * selected by (map_id & 1) via a polled MR_DCMD_LD_MAP_GET_INFO, requesting
 * sc->current_map_sz bytes.  On DCMD timeout an online controller reset is
 * scheduled.
 */
3997 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4000 struct mrsas_mfi_cmd *cmd;
4001 struct mrsas_dcmd_frame *dcmd;
4003 bus_addr_t map_phys_addr = 0;
4005 cmd = mrsas_get_mfi_cmd(sc);
4007 device_printf(sc->mrsas_dev,
4008 "Cannot alloc for ld map info cmd.\n");
4011 dcmd = &cmd->frame->dcmd;
/* Double-buffered map: alternate slot chosen by the low bit of map_id. */
4013 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4014 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4016 device_printf(sc->mrsas_dev,
4017 "Failed to alloc mem for ld map info.\n");
4018 mrsas_release_mfi_cmd(cmd);
/*
 * BUG FIX: the original used sizeof(sc->max_map_sz), which is the size of
 * the max_map_sz *field* (a few bytes), not the map buffer -- leaving most
 * of the map area un-zeroed.  Clear the full max_map_sz bytes instead.
 */
4021 memset(map, 0, sc->max_map_sz);
4022 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4024 dcmd->cmd = MFI_CMD_DCMD;
4025 dcmd->cmd_status = 0xFF;
4026 dcmd->sge_count = 1;
4027 dcmd->flags = MFI_FRAME_DIR_READ;
4030 dcmd->data_xfer_len = sc->current_map_sz;
4031 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4032 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4033 dcmd->sgl.sge32[0].length = sc->current_map_sz;
4035 retcode = mrsas_issue_polled(sc, cmd);
4036 if (retcode == ETIMEDOUT)
4037 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4043 * mrsas_sync_map_info: Get FW's ld_map structure input:
4044 * Adapter instance soft state
4046 * Issues an internal command (DCMD) to get the FW's controller PD list
/*
 * Registers a pended LD-map-sync command with firmware: builds a
 * MR_LD_TARGET_SYNC array (targetId + seqNum per LD) in the *other*
 * raidmap buffer and issues MR_DCMD_LD_MAP_GET_INFO with
 * mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG so firmware completes it when the
 * map changes (handled in mrsas_complete_mptmfi_passthru()).
 */
4050 mrsas_sync_map_info(struct mrsas_softc *sc)
4053 struct mrsas_mfi_cmd *cmd;
4054 struct mrsas_dcmd_frame *dcmd;
4055 uint32_t size_sync_info, num_lds;
4056 MR_LD_TARGET_SYNC *target_map = NULL;
4057 MR_DRV_RAID_MAP_ALL *map;
4059 MR_LD_TARGET_SYNC *ld_sync;
4060 bus_addr_t map_phys_addr = 0;
4062 cmd = mrsas_get_mfi_cmd(sc);
4064 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
4067 map = sc->ld_drv_map[sc->map_id & 1];
4068 num_lds = map->raidMap.ldCount;
4070 dcmd = &cmd->frame->dcmd;
4071 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
4072 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Use the inactive buffer ((map_id - 1) & 1) for the outgoing sync data. */
4074 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
4075 memset(target_map, 0, sc->max_map_sz);
4077 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
4079 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
/* One entry per LD: target id and current sequence number. */
4081 for (i = 0; i < num_lds; i++, ld_sync++) {
4082 raid = MR_LdRaidGet(i, map);
4083 ld_sync->targetId = MR_GetLDTgtId(i, map);
4084 ld_sync->seqNum = raid->seqNum;
4087 dcmd->cmd = MFI_CMD_DCMD;
4088 dcmd->cmd_status = 0xFF;
4089 dcmd->sge_count = 1;
4090 dcmd->flags = MFI_FRAME_DIR_WRITE;
4093 dcmd->data_xfer_len = sc->current_map_sz;
4094 dcmd->mbox.b[0] = num_lds;
4095 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4096 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4097 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4098 dcmd->sgl.sge32[0].length = sc->current_map_sz;
/* Remember the in-flight cmd so completion/abort paths can find it. */
4100 sc->map_update_cmd = cmd;
4101 if (mrsas_issue_dcmd(sc, cmd)) {
4102 device_printf(sc->mrsas_dev,
4103 "Fail to send sync map info command.\n");
4110 * mrsas_get_pd_list: Returns FW's PD list structure input:
4111 * Adapter soft state
4113 * Issues an internal command (DCMD) to get the FW's controller PD list
4114 * structure. This information is mainly used to find out about system
4115 * supported by Firmware.
/*
 * Fetches the exposed-to-host PD list into a temporary DMA buffer, then
 * rebuilds sc->local_pd_list (indexed by deviceId) and copies it to
 * sc->pd_list.  Uses a blocking issue normally, polled when interrupts are
 * masked.  Schedules an OCR on DCMD timeout.
 * NOTE(review): tcmd's malloc return is used without a visible NULL check
 * in this listing -- confirm against the full source.
 */
4118 mrsas_get_pd_list(struct mrsas_softc *sc)
4120 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4121 u_int8_t do_ocr = 1;
4122 struct mrsas_mfi_cmd *cmd;
4123 struct mrsas_dcmd_frame *dcmd;
4124 struct MR_PD_LIST *pd_list_mem;
4125 struct MR_PD_ADDRESS *pd_addr;
4126 bus_addr_t pd_list_phys_addr = 0;
4127 struct mrsas_tmp_dcmd *tcmd;
4129 cmd = mrsas_get_mfi_cmd(sc);
4131 device_printf(sc->mrsas_dev,
4132 "Cannot alloc for get PD list cmd\n");
4135 dcmd = &cmd->frame->dcmd;
4137 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4138 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4139 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4140 device_printf(sc->mrsas_dev,
4141 "Cannot alloc dmamap for get PD list cmd\n");
4142 mrsas_release_mfi_cmd(cmd);
4143 mrsas_free_tmp_dcmd(tcmd);
4144 free(tcmd, M_MRSAS);
4147 pd_list_mem = tcmd->tmp_dcmd_mem;
4148 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4150 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Query only devices exposed to the host. */
4152 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4153 dcmd->mbox.b[1] = 0;
4154 dcmd->cmd = MFI_CMD_DCMD;
4155 dcmd->cmd_status = 0xFF;
4156 dcmd->sge_count = 1;
4157 dcmd->flags = MFI_FRAME_DIR_READ;
4160 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4161 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4162 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4163 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
/* Blocked issue normally; polled while interrupts are masked (early init). */
4165 if (!sc->mask_interrupts)
4166 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4168 retcode = mrsas_issue_polled(sc, cmd);
4170 if (retcode == ETIMEDOUT)
4173 /* Get the instance PD list */
4174 pd_count = MRSAS_MAX_PD;
4175 pd_addr = pd_list_mem->addr;
4176 if (pd_list_mem->count < pd_count) {
4177 memset(sc->local_pd_list, 0,
4178 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
/* Rebuild the per-deviceId lookup table from the firmware's list. */
4179 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4180 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4181 sc->local_pd_list[pd_addr->deviceId].driveType =
4182 pd_addr->scsiDevType;
4183 sc->local_pd_list[pd_addr->deviceId].driveState =
4188 * Use mutext/spinlock if pd_list component size increase more than
4191 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4195 mrsas_free_tmp_dcmd(tcmd);
4196 free(tcmd, M_MRSAS);
/* Timeout path: request an online controller reset. */
4199 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4201 if (!sc->mask_interrupts)
4202 mrsas_release_mfi_cmd(cmd);
4208 * mrsas_get_ld_list: Returns FW's LD list structure input:
4209 * Adapter soft state
4211 * Issues an internal command (DCMD) to get the FW's controller PD list
4212 * structure. This information is mainly used to find out about supported by
/*
 * Fetches the LD list into a temporary DMA buffer via MR_DCMD_LD_GET_LIST
 * and rebuilds sc->ld_ids / sc->CurLdCount from the entries whose state is
 * non-zero.  mbox.b[0] = 1 requests the extended (>64 LD) list when the
 * controller supports it.  Schedules an OCR on DCMD timeout.
 */
4216 mrsas_get_ld_list(struct mrsas_softc *sc)
4218 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4219 u_int8_t do_ocr = 1;
4220 struct mrsas_mfi_cmd *cmd;
4221 struct mrsas_dcmd_frame *dcmd;
4222 struct MR_LD_LIST *ld_list_mem;
4223 bus_addr_t ld_list_phys_addr = 0;
4224 struct mrsas_tmp_dcmd *tcmd;
4226 cmd = mrsas_get_mfi_cmd(sc);
4228 device_printf(sc->mrsas_dev,
4229 "Cannot alloc for get LD list cmd\n");
4232 dcmd = &cmd->frame->dcmd;
4234 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4235 ld_list_size = sizeof(struct MR_LD_LIST);
4236 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4237 device_printf(sc->mrsas_dev,
4238 "Cannot alloc dmamap for get LD list cmd\n");
4239 mrsas_release_mfi_cmd(cmd);
4240 mrsas_free_tmp_dcmd(tcmd);
4241 free(tcmd, M_MRSAS);
4244 ld_list_mem = tcmd->tmp_dcmd_mem;
4245 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4247 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Extended-VD controllers get the >64 LD variant of the list. */
4249 if (sc->max256vdSupport)
4250 dcmd->mbox.b[0] = 1;
4252 dcmd->cmd = MFI_CMD_DCMD;
4253 dcmd->cmd_status = 0xFF;
4254 dcmd->sge_count = 1;
4255 dcmd->flags = MFI_FRAME_DIR_READ;
4257 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4258 dcmd->opcode = MR_DCMD_LD_GET_LIST;
4259 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4260 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
/* Blocked issue normally; polled while interrupts are masked (early init). */
4263 if (!sc->mask_interrupts)
4264 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4266 retcode = mrsas_issue_polled(sc, cmd);
4268 if (retcode == ETIMEDOUT)
4272 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4275 /* Get the instance LD list */
4276 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4277 sc->CurLdCount = ld_list_mem->ldCount;
/* 0xff marks "no LD" in the target-id lookup table. */
4278 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4279 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4280 if (ld_list_mem->ldList[ld_index].state != 0) {
4281 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4282 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4288 mrsas_free_tmp_dcmd(tcmd);
4289 free(tcmd, M_MRSAS);
/* Timeout path: request an online controller reset. */
4292 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4293 if (!sc->mask_interrupts)
4294 mrsas_release_mfi_cmd(cmd);
4300 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
4301 * Adapter soft state Temp command Size of alloction
4303 * Allocates DMAable memory for a temporary internal command. The allocated
4304 * memory is initialized to all zeros upon successful loading of the dma
/*
 * Same tag/alloc/load/zero sequence as mrsas_alloc_ctlr_info_cmd(), but
 * for a caller-sized scratch buffer stored in *tcmd.  Error-path returns
 * are elided in this listing.
 */
4308 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4309 struct mrsas_tmp_dcmd *tcmd, int size)
4311 if (bus_dma_tag_create(sc->mrsas_parent_tag,
4313 BUS_SPACE_MAXADDR_32BIT,
4321 &tcmd->tmp_dcmd_tag)) {
4322 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4325 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4326 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4327 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4330 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4331 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4332 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4333 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4336 memset(tcmd->tmp_dcmd_mem, 0, size);
4341 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
4342 * temporary dcmd pointer
4344 * Deallocates memory of the temporary command for use in the construction of
4345 * the internal DCMD.
/* Reverse of mrsas_alloc_tmp_dcmd(): unload, free, destroy -- each guarded
 * so a partially-completed allocation can be cleaned up safely. */
4348 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4350 if (tmp->tmp_dcmd_phys_addr)
4351 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4352 if (tmp->tmp_dcmd_mem != NULL)
4353 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4354 if (tmp->tmp_dcmd_tag != NULL)
4355 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4359 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
4360 * Adapter soft state Previously issued cmd to be aborted
4362 * This function is used to abort previously issued commands, such as AEN and
4363 * RAID map sync map commands. The abort command is sent as a DCMD internal
4364 * command and subsequently the driver will wait for a return status. The
4365 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4368 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4369 struct mrsas_mfi_cmd *cmd_to_abort)
4371 struct mrsas_mfi_cmd *cmd;
4372 struct mrsas_abort_frame *abort_fr;
4373 u_int8_t retcode = 0;
4374 unsigned long total_time = 0;
4375 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4377 cmd = mrsas_get_mfi_cmd(sc);
4379 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4382 abort_fr = &cmd->frame->abort;
4384 /* Prepare and issue the abort frame */
4385 abort_fr->cmd = MFI_CMD_ABORT;
4386 abort_fr->cmd_status = 0xFF;
4387 abort_fr->flags = 0;
/* Identify the victim by its context index and MFI frame physical address. */
4388 abort_fr->abort_context = cmd_to_abort->index;
4389 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4390 abort_fr->abort_mfi_phys_addr_hi = 0;
4393 cmd->cmd_status = 0xFF;
4395 if (mrsas_issue_dcmd(sc, cmd)) {
4396 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4399 /* Wait for this cmd to complete */
/* Same tsleep-on-&sc->chan pattern as mrsas_issue_blocked_cmd(); the
 * completion path (mrsas_complete_abort) does the matching wakeup_one. */
4400 sc->chan = (void *)&cmd;
4402 if (cmd->cmd_status == 0xFF) {
4403 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4407 if (total_time >= max_wait) {
4408 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4415 mrsas_release_mfi_cmd(cmd);
4420 * mrsas_complete_abort: Completes aborting a command input:
4421 * Adapter soft state Cmd that was issued to abort another cmd
4423 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4424 * change after sending the command. This function is called from
4425 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4428 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/* Only synchronous aborts have a sleeper to wake; clear the 0xFF sentinel
 * first so the woken thread sees the command as completed. */
4430 if (cmd->sync_cmd) {
4432 cmd->cmd_status = 0;
4433 sc->chan = (void *)&cmd;
4434 wakeup_one((void *)&sc->chan);
4440 * mrsas_aen_handler: AEN processing callback function from thread context
4441 * input: Adapter soft state
4443 * Asynchronous event handler
/*
 * Taskqueue callback (thread context) for asynchronous events.  Dispatches
 * on the latched event code: PD insert/remove refreshes the PD list and
 * rescans the physical SIM; LD create/delete/offline refreshes the LD list
 * and rescans the logical SIM; property changes re-read controller info.
 * Finally re-registers for the next AEN at seq_num + 1.
 * NOTE(review): the switch header, several break/return statements and
 * some case labels are elided in this listing.
 */
4446 mrsas_aen_handler(struct mrsas_softc *sc)
4448 union mrsas_evt_class_locale class_locale;
4451 int error, fail_aen = 0;
4454 printf("invalid instance!\n");
/* Bail out during unload or an in-progress controller reset. */
4457 if (sc->remove_in_progress || sc->reset_in_progress) {
4458 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
4459 __func__, __LINE__);
4462 if (sc->evt_detail_mem) {
4463 switch (sc->evt_detail_mem->code) {
4464 case MR_EVT_PD_INSERTED:
4465 fail_aen = mrsas_get_pd_list(sc);
4467 mrsas_bus_scan_sim(sc, sc->sim_1);
4469 goto skip_register_aen;
4471 case MR_EVT_PD_REMOVED:
4472 fail_aen = mrsas_get_pd_list(sc);
4474 mrsas_bus_scan_sim(sc, sc->sim_1);
4476 goto skip_register_aen;
4478 case MR_EVT_LD_OFFLINE:
4479 case MR_EVT_CFG_CLEARED:
4480 case MR_EVT_LD_DELETED:
4481 mrsas_bus_scan_sim(sc, sc->sim_0);
4483 case MR_EVT_LD_CREATED:
4484 fail_aen = mrsas_get_ld_list(sc);
4486 mrsas_bus_scan_sim(sc, sc->sim_0);
4488 goto skip_register_aen;
4490 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4491 case MR_EVT_FOREIGN_CFG_IMPORTED:
4492 case MR_EVT_LD_STATE_CHANGE:
4495 case MR_EVT_CTRL_PROP_CHANGED:
4496 fail_aen = mrsas_get_ctrl_info(sc);
4498 goto skip_register_aen;
4504 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
/* (Shared rescan helpers; surrounding labels elided in this listing.) */
4508 fail_aen = mrsas_get_pd_list(sc);
4510 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4511 mrsas_bus_scan_sim(sc, sc->sim_1);
4513 goto skip_register_aen;
4515 fail_aen = mrsas_get_ld_list(sc);
4517 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4518 mrsas_bus_scan_sim(sc, sc->sim_0);
4520 goto skip_register_aen;
4522 seq_num = sc->evt_detail_mem->seq_num + 1;
4524 /* Register AEN with FW for latest sequence number plus 1 */
4525 class_locale.members.reserved = 0;
4526 class_locale.members.locale = MR_EVT_LOCALE_ALL;
4527 class_locale.members.class = MR_EVT_CLASS_DEBUG;
/* Skip re-registration if a previous AEN command is still outstanding. */
4529 if (sc->aen_cmd != NULL)
4532 mtx_lock(&sc->aen_lock);
4533 error = mrsas_register_aen(sc, seq_num,
4535 mtx_unlock(&sc->aen_lock);
4538 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4547 * mrsas_complete_aen: Completes AEN command
4548 * input: Adapter soft state
4549 * Cmd that was issued to abort another cmd
4551 * This function will be called from ISR and will continue event processing from
4552 * thread context by enqueuing task in ev_tq (callback function
4553 * "mrsas_aen_handler").
4556 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4559 * Don't signal app if it is just an aborted previously registered
/* Genuine event (not an abort during teardown): mark triggered and wake any
 * poll(2) waiter via selwakeup under the aen lock. */
4562 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4563 sc->mrsas_aen_triggered = 1;
4564 mtx_lock(&sc->aen_lock);
4565 if (sc->mrsas_poll_waiting) {
4566 sc->mrsas_poll_waiting = 0;
4567 selwakeup(&sc->mrsas_select);
4569 mtx_unlock(&sc->aen_lock);
4574 mrsas_release_mfi_cmd(cmd);
/* Defer heavy event processing to thread context (mrsas_aen_handler). */
4576 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
/*
 * Newbus glue: device method table, driver declaration, and module
 * registration for the mrsas(4) PCI driver.  Depends on CAM for SCSI
 * transport.
 */
4581 static device_method_t mrsas_methods[] = {
4582 DEVMETHOD(device_probe, mrsas_probe),
4583 DEVMETHOD(device_attach, mrsas_attach),
4584 DEVMETHOD(device_detach, mrsas_detach),
4585 DEVMETHOD(device_suspend, mrsas_suspend),
4586 DEVMETHOD(device_resume, mrsas_resume),
4587 DEVMETHOD(bus_print_child, bus_generic_print_child),
4588 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
4592 static driver_t mrsas_driver = {
4595 sizeof(struct mrsas_softc)
4598 static devclass_t mrsas_devclass;
4600 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
4601 MODULE_DEPEND(mrsas, cam, 1, 1, 1);