2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies, either expressed or implied, of the FreeBSD Project.
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
47 #include <cam/cam_ccb.h>
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/kthread.h>
52 #include <sys/taskqueue.h>
59 static d_open_t mrsas_open;
60 static d_close_t mrsas_close;
61 static d_read_t mrsas_read;
62 static d_write_t mrsas_write;
63 static d_ioctl_t mrsas_ioctl;
64 static d_poll_t mrsas_poll;
66 static struct mrsas_mgmt_info mrsas_mgmt_info;
67 static struct mrsas_ident *mrsas_find_ident(device_t);
68 static int mrsas_setup_msix(struct mrsas_softc *sc);
69 static int mrsas_allocate_msix(struct mrsas_softc *sc);
70 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
71 static void mrsas_flush_cache(struct mrsas_softc *sc);
72 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
73 static void mrsas_ocr_thread(void *arg);
74 static int mrsas_get_map_info(struct mrsas_softc *sc);
75 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
76 static int mrsas_sync_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_pd_list(struct mrsas_softc *sc);
78 static int mrsas_get_ld_list(struct mrsas_softc *sc);
79 static int mrsas_setup_irq(struct mrsas_softc *sc);
80 static int mrsas_alloc_mem(struct mrsas_softc *sc);
81 static int mrsas_init_fw(struct mrsas_softc *sc);
82 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
83 static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
84 static int mrsas_clear_intr(struct mrsas_softc *sc);
85 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
86 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
88 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
89 struct mrsas_mfi_cmd *cmd_to_abort);
90 static struct mrsas_softc *
91 mrsas_get_softc_instance(struct cdev *dev,
92 u_long cmd, caddr_t arg);
93 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
95 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
96 struct mrsas_mfi_cmd *mfi_cmd);
97 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
98 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
99 int mrsas_init_adapter(struct mrsas_softc *sc);
100 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
101 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
102 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
103 int mrsas_ioc_init(struct mrsas_softc *sc);
104 int mrsas_bus_scan(struct mrsas_softc *sc);
105 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
106 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
107 int mrsas_reset_ctrl(struct mrsas_softc *sc);
108 int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
110 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
111 struct mrsas_mfi_cmd *cmd);
113 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
115 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
116 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
117 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
118 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
119 void mrsas_disable_intr(struct mrsas_softc *sc);
120 void mrsas_enable_intr(struct mrsas_softc *sc);
121 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
122 void mrsas_free_mem(struct mrsas_softc *sc);
123 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
124 void mrsas_isr(void *arg);
125 void mrsas_teardown_intr(struct mrsas_softc *sc);
126 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
127 void mrsas_kill_hba(struct mrsas_softc *sc);
128 void mrsas_aen_handler(struct mrsas_softc *sc);
130 mrsas_write_reg(struct mrsas_softc *sc, int offset,
133 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
134 u_int32_t req_desc_hi);
135 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
137 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
138 struct mrsas_mfi_cmd *cmd, u_int8_t status);
140 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
142 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
144 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
145 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
147 extern int mrsas_cam_attach(struct mrsas_softc *sc);
148 extern void mrsas_cam_detach(struct mrsas_softc *sc);
149 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
150 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
151 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
152 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
153 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
154 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
155 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
156 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
157 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
158 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
159 extern void mrsas_xpt_release(struct mrsas_softc *sc);
160 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
161 mrsas_get_request_desc(struct mrsas_softc *sc,
163 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
164 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
165 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
/* Root sysctl node hw.mrsas; per-unit tunables are attached under it in mrsas_setup_sysctl(). */
167 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
170 * PCI device struct and table
173 typedef struct mrsas_ident {
/*
 * Supported controller PCI IDs. Field order appears to be
 * {vendor, device, subvendor, subdevice, desc} — consistent with the
 * comparisons in mrsas_find_ident(), where 0xffff in subvendor/subdevice
 * acts as a wildcard. (Typedef body elided in this excerpt.)
 */
181 MRSAS_CTLR_ID device_table[] = {
182 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
183 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
184 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
189 * Character device entry points
/*
 * cdevsw for the /dev/mrsas%u control node created in mrsas_attach();
 * routes userland open/close/read/write/ioctl/poll to the static
 * handlers declared above.
 */
192 static struct cdevsw mrsas_cdevsw = {
193 .d_version = D_VERSION,
194 .d_open = mrsas_open,
195 .d_close = mrsas_close,
196 .d_read = mrsas_read,
197 .d_write = mrsas_write,
198 .d_ioctl = mrsas_ioctl,
199 .d_poll = mrsas_poll,
/* malloc(9) type used for all driver heap allocations (freed with M_MRSAS). */
203 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
206 * In the cdevsw routines, we find our softc by using the si_drv1 member of
207 * struct cdev. We set this variable to point to our softc in our attach
208 * routine when we create the /dev entry.
/* d_open entry point for the mrsas control device. */
211 mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
213 struct mrsas_softc *sc;
/* d_close entry point for the mrsas control device. */
220 mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
222 struct mrsas_softc *sc;
/* d_read entry point for the mrsas control device. */
229 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
231 struct mrsas_softc *sc;
/* d_write entry point for the mrsas control device. */
237 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
239 struct mrsas_softc *sc;
246 * Register Read/Write Functions
/*
 * mrsas_write_reg: 32-bit MMIO write at 'offset', using the bus tag and
 * handle cached in the softc when the register BAR was mapped in attach.
 */
250 mrsas_write_reg(struct mrsas_softc *sc, int offset,
253 bus_space_tag_t bus_tag = sc->bus_tag;
254 bus_space_handle_t bus_handle = sc->bus_handle;
256 bus_space_write_4(bus_tag, bus_handle, offset, value);
/* mrsas_read_reg: 32-bit MMIO read at 'offset' from the mapped register BAR. */
260 mrsas_read_reg(struct mrsas_softc *sc, int offset)
262 bus_space_tag_t bus_tag = sc->bus_tag;
263 bus_space_handle_t bus_handle = sc->bus_handle;
265 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
270 * Interrupt Disable/Enable/Clear Functions
/*
 * mrsas_disable_intr: mask all bits of the outbound interrupt mask
 * register and record the masked state in the softc so the ISR can
 * ignore spurious interrupts.
 */
274 mrsas_disable_intr(struct mrsas_softc *sc)
276 u_int32_t mask = 0xFFFFFFFF;
279 sc->mask_interrupts = 1;
280 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
281 /* Dummy read to force pci flush */
282 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * mrsas_enable_intr: acknowledge any stale outbound interrupt status,
 * then unmask the FW reply interrupt. The read-back after each write
 * flushes the posted PCI write.
 */
286 mrsas_enable_intr(struct mrsas_softc *sc)
288 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
291 sc->mask_interrupts = 0;
292 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
293 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
295 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
296 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * mrsas_clear_intr: read the outbound interrupt status and classify it.
 * A FW state-change interrupt is acknowledged by writing the status back;
 * if the scratch-pad register shows the FW in FAULT state, the OCR
 * (online controller reset) thread is woken via ocr_chan.
 */
300 mrsas_clear_intr(struct mrsas_softc *sc)
302 u_int32_t status, fw_status, fw_state;
304 /* Read received interrupt */
305 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
308 * If FW state change interrupt is received, write to it again to
311 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
312 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
313 outbound_scratch_pad));
314 fw_state = fw_status & MFI_STATE_MASK;
315 if (fw_state == MFI_STATE_FAULT) {
316 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
317 if (sc->ocr_thread_active)
318 wakeup(&sc->ocr_chan);
/* Ack the state-change interrupt; the following read flushes the write. */
320 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
321 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
324 /* Not our interrupt, so just return */
325 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
328 /* We got a reply interrupt */
333 * PCI Support Functions
/*
 * mrsas_find_ident: walk device_table (terminated by a zero vendor entry)
 * looking for a match on the PCI vendor/device IDs; subvendor/subdevice
 * must either match or be the 0xffff wildcard in the table entry.
 */
336 static struct mrsas_ident *
337 mrsas_find_ident(device_t dev)
339 struct mrsas_ident *pci_device;
341 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
342 if ((pci_device->vendor == pci_get_vendor(dev)) &&
343 (pci_device->device == pci_get_device(dev)) &&
344 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
345 (pci_device->subvendor == 0xffff)) &&
346 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
347 (pci_device->subdevice == 0xffff)))
/*
 * mrsas_probe: PCI probe entry point. Matches the device against
 * device_table and, on success, sets the device description.
 * first_ctrl presumably limits the version banner to the first probed
 * controller — body elided here, confirm against full source.
 */
354 mrsas_probe(device_t dev)
356 static u_int8_t first_ctrl = 1;
357 struct mrsas_ident *id;
359 if ((id = mrsas_find_ident(dev)) != NULL) {
361 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
365 device_set_desc(dev, id->desc);
366 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
373 * mrsas_setup_sysctl: setup sysctl values for mrsas
374 * input: Adapter instance soft state
376 * Setup sysctl entries for mrsas driver.
379 mrsas_setup_sysctl(struct mrsas_softc *sc)
381 struct sysctl_ctx_list *sysctl_ctx = NULL;
382 struct sysctl_oid *sysctl_tree = NULL;
383 char tmpstr[80], tmpstr2[80];
386 * Setup the sysctl variable so the user can change the debug level
389 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
390 device_get_unit(sc->mrsas_dev));
391 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
/*
 * Prefer the newbus-provided sysctl context/tree for this device; fall
 * back to a private context rooted under hw.mrsas.<unit> when none exists.
 */
393 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
394 if (sysctl_ctx != NULL)
395 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
397 if (sysctl_tree == NULL) {
398 sysctl_ctx_init(&sc->sysctl_ctx);
399 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
400 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
401 CTLFLAG_RD, 0, tmpstr);
402 if (sc->sysctl_tree == NULL)
404 sysctl_ctx = &sc->sysctl_ctx;
405 sysctl_tree = sc->sysctl_tree;
/* Per-unit tunables and read-only statistics exported to userland. */
407 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
408 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
409 "Disable the use of OCR");
411 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
412 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
413 strlen(MRSAS_VERSION), "driver version");
415 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
416 OID_AUTO, "reset_count", CTLFLAG_RD,
417 &sc->reset_count, 0, "number of ocr from start of the day");
419 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
420 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
421 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
423 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
424 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
425 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
427 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
428 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
429 "Driver debug level");
431 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
432 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
433 0, "Driver IO timeout value in mili-second.");
435 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
436 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
437 &sc->mrsas_fw_fault_check_delay,
438 0, "FW fault check thread delay in seconds. <default is 1 sec>");
440 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
441 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
442 &sc->reset_in_progress, 0, "ocr in progress status");
447 * mrsas_get_tunables: get tunable parameters.
448 * input: Adapter instance soft state
450 * Get tunable parameters. This will help to debug driver at boot time.
453 mrsas_get_tunables(struct mrsas_softc *sc)
457 /* XXX default to some debugging for now */
458 sc->mrsas_debug = MRSAS_FAULT;
459 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
460 sc->mrsas_fw_fault_check_delay = 1;
462 sc->reset_in_progress = 0;
465 * Grab the global variables.
467 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
470 * Grab the global load-balancing tunable.
472 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
474 /* Grab the unit-instance variables (override the global debug level) */
475 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
476 device_get_unit(sc->mrsas_dev));
477 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
481 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
482 * Used to get sequence number at driver load time.
483 * input: Adapter soft state
485 * Allocates DMAable memory for the event log info internal command.
488 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
492 /* Allocate get event log info command */
493 el_info_size = sizeof(struct mrsas_evt_log_info);
/*
 * Standard busdma sequence: create tag -> allocate DMA-able memory ->
 * load the map. The physical address is captured into el_info_phys_addr
 * by the mrsas_addr_cb callback.
 */
494 if (bus_dma_tag_create(sc->mrsas_parent_tag,
496 BUS_SPACE_MAXADDR_32BIT,
505 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
508 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
509 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
510 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
513 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
514 sc->el_info_mem, el_info_size, mrsas_addr_cb,
515 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
516 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
519 memset(sc->el_info_mem, 0, el_info_size);
524 * mrsas_free_evt_log_info_cmd: Free memory for Event log info command
525 * input: Adapter soft state
527 * Deallocates memory for the event log info internal command.
/* Unload, free, and destroy in reverse order of mrsas_alloc_evt_log_info_cmd(). */
530 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
532 if (sc->el_info_phys_addr)
533 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
534 if (sc->el_info_mem != NULL)
535 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
536 if (sc->el_info_tag != NULL)
537 bus_dma_tag_destroy(sc->el_info_tag);
541 * mrsas_get_seq_num: Get latest event sequence number
542 * @sc: Adapter soft state
543 * @eli: Firmware event log sequence number information.
545 * Firmware maintains a log of all events in a non-volatile area.
546 * Driver gets the sequence number using DCMD
547 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
551 mrsas_get_seq_num(struct mrsas_softc *sc,
552 struct mrsas_evt_log_info *eli)
554 struct mrsas_mfi_cmd *cmd;
555 struct mrsas_dcmd_frame *dcmd;
557 cmd = mrsas_get_mfi_cmd(sc);
560 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
563 dcmd = &cmd->frame->dcmd;
565 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
566 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
567 mrsas_release_mfi_cmd(cmd);
/* Build a blocking MR_DCMD_CTRL_EVENT_GET_INFO DCMD reading into el_info_mem. */
570 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
572 dcmd->cmd = MFI_CMD_DCMD;
573 dcmd->cmd_status = 0x0;
575 dcmd->flags = MFI_FRAME_DIR_READ;
578 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
579 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
580 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
581 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
583 mrsas_issue_blocked_cmd(sc, cmd);
586 * Copy the data back into callers buffer
588 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
/* DMA buffer and MFI command are only needed for this one query. */
589 mrsas_free_evt_log_info_cmd(sc);
590 mrsas_release_mfi_cmd(cmd);
597 * mrsas_register_aen: Register for asynchronous event notification
598 * @sc: Adapter soft state
599 * @seq_num: Starting sequence number
600 * @class_locale: Class of the event
602 * This function subscribes for events beyond the @seq_num
603 * and type @class_locale.
607 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
608 u_int32_t class_locale_word)
611 struct mrsas_mfi_cmd *cmd;
612 struct mrsas_dcmd_frame *dcmd;
613 union mrsas_evt_class_locale curr_aen;
614 union mrsas_evt_class_locale prev_aen;
617 * If there is an AEN pending already (aen_cmd), check if the
618 * class_locale of that pending AEN is inclusive of the new AEN
619 * request we currently have. If it is, then we don't have to do
620 * anything. In other words, whichever events the current AEN request
621 * is subscribing to, have already been subscribed to. If the old_cmd
622 * is _not_ inclusive, then we have to abort that command, form a
623 * class_locale that is superset of both old and current and re-issue
627 curr_aen.word = class_locale_word;
/* Previous registration's class/locale lives in mbox word 1 of the pending DCMD. */
631 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
634 * A class whose enum value is smaller is inclusive of all
635 * higher values. If a PROGRESS (= -1) was previously
636 * registered, then a new registration requests for higher
637 * classes need not be sent to FW. They are automatically
638 * included. Locale numbers don't have such hierarchy. They
641 if ((prev_aen.members.class <= curr_aen.members.class) &&
642 !((prev_aen.members.locale & curr_aen.members.locale) ^
643 curr_aen.members.locale)) {
645 * Previously issued event registration includes
646 * current request. Nothing to do.
/* Build the superset: union of locales, minimum (most inclusive) class. */
650 curr_aen.members.locale |= prev_aen.members.locale;
652 if (prev_aen.members.class < curr_aen.members.class)
653 curr_aen.members.class = prev_aen.members.class;
655 sc->aen_cmd->abort_aen = 1;
656 ret_val = mrsas_issue_blocked_abort_cmd(sc,
660 printf("mrsas: Failed to abort "
661 "previous AEN command\n");
666 cmd = mrsas_get_mfi_cmd(sc);
671 dcmd = &cmd->frame->dcmd;
673 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
676 * Prepare DCMD for aen registration
678 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
680 dcmd->cmd = MFI_CMD_DCMD;
681 dcmd->cmd_status = 0x0;
683 dcmd->flags = MFI_FRAME_DIR_READ;
686 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
687 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
/* mbox.w[0] = starting sequence number, mbox.w[1] = class/locale word. */
688 dcmd->mbox.w[0] = seq_num;
689 sc->last_seq_num = seq_num;
690 dcmd->mbox.w[1] = curr_aen.word;
691 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
692 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
694 if (sc->aen_cmd != NULL) {
695 mrsas_release_mfi_cmd(cmd);
699 * Store reference to the cmd used to register for AEN. When an
700 * application wants us to register for AEN, we have to abort this
701 * cmd and re-register with a new EVENT LOCALE supplied by that app
706 * Issue the aen registration frame
708 if (mrsas_issue_dcmd(sc, cmd)) {
709 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
716 * mrsas_start_aen: Subscribes to AEN during driver load time
717 * @instance: Adapter soft state
720 mrsas_start_aen(struct mrsas_softc *sc)
722 struct mrsas_evt_log_info eli;
723 union mrsas_evt_class_locale class_locale;
726 /* Get the latest sequence number from FW */
728 memset(&eli, 0, sizeof(eli));
730 if (mrsas_get_seq_num(sc, &eli))
733 /* Register AEN with FW for latest sequence number plus 1 */
734 class_locale.members.reserved = 0;
735 class_locale.members.locale = MR_EVT_LOCALE_ALL;
736 class_locale.members.class = MR_EVT_CLASS_DEBUG;
738 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
744 * mrsas_setup_msix: Allocate IRQ resources and install the mrsas_isr
744 *   handler for each MSI-x vector (vectors themselves come from
744 *   mrsas_allocate_msix()).
745 * @sc: adapter soft state
748 mrsas_setup_msix(struct mrsas_softc *sc)
752 for (i = 0; i < sc->msix_vectors; i++) {
753 sc->irq_context[i].sc = sc;
754 sc->irq_context[i].MSIxIndex = i;
/* MSI-x resource IDs are 1-based (rid 0 is the legacy INTx line). */
755 sc->irq_id[i] = i + 1;
756 sc->mrsas_irq[i] = bus_alloc_resource_any
757 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
759 if (sc->mrsas_irq[i] == NULL) {
760 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
761 goto irq_alloc_failed;
763 if (bus_setup_intr(sc->mrsas_dev,
765 INTR_MPSAFE | INTR_TYPE_CAM,
766 NULL, mrsas_isr, &sc->irq_context[i],
767 &sc->intr_handle[i])) {
768 device_printf(sc->mrsas_dev,
769 "Cannot set up MSI-x interrupt handler\n");
770 goto irq_alloc_failed;
/* Failure path: release everything set up so far. */
776 mrsas_teardown_intr(sc);
781 * mrsas_allocate_msix: Allocate MSI-x vectors from the PCI layer
781 *   (pci_alloc_msix may grant fewer vectors than requested via
781 *   sc->msix_vectors, which is in/out).
782 * @sc: adapter soft state
785 mrsas_allocate_msix(struct mrsas_softc *sc)
787 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
788 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
789 " of vectors\n", sc->msix_vectors);
791 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
792 goto irq_alloc_failed;
797 mrsas_teardown_intr(sc);
802 * mrsas_attach: PCI entry point
803 * input: pointer to device struct
805 * Performs setup of PCI and registers, initializes mutexes and linked lists,
806 * registers interrupts and CAM, and initializes the adapter/controller to
810 mrsas_attach(device_t dev)
812 struct mrsas_softc *sc = device_get_softc(dev);
813 uint32_t cmd, bar, error;
815 /* Look up our softc and initialize its fields. */
817 sc->device_id = pci_get_device(dev);
819 mrsas_get_tunables(sc);
822 * Set up PCI and registers
824 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
825 if ((cmd & PCIM_CMD_PORTEN) == 0) {
828 /* Force the busmaster enable bit on. */
829 cmd |= PCIM_CMD_BUSMASTEREN;
830 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
/* Map the register BAR and cache the bus-space tag/handle for MMIO access. */
832 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
834 sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
835 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
836 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
838 device_printf(dev, "Cannot allocate PCI registers\n");
841 sc->bus_tag = rman_get_bustag(sc->reg_res);
842 sc->bus_handle = rman_get_bushandle(sc->reg_res);
844 /* Initialize mutexes */
845 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
846 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
847 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
848 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
849 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
850 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
851 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
852 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
855 * Initialize a counting semaphore to bound the no. of concurrent
858 sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);
860 /* Initialize linked lists */
861 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
862 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
864 mrsas_atomic_set(&sc->fw_outstanding, 0);
866 sc->io_cmds_highwater = 0;
868 /* Create a /dev entry for this device. */
869 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
870 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
871 device_get_unit(dev));
872 if (device_get_unit(dev) == 0)
873 make_dev_alias(sc->mrsas_cdev, "megaraid_sas_ioctl_node");
/* si_drv1 is how the cdevsw handlers find this softc (see mrsas_open). */
875 sc->mrsas_cdev->si_drv1 = sc;
877 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
878 sc->UnevenSpanSupport = 0;
882 /* Initialize Firmware */
883 if (mrsas_init_fw(sc) != SUCCESS) {
886 /* Register SCSI mid-layer */
887 if ((mrsas_cam_attach(sc) != SUCCESS)) {
888 goto attach_fail_cam;
891 if (mrsas_setup_irq(sc) != SUCCESS) {
892 goto attach_fail_irq;
894 /* Enable Interrupts */
895 mrsas_enable_intr(sc);
/* Start the OCR (online controller reset / FW fault monitor) kernel thread. */
897 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
898 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
899 device_get_unit(sc->mrsas_dev));
901 printf("Error %d starting rescan thread\n", error);
902 goto attach_fail_irq;
904 mrsas_setup_sysctl(sc);
906 /* Initiate AEN (Asynchronous Event Notification) */
908 if (mrsas_start_aen(sc)) {
909 printf("Error: start aen failed\n");
913 * Add this controller to mrsas_mgmt_info structure so that it can be
914 * exported to management applications
916 if (device_get_unit(dev) == 0)
917 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
919 mrsas_mgmt_info.count++;
920 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
921 mrsas_mgmt_info.max_index++;
/* Failure unwind: release resources in reverse order of acquisition. */
927 mrsas_teardown_intr(sc);
929 mrsas_cam_detach(sc);
931 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
932 if (sc->msix_enable == 1)
933 pci_release_msi(sc->mrsas_dev);
935 mtx_destroy(&sc->sim_lock);
936 mtx_destroy(&sc->aen_lock);
937 mtx_destroy(&sc->pci_lock);
938 mtx_destroy(&sc->io_lock);
939 mtx_destroy(&sc->ioctl_lock);
940 mtx_destroy(&sc->mpt_cmd_pool_lock);
941 mtx_destroy(&sc->mfi_cmd_pool_lock);
942 mtx_destroy(&sc->raidmap_lock);
943 /* Destroy the counting semaphore created for Ioctl */
944 sema_destroy(&sc->ioctl_count_sema);
946 destroy_dev(sc->mrsas_cdev);
948 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
949 sc->reg_res_id, sc->reg_res);
955 * mrsas_detach: De-allocates and teardown resources
956 * input: pointer to device struct
958 * This function is the entry point for device disconnect and detach.
959 * It performs memory de-allocations, shutdown of the controller and various
960 * teardown and destroy resource functions.
963 mrsas_detach(device_t dev)
965 struct mrsas_softc *sc;
968 sc = device_get_softc(dev);
/* Flag teardown so other paths (ISR, OCR thread) stop issuing new work. */
969 sc->remove_in_progress = 1;
971 /* Destroy the character device so no other IOCTL will be handled */
972 destroy_dev(sc->mrsas_cdev);
975 * Take the instance off the instance array. Note that we will not
976 * decrement the max_index. We let this array be sparse array
978 for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
979 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
980 mrsas_mgmt_info.count--;
981 mrsas_mgmt_info.sc_ptr[i] = NULL;
/* Wake the OCR thread and wait for any reset in flight, then for the thread itself. */
986 if (sc->ocr_thread_active)
987 wakeup(&sc->ocr_chan);
988 while (sc->reset_in_progress) {
990 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
991 mrsas_dprint(sc, MRSAS_INFO,
992 "[%2d]waiting for ocr to be finished\n", i);
994 pause("mr_shutdown", hz);
997 while (sc->ocr_thread_active) {
999 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1000 mrsas_dprint(sc, MRSAS_INFO,
1002 "mrsas_ocr thread to quit ocr %d\n", i,
1003 sc->ocr_thread_active);
1005 pause("mr_shutdown", hz);
/* Quiesce and shut down the controller before tearing down CAM and interrupts. */
1007 mrsas_flush_cache(sc);
1008 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1009 mrsas_disable_intr(sc);
1010 mrsas_cam_detach(sc);
1011 mrsas_teardown_intr(sc);
1013 mtx_destroy(&sc->sim_lock);
1014 mtx_destroy(&sc->aen_lock);
1015 mtx_destroy(&sc->pci_lock);
1016 mtx_destroy(&sc->io_lock);
1017 mtx_destroy(&sc->ioctl_lock);
1018 mtx_destroy(&sc->mpt_cmd_pool_lock);
1019 mtx_destroy(&sc->mfi_cmd_pool_lock);
1020 mtx_destroy(&sc->raidmap_lock);
1022 /* Wait for all the semaphores to be released */
1023 while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
1024 pause("mr_shutdown", hz);
1026 /* Destroy the counting semaphore created for Ioctl */
1027 sema_destroy(&sc->ioctl_count_sema);
1030 bus_release_resource(sc->mrsas_dev,
1031 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1033 if (sc->sysctl_tree != NULL)
1034 sysctl_ctx_free(&sc->sysctl_ctx);
1040 * mrsas_free_mem: Frees allocated memory
1041 * input: Adapter instance soft state
1043 * This function is called from mrsas_detach() to free previously allocated
1047 mrsas_free_mem(struct mrsas_softc *sc)
1051 struct mrsas_mfi_cmd *mfi_cmd;
1052 struct mrsas_mpt_cmd *mpt_cmd;
1055 * Free RAID map memory
1057 for (i = 0; i < 2; i++) {
1058 if (sc->raidmap_phys_addr[i])
1059 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1060 if (sc->raidmap_mem[i] != NULL)
1061 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1062 if (sc->raidmap_tag[i] != NULL)
1063 bus_dma_tag_destroy(sc->raidmap_tag[i]);
1065 if (sc->ld_drv_map[i] != NULL)
1066 free(sc->ld_drv_map[i], M_MRSAS);
1070 * Free version buffer memory
1072 if (sc->verbuf_phys_addr)
1073 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1074 if (sc->verbuf_mem != NULL)
1075 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1076 if (sc->verbuf_tag != NULL)
1077 bus_dma_tag_destroy(sc->verbuf_tag);
1081 * Free sense buffer memory
1083 if (sc->sense_phys_addr)
1084 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1085 if (sc->sense_mem != NULL)
1086 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1087 if (sc->sense_tag != NULL)
1088 bus_dma_tag_destroy(sc->sense_tag);
1091 * Free chain frame memory
1093 if (sc->chain_frame_phys_addr)
1094 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1095 if (sc->chain_frame_mem != NULL)
1096 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1097 if (sc->chain_frame_tag != NULL)
1098 bus_dma_tag_destroy(sc->chain_frame_tag);
1101 * Free IO Request memory
1103 if (sc->io_request_phys_addr)
1104 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1105 if (sc->io_request_mem != NULL)
1106 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1107 if (sc->io_request_tag != NULL)
1108 bus_dma_tag_destroy(sc->io_request_tag);
1111 * Free Reply Descriptor memory
1113 if (sc->reply_desc_phys_addr)
1114 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1115 if (sc->reply_desc_mem != NULL)
1116 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1117 if (sc->reply_desc_tag != NULL)
1118 bus_dma_tag_destroy(sc->reply_desc_tag);
1121 * Free event detail memory
1123 if (sc->evt_detail_phys_addr)
1124 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1125 if (sc->evt_detail_mem != NULL)
1126 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1127 if (sc->evt_detail_tag != NULL)
1128 bus_dma_tag_destroy(sc->evt_detail_tag);
/* Free the DMA frames backing each MFI command before the list itself below. */
1133 if (sc->mfi_cmd_list) {
1134 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1135 mfi_cmd = sc->mfi_cmd_list[i];
1136 mrsas_free_frame(sc, mfi_cmd);
1139 if (sc->mficmd_frame_tag != NULL)
1140 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1143 * Free MPT internal command list
1145 max_cmd = sc->max_fw_cmds;
1146 if (sc->mpt_cmd_list) {
1147 for (i = 0; i < max_cmd; i++) {
1148 mpt_cmd = sc->mpt_cmd_list[i];
1149 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1150 free(sc->mpt_cmd_list[i], M_MRSAS);
1152 free(sc->mpt_cmd_list, M_MRSAS);
1153 sc->mpt_cmd_list = NULL;
1156 * Free MFI internal command list
1159 if (sc->mfi_cmd_list) {
1160 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1161 free(sc->mfi_cmd_list[i], M_MRSAS);
1163 free(sc->mfi_cmd_list, M_MRSAS);
1164 sc->mfi_cmd_list = NULL;
1167 * Free request descriptor memory
1169 free(sc->req_desc, M_MRSAS);
1170 sc->req_desc = NULL;
1173 * Destroy parent tag
1175 if (sc->mrsas_parent_tag != NULL)
1176 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1179 * Free ctrl_info memory
1181 if (sc->ctrl_info != NULL)
1182 free(sc->ctrl_info, M_MRSAS);
1186 * mrsas_teardown_intr: Teardown interrupt
1187 * input: Adapter instance soft state
1189 * This function is called from mrsas_detach() to teardown and release bus
1190 * interrupt resource.
1193 mrsas_teardown_intr(struct mrsas_softc *sc)
/* NOTE(review): this numbered listing elides some original lines (braces,
 * declarations, the legacy/MSI-X "else" arm); code below is kept verbatim. */
/* Legacy (INTx) path: a single vector at index 0. */
1197 if (!sc->msix_enable) {
1198 if (sc->intr_handle[0])
1199 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1200 if (sc->mrsas_irq[0] != NULL)
1201 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1202 sc->irq_id[0], sc->mrsas_irq[0]);
1203 sc->intr_handle[0] = NULL;
/* MSI-X path: tear down and release every allocated vector, then
 * give the MSI resources back to the PCI layer. */
1205 for (i = 0; i < sc->msix_vectors; i++) {
1206 if (sc->intr_handle[i])
1207 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1208 sc->intr_handle[i]);
1210 if (sc->mrsas_irq[i] != NULL)
1211 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1212 sc->irq_id[i], sc->mrsas_irq[i]);
1214 sc->intr_handle[i] = NULL;
1216 pci_release_msi(sc->mrsas_dev);
1222 * mrsas_suspend: Suspend entry point
1223 * input: Device struct pointer
1225 * This function is the entry point for system suspend from the OS.
1228 mrsas_suspend(device_t dev)
1230 struct mrsas_softc *sc;
/* Only the softc lookup is visible here; the rest of the body (if any)
 * is elided in this listing. */
1232 sc = device_get_softc(dev);
1237 * mrsas_resume: Resume entry point
1238 * input: Device struct pointer
1240 * This function is the entry point for system resume from the OS.
1243 mrsas_resume(device_t dev)
1245 struct mrsas_softc *sc;
/* Only the softc lookup is visible here; the rest of the body (if any)
 * is elided in this listing. */
1247 sc = device_get_softc(dev);
1252 * mrsas_get_softc_instance: Find softc instance based on cmd type
1254 * This function will return softc instance based on cmd type.
1255 * In some case, application fire ioctl on required management instance and
1256 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1257 * case, else get the softc instance from host_no provided by application in
1261 static struct mrsas_softc *
1262 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1264 struct mrsas_softc *sc = NULL;
1265 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
/* GET_PCI_INFO carries no host_no; its handling is elided in this listing. */
1267 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1271 * get the Host number & the softc from data sent by the
/* Look up the controller by the host number the application supplied. */
1274 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
/* NOTE(review): sc is read before the host_no bound check below —
 * an out-of-range host_no indexes sc_ptr first; verify bounds upstream. */
1275 if ((user_ioc->host_no >= mrsas_mgmt_info.max_index) || (sc == NULL)) {
1277 mrsas_dprint(sc, MRSAS_FAULT,
1278 "There is no Controller number %d .\n", user_ioc->host_no);
1280 mrsas_dprint(sc, MRSAS_FAULT,
1281 "Invalid Controller number %d .\n", user_ioc->host_no);
1289 * mrsas_ioctl: IOCtl commands entry point.
1291 * This function is the entry point for IOCtls from the OS. It calls the
1292 * appropriate function for processing depending on the command received.
1295 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1297 struct mrsas_softc *sc;
1299 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1301 sc = mrsas_get_softc_instance(dev, cmd, arg);
/* Refuse new ioctls once driver detach/shutdown has started. */
1305 if (sc->remove_in_progress) {
1306 mrsas_dprint(sc, MRSAS_INFO,
1307 "Driver remove or shutdown called.\n");
/* Check reset_in_progress under the spin lock; fast path when no OCR
 * (online controller reset) is running. */
1310 mtx_lock_spin(&sc->ioctl_lock);
1311 if (!sc->reset_in_progress) {
1312 mtx_unlock_spin(&sc->ioctl_lock);
1315 mtx_unlock_spin(&sc->ioctl_lock);
/* An OCR is in flight: poll (1 s pause per iteration) until it finishes,
 * logging progress every MRSAS_RESET_NOTICE_INTERVAL iterations. */
1316 while (sc->reset_in_progress) {
1318 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1319 mrsas_dprint(sc, MRSAS_INFO,
1321 "OCR to be finished %d\n", i,
1322 sc->ocr_thread_active);
1324 pause("mr_ioctl", hz);
1329 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1330 #ifdef COMPAT_FREEBSD32
1331 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1334 * Decrement the Ioctl counting Semaphore before getting an
/* The semaphore bounds the number of concurrent pass-through ioctls. */
1337 sema_wait(&sc->ioctl_count_sema);
1339 ret = mrsas_passthru(sc, (void *)arg, cmd);
1341 /* Increment the Ioctl counting semaphore value */
1342 sema_post(&sc->ioctl_count_sema);
1345 case MRSAS_IOC_SCAN_BUS:
1346 ret = mrsas_bus_scan(sc);
/* Report PCI location of this controller back to the application. */
1349 case MRSAS_IOC_GET_PCI_INFO:
1350 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1351 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1352 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1353 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1354 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1355 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1356 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1357 "pci device no: %d, pci function no: %d,"
1358 "pci domain ID: %d\n",
1359 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1360 pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1365 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1373 * mrsas_poll: poll entry point for mrsas driver fd
1375 * This function is the entry point for poll from the OS. It waits for some AEN
1376 * events to be triggered from the controller and notifies back.
1379 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1381 struct mrsas_softc *sc;
/* Readable if an AEN has already been raised by the controller. */
1386 if (poll_events & (POLLIN | POLLRDNORM)) {
1387 if (sc->mrsas_aen_triggered) {
1388 revents |= poll_events & (POLLIN | POLLRDNORM);
/* Otherwise register this thread for selwakeup when an AEN arrives;
 * aen_lock protects mrsas_poll_waiting and the selinfo. */
1392 if (poll_events & (POLLIN | POLLRDNORM)) {
1393 mtx_lock(&sc->aen_lock);
1394 sc->mrsas_poll_waiting = 1;
1395 selrecord(td, &sc->mrsas_select);
1396 mtx_unlock(&sc->aen_lock);
1403 * mrsas_setup_irq: Set up interrupt
1404 * input: Adapter instance soft state
1406 * This function sets up interrupts as a bus resource, with flags indicating
1407 * resource permitting contemporaneous sharing and for resource to activate
1411 mrsas_setup_irq(struct mrsas_softc *sc)
/* Prefer MSI-X when enabled and successfully configured. */
1413 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1414 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n")
1417 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
/* Legacy INTx: single shared, active IRQ resource on vector 0. */
1418 sc->irq_context[0].sc = sc;
1419 sc->irq_context[0].MSIxIndex = 0;
1421 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1422 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1423 if (sc->mrsas_irq[0] == NULL) {
/* Fixed typo in user-visible message: "legcay" -> "legacy". */
1424 device_printf(sc->mrsas_dev, "Cannot allocate legacy"
1428 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1429 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1430 &sc->irq_context[0], &sc->intr_handle[0])) {
1431 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1440 * mrsas_isr: ISR entry point
1441 * input: argument pointer
1443 * This function is the interrupt service routine entry point. There are two
1444 * types of interrupts, state change interrupt and response interrupt. If an
1445 * interrupt is not ours, we just return.
1448 mrsas_isr(void *arg)
1450 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1451 struct mrsas_softc *sc = irq_context->sc;
/* Interrupts are masked during init/reset windows; ignore them. */
1454 if (sc->mask_interrupts)
/* Legacy (non-MSI-X) mode must first check/clear the interrupt to see
 * whether it was ours. */
1457 if (!sc->msix_vectors) {
1458 status = mrsas_clear_intr(sc);
1462 /* If we are resetting, bail */
1463 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1464 printf(" Entered into ISR when OCR is going active. \n");
1465 mrsas_clear_intr(sc);
1468 /* Process for reply request and clear response interrupt */
/* mrsas_complete_cmd() clears the interrupt itself on SUCCESS; only
 * clear here when it reports no completions. */
1469 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1470 mrsas_clear_intr(sc);
1476 * mrsas_complete_cmd: Process reply request
1477 * input: Adapter instance soft state
1479 * This function is called from mrsas_isr() to process reply request and clear
1480 * response interrupt. Processing of the reply request entails walking
1481 * through the reply descriptor array for the command request pended from
1482 * Firmware. We look at the Function field to determine the command type and
1483 * perform the appropriate action. Before we return, we clear the response
1487 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1489 Mpi2ReplyDescriptorsUnion_t *desc;
1490 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1491 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1492 struct mrsas_mpt_cmd *cmd_mpt;
1493 struct mrsas_mfi_cmd *cmd_mfi;
1494 u_int8_t reply_descript_type;
1495 u_int16_t smid, num_completed;
1496 u_int8_t status, extStatus;
1497 union desc_value desc_val;
1498 PLD_LOAD_BALANCE_INFO lbinfo;
1499 u_int32_t device_id;
1500 int threshold_reply_count = 0;
1503 /* If we have a hardware error, not need to continue */
1504 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Position at this MSI-X queue's next unconsumed reply descriptor. */
1507 desc = sc->reply_desc_mem;
1508 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1509 + sc->last_reply_idx[MSIxIndex];
1511 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1513 desc_val.word = desc->Words;
1516 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1518 /* Find our reply descriptor for the command and process */
/* All-ones Words marks an unused/free descriptor; stop at the first one. */
1519 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
/* SMID is 1-based; mpt_cmd_list is 0-based. */
1520 smid = reply_desc->SMID;
1521 cmd_mpt = sc->mpt_cmd_list[smid - 1];
1522 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1524 status = scsi_io_req->RaidContext.status;
1525 extStatus = scsi_io_req->RaidContext.exStatus;
1527 switch (scsi_io_req->Function) {
1528 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
/* Undo the load-balance accounting taken when the IO was issued. */
1529 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1530 lbinfo = &sc->load_balance_info[device_id];
1531 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1532 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1533 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1535 /* Fall thru and complete IO */
1536 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1537 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1538 mrsas_cmd_done(sc, cmd_mpt);
1539 scsi_io_req->RaidContext.status = 0;
1540 scsi_io_req->RaidContext.exStatus = 0;
1541 mrsas_atomic_dec(&sc->fw_outstanding);
1543 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
1544 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1545 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1547 mrsas_release_mpt_cmd(cmd_mpt);
/* Advance (and wrap) this queue's consumer index. */
1551 sc->last_reply_idx[MSIxIndex]++;
1552 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1553 sc->last_reply_idx[MSIxIndex] = 0;
/* Mark the consumed descriptor free for firmware reuse. */
1555 desc->Words = ~((uint64_t)0x00); /* set it back to all
1558 threshold_reply_count++;
1560 /* Get the next reply descriptor */
1561 if (!sc->last_reply_idx[MSIxIndex]) {
1562 desc = sc->reply_desc_mem;
1563 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1567 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1568 desc_val.word = desc->Words;
1570 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1572 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1576 * Write to reply post index after completing threshold reply
1577 * count and still there are more replies in reply queue
1578 * pending to be completed.
/* Periodically publish the consumer index so firmware can reclaim
 * descriptors even during long completion bursts. */
1580 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1581 if (sc->msix_enable) {
1582 if ((sc->device_id == MRSAS_INVADER) ||
1583 (sc->device_id == MRSAS_FURY))
1584 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1585 ((MSIxIndex & 0x7) << 24) |
1586 sc->last_reply_idx[MSIxIndex]);
1588 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1589 sc->last_reply_idx[MSIxIndex]);
1591 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1592 reply_post_host_index), sc->last_reply_idx[0]);
1594 threshold_reply_count = 0;
1598 /* No match, just return */
1599 if (num_completed == 0)
1602 /* Clear response interrupt */
/* Final consumer-index write acknowledges all processed replies. */
1603 if (sc->msix_enable) {
1604 if ((sc->device_id == MRSAS_INVADER) ||
1605 (sc->device_id == MRSAS_FURY)) {
1606 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1607 ((MSIxIndex & 0x7) << 24) |
1608 sc->last_reply_idx[MSIxIndex]);
1610 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1611 sc->last_reply_idx[MSIxIndex]);
1613 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1614 reply_post_host_index), sc->last_reply_idx[0]);
1620 * mrsas_map_mpt_cmd_status: Map firmware command status to CAM status
1621 * input: Adapter instance soft state
1623 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1624 * It checks the command status and maps the appropriate CAM status for the
1628 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1630 struct mrsas_softc *sc = cmd->sc;
1631 u_int8_t *sense_data;
1635 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1637 case MFI_STAT_SCSI_IO_FAILED:
1638 case MFI_STAT_SCSI_DONE_WITH_ERROR:
/* SCSI-level error: report it and attach the sense data. */
1639 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1640 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1642 /* For now just copy 18 bytes back */
1643 memcpy(sense_data, cmd->sense, 18);
1644 cmd->ccb_ptr->csio.sense_len = 18;
1645 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1648 case MFI_STAT_LD_OFFLINE:
1649 case MFI_STAT_DEVICE_NOT_FOUND:
/* Distinguish a bad LUN on an existing target from a missing target. */
1650 if (cmd->ccb_ptr->ccb_h.target_lun)
1651 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1653 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1655 case MFI_STAT_CONFIG_SEQ_MISMATCH:
/* RAID-map generation changed under the IO; ask CAM to requeue it. */
1656 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1659 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1660 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1661 cmd->ccb_ptr->csio.scsi_status = status;
1667 * mrsas_alloc_mem: Allocate DMAable memory
1668 * input: Adapter instance soft state
1670 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1671 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1672 * Kernel virtual address. Callback argument is physical memory address.
1675 mrsas_alloc_mem(struct mrsas_softc *sc)
1677 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1678 chain_frame_size, evt_detail_size, count;
1681 * Allocate parent DMA tag
/* All per-buffer tags below derive from this parent; per-tag creation
 * arguments are partially elided in this listing. */
1683 if (bus_dma_tag_create(NULL, /* parent */
1686 BUS_SPACE_MAXADDR, /* lowaddr */
1687 BUS_SPACE_MAXADDR, /* highaddr */
1688 NULL, NULL, /* filter, filterarg */
1689 MRSAS_MAX_IO_SIZE, /* maxsize */
1690 MRSAS_MAX_SGL, /* nsegments */
1691 MRSAS_MAX_IO_SIZE, /* maxsegsize */
1693 NULL, NULL, /* lockfunc, lockarg */
1694 &sc->mrsas_parent_tag /* tag */
1696 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1700 * Allocate for version buffer
/* Each buffer follows the same tag/alloc/zero/load pattern; loaded
 * physical addresses land in the corresponding *_phys_addr fields via
 * mrsas_addr_cb. */
1702 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1703 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1705 BUS_SPACE_MAXADDR_32BIT,
1714 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1717 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1718 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1719 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1722 bzero(sc->verbuf_mem, verbuf_size);
1723 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1724 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1726 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1730 * Allocate IO Request Frames
1732 io_req_size = sc->io_frames_alloc_sz;
1733 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1735 BUS_SPACE_MAXADDR_32BIT,
1743 &sc->io_request_tag)) {
1744 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1747 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1748 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1749 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1752 bzero(sc->io_request_mem, io_req_size);
1753 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1754 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1755 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1756 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1760 * Allocate Chain Frames
1762 chain_frame_size = sc->chain_frames_alloc_sz;
1763 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1765 BUS_SPACE_MAXADDR_32BIT,
1773 &sc->chain_frame_tag)) {
1774 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1777 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1778 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1779 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1782 bzero(sc->chain_frame_mem, chain_frame_size);
1783 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1784 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1785 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1786 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
/* One reply descriptor region per MSI-X vector (at least one). */
1789 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1791 * Allocate Reply Descriptor Array
1793 reply_desc_size = sc->reply_alloc_sz * count;
1794 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1796 BUS_SPACE_MAXADDR_32BIT,
1804 &sc->reply_desc_tag)) {
1805 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1808 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1809 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1810 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1813 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1814 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1815 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1816 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1820 * Allocate Sense Buffer Array. Keep in lower 4GB
1822 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1823 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1825 BUS_SPACE_MAXADDR_32BIT,
1834 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1837 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1838 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1839 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1842 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1843 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1845 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1849 * Allocate for Event detail structure
1851 evt_detail_size = sizeof(struct mrsas_evt_detail);
1852 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1854 BUS_SPACE_MAXADDR_32BIT,
1862 &sc->evt_detail_tag)) {
1863 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1866 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1867 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1868 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1871 bzero(sc->evt_detail_mem, evt_detail_size);
1872 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1873 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1874 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1875 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1879 * Create a dma tag for data buffers; size will be the maximum
1880 * possible I/O size (280kB).
1882 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1895 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1902 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1903 * input: callback argument, machine dependent type
1904 * that describes DMA segments, number of segments, error code
1906 * This function is for the driver to receive mapping information resultant of
1907 * the bus_dmamap_load(). The information is actually not being used, but the
1908 * address is saved anyway.
1911 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
/* arg points at a bus_addr_t; store the first segment's bus address. */
1916 *addr = segs[0].ds_addr;
1920 * mrsas_setup_raidmap: Set up RAID map.
1921 * input: Adapter instance soft state
1923 * Allocate DMA memory for the RAID maps and perform setup.
1926 mrsas_setup_raidmap(struct mrsas_softc *sc)
/* Two maps (current/pending) are kept so firmware updates can be
 * double-buffered. */
1930 for (i = 0; i < 2; i++) {
1932 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1933 /* Do Error handling */
1934 if (!sc->ld_drv_map[i]) {
1935 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
/* NOTE(review): only ld_drv_map[0] is freed here; whether [1] is
 * handled is not visible in this listing — confirm in full source. */
1938 free(sc->ld_drv_map[0], M_MRSAS);
1939 /* ABORT driver initialization */
/* DMA-able firmware raid map buffers, one per double-buffer slot. */
1944 for (int i = 0; i < 2; i++) {
1945 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1947 BUS_SPACE_MAXADDR_32BIT,
1955 &sc->raidmap_tag[i])) {
1956 device_printf(sc->mrsas_dev,
1957 "Cannot allocate raid map tag.\n");
1960 if (bus_dmamem_alloc(sc->raidmap_tag[i],
1961 (void **)&sc->raidmap_mem[i],
1962 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1963 device_printf(sc->mrsas_dev,
1964 "Cannot allocate raidmap memory.\n");
1967 bzero(sc->raidmap_mem[i], sc->max_map_sz);
1969 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1970 sc->raidmap_mem[i], sc->max_map_sz,
1971 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1973 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1976 if (!sc->raidmap_mem[i]) {
1977 device_printf(sc->mrsas_dev,
1978 "Cannot allocate memory for raid map.\n");
/* Pull the initial map from firmware, then sync it back. */
1983 if (!mrsas_get_map_info(sc))
1984 mrsas_sync_map_info(sc);
1993 * mrsas_init_fw: Initialize Firmware
1994 * input: Adapter soft state
1996 * Calls transition_to_ready() to make sure Firmware is in operational state and
1997 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
1998 * issues internal commands to get the controller info after the IOC_INIT
1999 * command response is received by Firmware. Note: code relating to
2000 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2001 * is left here as placeholder.
2004 mrsas_init_fw(struct mrsas_softc *sc)
2007 int ret, loop, ocr = 0;
2008 u_int32_t max_sectors_1;
2009 u_int32_t max_sectors_2;
2010 u_int32_t tmp_sectors;
2011 u_int32_t scratch_pad_2;
2012 int msix_enable = 0;
2013 int fw_msix_count = 0;
2015 /* Make sure Firmware is ready */
2016 ret = mrsas_transition_to_ready(sc, ocr);
2017 if (ret != SUCCESS) {
2020 /* MSI-x index 0- reply post host index register */
2021 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2022 /* Check if MSI-X is supported while in ready state */
/* Bit 26 of outbound_scratch_pad advertises MSI-X capability. */
2023 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2026 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2027 outbound_scratch_pad_2));
2029 /* Check max MSI-X vectors */
2030 if (sc->device_id == MRSAS_TBOLT) {
2031 sc->msix_vectors = (scratch_pad_2
2032 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2033 fw_msix_count = sc->msix_vectors;
2035 /* Invader/Fury supports 96 MSI-X vectors */
2036 sc->msix_vectors = ((scratch_pad_2
2037 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2038 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2039 fw_msix_count = sc->msix_vectors;
/* Extended MSI-X: supplementary reply-post index registers, one per
 * group of vectors. */
2041 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2043 sc->msix_reg_offset[loop] =
2044 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2049 /* Don't bother allocating more MSI-X vectors than cpus */
2050 sc->msix_vectors = min(sc->msix_vectors,
2053 /* Allocate MSI-x vectors */
2054 if (mrsas_allocate_msix(sc) == SUCCESS)
2055 sc->msix_enable = 1;
2057 sc->msix_enable = 0;
2059 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2060 "Online CPU %d Current MSIX <%d>\n",
2061 fw_msix_count, mp_ncpus, sc->msix_vectors);
2063 if (mrsas_init_adapter(sc) != SUCCESS) {
2064 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2067 /* Allocate internal commands for pass-thru */
2068 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2069 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2072 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2073 if (!sc->ctrl_info) {
2074 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2078 * Get the controller info from FW, so that the MAX VD support
2079 * availability can be decided.
2081 if (mrsas_get_ctrl_info(sc)) {
2082 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2085 sc->secure_jbod_support =
2086 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2088 if (sc->secure_jbod_support)
2089 device_printf(sc->mrsas_dev, "FW supports SED \n");
2091 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2092 device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
2095 /* For pass-thru, get PD/LD list and controller info */
2096 memset(sc->pd_list, 0,
2097 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2098 mrsas_get_pd_list(sc);
2100 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2101 mrsas_get_ld_list(sc);
2104 * Compute the max allowed sectors per IO: The controller info has
2105 * two limits on max sectors. Driver should use the minimum of these
2108 * 1 << stripe_sz_ops.min = max sectors per strip
2110 * Note that older firmwares ( < FW ver 30) didn't report information to
2111 * calculate max_sectors_1. So the number ended up as zero always.
2114 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2115 sc->ctrl_info->max_strips_per_io;
2116 max_sectors_2 = sc->ctrl_info->max_request_size;
2117 tmp_sectors = min(max_sectors_1, max_sectors_2);
2118 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2120 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2121 sc->max_sectors_per_req = tmp_sectors;
2123 sc->disableOnlineCtrlReset =
2124 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2125 sc->UnevenSpanSupport =
2126 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2127 if (sc->UnevenSpanSupport) {
2128 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2129 sc->UnevenSpanSupport);
/* Fast-path IO is only enabled when the RAID map validates. */
2131 if (MR_ValidateMapInfo(sc))
2132 sc->fast_path_io = 1;
2134 sc->fast_path_io = 0;
2140 * mrsas_init_adapter: Initializes the adapter/controller
2141 * input: Adapter soft state
2143 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2144 * ROC/controller. The FW register is read to determined the number of
2145 * commands that is supported. All memory allocations for IO is based on
2146 * max_cmd. Appropriate calculations are performed in this function.
2149 mrsas_init_adapter(struct mrsas_softc *sc)
2156 /* Read FW status register */
2157 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2159 /* Get operational params from status register */
2160 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2162 /* Decrement the max supported by 1, to correlate with FW */
2163 sc->max_fw_cmds = sc->max_fw_cmds - 1;
2164 max_cmd = sc->max_fw_cmds;
2166 /* Determine allocation size of command frames */
/* Reply queue depth: (max_cmd + 1) rounded up to a multiple of 16,
 * then doubled. */
2167 sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2168 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2169 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2170 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2171 sc->chain_frames_alloc_sz = 1024 * max_cmd;
/* SGE counts: main-frame SGEs plus chain SGEs, minus 2 reserved. */
2172 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2173 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2175 sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
2176 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2178 /* Used for pass thru MFI frame (DCMD) */
2179 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2181 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2182 sizeof(MPI2_SGE_IO_UNION)) / 16;
2184 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* Reset every per-vector reply consumer index. */
2186 for (i = 0; i < count; i++)
2187 sc->last_reply_idx[i] = 0;
2189 ret = mrsas_alloc_mem(sc);
2193 ret = mrsas_alloc_mpt_cmds(sc);
2197 ret = mrsas_ioc_init(sc);
2205 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2206 * input: Adapter soft state
2208 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2211 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2215 /* Allocate IOC INIT command */
/* 1024 bytes for the MFI init frame, followed by the MPI2 IOC INIT
 * request (see mrsas_ioc_init(), which uses the +1024 offset). */
2216 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2217 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2219 BUS_SPACE_MAXADDR_32BIT,
2227 &sc->ioc_init_tag)) {
2228 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2231 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2232 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2233 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2236 bzero(sc->ioc_init_mem, ioc_init_size);
2237 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2238 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2239 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2240 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2247 * mrsas_free_ioc_cmd: Frees memory for IOC Init command
2248 * input: Adapter soft state
2250 * Deallocates memory of the IOC Init cmd.
2253 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
/* Teardown in reverse order of allocation: unload map, free memory,
 * destroy tag. */
2255 if (sc->ioc_init_phys_mem)
2256 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2257 if (sc->ioc_init_mem != NULL)
2258 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2259 if (sc->ioc_init_tag != NULL)
2260 bus_dma_tag_destroy(sc->ioc_init_tag);
2264 * mrsas_ioc_init: Sends IOC Init command to FW
2265 * input: Adapter soft state
2267 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2270 mrsas_ioc_init(struct mrsas_softc *sc)
2272 struct mrsas_init_frame *init_frame;
2273 pMpi2IOCInitRequest_t IOCInitMsg;
2274 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
2275 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
2276 bus_addr_t phys_addr;
2279 /* Allocate memory for the IOC INIT command */
2280 if (mrsas_alloc_ioc_cmd(sc)) {
2281 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/* The MPI2 IOC INIT message lives 1024 bytes past the MFI init frame
 * (layout established in mrsas_alloc_ioc_cmd()). */
2284 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
2285 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
2286 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2287 IOCInitMsg->MsgVersion = MPI2_VERSION;
2288 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
2289 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
2290 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
2291 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
2292 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
2293 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
/* MFI wrapper frame that carries the IOC INIT request to firmware. */
2295 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
2296 init_frame->cmd = MFI_CMD_INIT;
/* 0xFF = "still pending"; firmware overwrites it on completion. */
2297 init_frame->cmd_status = 0xFF;
2298 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2300 /* driver support Extended MSIX */
2301 if ((sc->device_id == MRSAS_INVADER) ||
2302 (sc->device_id == MRSAS_FURY)) {
2303 init_frame->driver_operations.
2304 mfi_capabilities.support_additional_msix = 1;
/* Advertise driver version string to firmware via the verbuf DMA. */
2306 if (sc->verbuf_mem) {
2307 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
2309 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
2310 init_frame->driver_ver_hi = 0;
2312 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
2313 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
2314 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
2315 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
2316 init_frame->queue_info_new_phys_addr_lo = phys_addr;
2317 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
2319 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
2320 req_desc.MFAIo.RequestFlags =
2321 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2323 mrsas_disable_intr(sc);
2324 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
2325 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
2328 * Poll response timer to wait for Firmware response. While this
2329 * timer with the DELAY call could block CPU, the time interval for
2330 * this is only 1 millisecond.
2332 if (init_frame->cmd_status == 0xFF) {
2333 for (i = 0; i < (max_wait * 1000); i++) {
2334 if (init_frame->cmd_status == 0xFF)
2340 if (init_frame->cmd_status == 0)
2341 mrsas_dprint(sc, MRSAS_OCR,
2342 "IOC INIT response received from FW.\n");
2344 if (init_frame->cmd_status == 0xFF)
2345 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
2347 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
/* IOC INIT buffers are one-shot; release them regardless of outcome. */
2351 mrsas_free_ioc_cmd(sc);
2356 * mrsas_alloc_mpt_cmds: Allocates the command packets
2357 * input: Adapter instance soft state
2359 * This function allocates the internal commands for IOs. Each command that is
2360 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2361 * array is allocated with mrsas_mpt_cmd context. The free commands are
2362 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2366 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2369 u_int32_t max_cmd, count;
2370 struct mrsas_mpt_cmd *cmd;
2371 pMpi2ReplyDescriptorsUnion_t reply_desc;
2372 u_int32_t offset, chain_offset, sense_offset;
2373 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2374 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2376 max_cmd = sc->max_fw_cmds;
/* Request descriptor pool; M_NOWAIT, so failure must be handled here. */
2378 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2379 if (!sc->req_desc) {
2380 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2383 memset(sc->req_desc, 0, sc->request_alloc_sz);
2386 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2387 * Allocate the dynamic array first and then allocate individual
2390 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2391 if (!sc->mpt_cmd_list) {
2392 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2395 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2396 for (i = 0; i < max_cmd; i++) {
2397 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2399 if (!sc->mpt_cmd_list[i]) {
/* Allocation failed mid-way: unwind everything allocated so far. */
2400 for (j = 0; j < i; j++)
2401 free(sc->mpt_cmd_list[j], M_MRSAS);
2402 free(sc->mpt_cmd_list, M_MRSAS);
2403 sc->mpt_cmd_list = NULL;
/*
 * Per-command carve-outs start one IO frame past the pool base
 * (virtual and physical bases advance together); the first frame
 * slot is skipped — presumably reserved for SMID 0, TODO confirm.
 */
2408 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2409 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2410 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2411 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2412 sense_base = (u_int8_t *)sc->sense_mem;
2413 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
/* Wire each command to its slice of the io_request/chain/sense pools. */
2414 for (i = 0; i < max_cmd; i++) {
2415 cmd = sc->mpt_cmd_list[i];
2416 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2417 chain_offset = 1024 * i;
2418 sense_offset = MRSAS_SENSE_LEN * i;
2419 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2421 cmd->ccb_ptr = NULL;
2422 callout_init(&cmd->cm_callout, 0);
/* MRSAS_ULONG_MAX here means "no MFI sync command attached". */
2423 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2425 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2426 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2427 cmd->io_request_phys_addr = io_req_base_phys + offset;
2428 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2429 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2430 cmd->sense = sense_base + sense_offset;
2431 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2432 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2435 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2438 /* Initialize reply descriptor array to 0xFFFFFFFF */
2439 reply_desc = sc->reply_desc_mem;
/* One reply queue per MSI-X vector, or a single queue in INTx mode. */
2440 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2441 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2442 reply_desc->Words = MRSAS_ULONG_MAX;
2448 * mrsas_fire_cmd: Sends command to FW
2449 * input: Adapter softstate
2450 * request descriptor address low
2451 * request descriptor address high
2453 * This functions fires the command to Firmware by writing to the
2454 * inbound_low_queue_port and inbound_high_queue_port.
2457 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2458 u_int32_t req_desc_hi)
/*
 * pci_lock keeps the low/high queue-port write pair atomic with respect
 * to other submitters; the controller consumes the descriptor once the
 * high half is written.
 */
2460 mtx_lock(&sc->pci_lock);
2461 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2463 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2465 mtx_unlock(&sc->pci_lock);
2469 * mrsas_transition_to_ready: Move FW to Ready state input:
2470 * Adapter instance soft state
2472 * During the initialization, FW passes can potentially be in any one of several
2473 * possible states. If the FW in operational, waiting-for-handshake states,
2474 * driver must take steps to bring it to ready state. Otherwise, it has to
2475 * wait for the ready state.
2478 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2482 u_int32_t val, fw_state;
2483 u_int32_t cur_state;
2484 u_int32_t abs_state, curr_abs_state;
/* FW state is reported in the low bits of the outbound scratch pad. */
2486 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2487 fw_state = val & MFI_STATE_MASK;
2488 max_wait = MRSAS_RESET_WAIT_TIME;
2490 if (fw_state != MFI_STATE_READY)
2491 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
/* Step the FW state machine, nudging it via the doorbell where needed. */
2493 while (fw_state != MFI_STATE_READY) {
2494 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2496 case MFI_STATE_FAULT:
2497 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2499 cur_state = MFI_STATE_FAULT;
2503 case MFI_STATE_WAIT_HANDSHAKE:
2504 /* Set the CLR bit in inbound doorbell */
2505 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2506 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
2507 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2509 case MFI_STATE_BOOT_MESSAGE_PENDING:
2510 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2512 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2514 case MFI_STATE_OPERATIONAL:
2516 * Bring it to READY state; assuming max wait 10
2519 mrsas_disable_intr(sc);
2520 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
/* Poll at 1 ms granularity for the doorbell reset bit to clear. */
2521 for (i = 0; i < max_wait * 1000; i++) {
2522 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2527 cur_state = MFI_STATE_OPERATIONAL;
2529 case MFI_STATE_UNDEFINED:
2531 * This state should not last for more than 2
2534 cur_state = MFI_STATE_UNDEFINED;
/* The remaining states are transient boot phases; just wait them out. */
2536 case MFI_STATE_BB_INIT:
2537 cur_state = MFI_STATE_BB_INIT;
2539 case MFI_STATE_FW_INIT:
2540 cur_state = MFI_STATE_FW_INIT;
2542 case MFI_STATE_FW_INIT_2:
2543 cur_state = MFI_STATE_FW_INIT_2;
2545 case MFI_STATE_DEVICE_SCAN:
2546 cur_state = MFI_STATE_DEVICE_SCAN;
2548 case MFI_STATE_FLUSH_CACHE:
2549 cur_state = MFI_STATE_FLUSH_CACHE;
2552 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2557 * The cur_state should not last for more than max_wait secs
2559 for (i = 0; i < (max_wait * 1000); i++) {
2560 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2561 outbound_scratch_pad)) & MFI_STATE_MASK);
/* Compare the full (unmasked) scratch-pad value to detect any change. */
2562 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2563 outbound_scratch_pad));
2564 if (abs_state == curr_abs_state)
2571 * Return error if fw_state hasn't changed after max_wait
2573 if (curr_abs_state == abs_state) {
2574 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2575 "in %d secs\n", fw_state, max_wait);
2579 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2584 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2585 * input: Adapter soft state
2587 * This function removes an MFI command from the command list.
2589 struct mrsas_mfi_cmd *
2590 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2592 struct mrsas_mfi_cmd *cmd = NULL;
/* Pop the head of the free pool under the pool lock; NULL if empty. */
2594 mtx_lock(&sc->mfi_cmd_pool_lock);
2595 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2596 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2597 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2599 mtx_unlock(&sc->mfi_cmd_pool_lock);
2605 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
2606 * input: Adapter Context.
2608 * This function will check FW status register and flag do_timeout_reset flag.
2609 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2613 mrsas_ocr_thread(void *arg)
2615 struct mrsas_softc *sc;
2616 u_int32_t fw_status, fw_state;
2618 sc = (struct mrsas_softc *)arg;
2620 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2622 sc->ocr_thread_active = 1;
2623 mtx_lock(&sc->sim_lock);
2625 /* Sleep for 1 second and check the queue status */
/* msleep drops sim_lock while asleep and re-acquires it on wakeup. */
2626 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2627 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2628 if (sc->remove_in_progress) {
2629 mrsas_dprint(sc, MRSAS_OCR,
2630 "Exit due to shutdown from %s\n", __func__);
2633 fw_status = mrsas_read_reg(sc,
2634 offsetof(mrsas_reg_set, outbound_scratch_pad));
2635 fw_state = fw_status & MFI_STATE_MASK;
/* Trigger OCR on FW fault or when the IO-timeout path requested it. */
2636 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2637 device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
2638 sc->do_timedout_reset ? "IO Timeout" :
2639 "FW fault detected");
/* ioctl_lock (spin mutex) guards reset_in_progress vs. the ioctl path. */
2640 mtx_lock_spin(&sc->ioctl_lock);
2641 sc->reset_in_progress = 1;
2643 mtx_unlock_spin(&sc->ioctl_lock);
/* Freeze CAM for the duration of the controller reset. */
2644 mrsas_xpt_freeze(sc);
2645 mrsas_reset_ctrl(sc);
2646 mrsas_xpt_release(sc);
2647 sc->reset_in_progress = 0;
2648 sc->do_timedout_reset = 0;
2651 mtx_unlock(&sc->sim_lock);
2652 sc->ocr_thread_active = 0;
2653 mrsas_kproc_exit(0);
2657 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
2658 * input: Adapter Context.
2660 * This function will clear reply descriptor so that post OCR driver and FW will
2664 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2667 pMpi2ReplyDescriptorsUnion_t reply_desc;
/* One reply-queue index per MSI-X vector; at least one in INTx mode. */
2669 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2670 for (i = 0; i < count; i++)
2671 sc->last_reply_idx[i] = 0;
/* MRSAS_ULONG_MAX in Words marks a reply slot as empty/consumed. */
2673 reply_desc = sc->reply_desc_mem;
2674 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2675 reply_desc->Words = MRSAS_ULONG_MAX;
2680 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
2681 * input: Adapter Context.
2683 * This function will run from thread context so that it can sleep. 1. Do not
2684 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2685 * to complete for 180 seconds. 3. If #2 does not find any outstanding
2686 * command Controller is in working state, so skip OCR. Otherwise, do
2687 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2688 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2689 * OCR, Re-fire Managment command and move Controller to Operation state.
2692 mrsas_reset_ctrl(struct mrsas_softc *sc)
2694 int retval = SUCCESS, i, j, retry = 0;
2695 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2697 struct mrsas_mfi_cmd *mfi_cmd;
2698 struct mrsas_mpt_cmd *mpt_cmd;
2699 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2701 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2702 device_printf(sc->mrsas_dev,
2703 "mrsas: Hardware critical error, returning FAIL.\n");
/* Mark reset in progress and quiesce the interrupt path. */
2706 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2707 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2708 mrsas_disable_intr(sc);
2711 /* First try waiting for commands to complete */
2712 if (mrsas_wait_for_outstanding(sc)) {
2713 mrsas_dprint(sc, MRSAS_OCR,
2714 "resetting adapter from %s.\n",
2716 /* Now return commands back to the CAM layer */
2717 for (i = 0; i < sc->max_fw_cmds; i++) {
2718 mpt_cmd = sc->mpt_cmd_list[i];
2719 if (mpt_cmd->ccb_ptr) {
2720 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2721 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2722 mrsas_cmd_done(sc, mpt_cmd);
2723 mrsas_atomic_dec(&sc->fw_outstanding);
2727 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2728 outbound_scratch_pad));
2729 abs_state = status_reg & MFI_STATE_MASK;
2730 reset_adapter = status_reg & MFI_RESET_ADAPTER;
/* FW must advertise MFI_RESET_ADAPTER for OCR; otherwise kill the HBA. */
2731 if (sc->disableOnlineCtrlReset ||
2732 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2733 /* Reset not supported, kill adapter */
2734 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2739 /* Now try to reset the chip */
2740 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
/* Six-key MPI2 write sequence unlocks the fusion host-diag register. */
2741 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2742 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2743 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2744 MPI2_WRSEQ_1ST_KEY_VALUE);
2745 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2746 MPI2_WRSEQ_2ND_KEY_VALUE);
2747 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2748 MPI2_WRSEQ_3RD_KEY_VALUE);
2749 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2750 MPI2_WRSEQ_4TH_KEY_VALUE);
2751 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2752 MPI2_WRSEQ_5TH_KEY_VALUE);
2753 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2754 MPI2_WRSEQ_6TH_KEY_VALUE);
2756 /* Check that the diag write enable (DRWE) bit is on */
2757 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
/* Bounded retry loop: give up on this attempt after 100 polls. */
2760 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2762 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2764 if (retry++ == 100) {
2765 mrsas_dprint(sc, MRSAS_OCR,
2766 "Host diag unlock failed!\n");
2770 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2773 /* Send chip reset command */
2774 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2775 host_diag | HOST_DIAG_RESET_ADAPTER);
2778 /* Make sure reset adapter bit is cleared */
2779 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2782 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2784 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2786 if (retry++ == 1000) {
2787 mrsas_dprint(sc, MRSAS_OCR,
2788 "Diag reset adapter never cleared!\n");
2792 if (host_diag & HOST_DIAG_RESET_ADAPTER)
/* Wait for FW to progress past its earliest boot states after reset. */
2795 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2796 outbound_scratch_pad)) & MFI_STATE_MASK;
2799 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2801 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2802 outbound_scratch_pad)) & MFI_STATE_MASK;
2804 if (abs_state <= MFI_STATE_FW_INIT) {
2805 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2806 " state = 0x%x\n", abs_state);
2809 /* Wait for FW to become ready */
2810 if (mrsas_transition_to_ready(sc, 1)) {
2811 mrsas_dprint(sc, MRSAS_OCR,
2812 "mrsas: Failed to transition controller to ready.\n");
2815 mrsas_reset_reply_desc(sc);
2816 if (mrsas_ioc_init(sc)) {
2817 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2820 /* Re-fire management commands */
2821 for (j = 0; j < sc->max_fw_cmds; j++) {
2822 mpt_cmd = sc->mpt_cmd_list[j];
/* sync_cmd_idx != MRSAS_ULONG_MAX means an MFI command is attached. */
2823 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2824 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
/* Map-get-info is dropped (re-issued below); others are re-fired. */
2825 if (mfi_cmd->frame->dcmd.opcode ==
2826 MR_DCMD_LD_MAP_GET_INFO) {
2827 mrsas_release_mfi_cmd(mfi_cmd);
2828 mrsas_release_mpt_cmd(mpt_cmd);
2830 req_desc = mrsas_get_request_desc(sc,
2831 mfi_cmd->cmd_id.context.smid - 1);
2832 mrsas_dprint(sc, MRSAS_OCR,
2833 "Re-fire command DCMD opcode 0x%x index %d\n ",
2834 mfi_cmd->frame->dcmd.opcode, j);
2836 device_printf(sc->mrsas_dev,
2837 "Cannot build MPT cmd.\n");
2839 mrsas_fire_cmd(sc, req_desc->addr.u.low,
2840 req_desc->addr.u.high);
2845 /* Reset load balance info */
2846 memset(sc->load_balance_info, 0,
2847 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT)
2849 if (mrsas_get_ctrl_info(sc)) {
/* Refresh the RAID map and resume normal operation. */
2854 if (!mrsas_get_map_info(sc))
2855 mrsas_sync_map_info(sc);
2857 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2858 mrsas_enable_intr(sc);
2859 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2861 /* Adapter reset completed successfully */
2862 device_printf(sc->mrsas_dev, "Reset successful\n");
2866 /* Reset failed, kill the adapter */
2867 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
2871 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2872 mrsas_enable_intr(sc);
2873 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2876 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2877 mrsas_dprint(sc, MRSAS_OCR,
2878 "Reset Exit with %d.\n", retval);
2883 * mrsas_kill_hba: Kill HBA when OCR is not supported
2884 * input: Adapter Context.
2886 * This function will kill HBA when OCR is not supported.
2889 mrsas_kill_hba(struct mrsas_softc *sc)
/* Mark the adapter dead first so no new commands are accepted. */
2891 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
2892 pause("mrsas_kill_hba", 1000);
2893 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2894 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
/* Read back the doorbell to flush the posted write before cleanup. */
2897 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2898 mrsas_complete_outstanding_ioctls(sc);
2902 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
2903 * input: Controller softc
2908 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
2911 struct mrsas_mpt_cmd *cmd_mpt;
2912 struct mrsas_mfi_cmd *cmd_mfi;
2913 u_int32_t count, MSIxIndex;
/* One completion pass per MSI-X vector (at least one in INTx mode). */
2915 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2916 for (i = 0; i < sc->max_fw_cmds; i++) {
2917 cmd_mpt = sc->mpt_cmd_list[i];
/* Only MPT commands carrying a synchronous MFI command are ioctls here. */
2919 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2920 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
/* Abort frames are excluded; complete the rest with the FW status. */
2921 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
2922 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2923 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
2924 cmd_mpt->io_request->RaidContext.status);
2931 * mrsas_wait_for_outstanding: Wait for outstanding commands
2932 * input: Adapter Context.
2934 * This function will wait for 180 seconds for outstanding commands to be
2938 mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2940 int i, outstanding, retval = 0;
2941 u_int32_t fw_state, count, MSIxIndex;
/* One iteration per second, up to MRSAS_RESET_WAIT_TIME seconds. */
2944 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2945 if (sc->remove_in_progress) {
2946 mrsas_dprint(sc, MRSAS_OCR,
2947 "Driver remove or shutdown called.\n");
2951 /* Check if firmware is in fault state */
2952 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2953 outbound_scratch_pad)) & MFI_STATE_MASK;
2954 if (fw_state == MFI_STATE_FAULT) {
2955 mrsas_dprint(sc, MRSAS_OCR,
2956 "Found FW in FAULT state, will reset adapter.\n");
2960 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
/* Periodically drain completion queues while waiting. */
2964 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2965 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2966 "commands to complete\n", i, outstanding);
2967 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2968 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2969 mrsas_complete_cmd(sc, MSIxIndex);
/* Non-zero return tells the caller a full adapter reset is needed. */
2974 if (mrsas_atomic_read(&sc->fw_outstanding)) {
2975 mrsas_dprint(sc, MRSAS_OCR,
2976 " pending commands remain after waiting,"
2977 " will reset adapter.\n");
2985 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2986 * input: Command packet for return to free cmd pool
2988 * This function returns the MFI command to the command list.
2991 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2993 struct mrsas_softc *sc = cmd->sc;
/* Clear per-use state before handing the command back to the pool. */
2995 mtx_lock(&sc->mfi_cmd_pool_lock);
2996 cmd->ccb_ptr = NULL;
2997 cmd->cmd_id.frame_count = 0;
2998 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2999 mtx_unlock(&sc->mfi_cmd_pool_lock);
3005 * mrsas_get_controller_info: Returns FW's controller structure
3006 * input: Adapter soft state
3007 * Controller information structure
3009 * Issues an internal command (DCMD) to get the FW's controller structure. This
3010 * information is mainly used to find out the maximum IO transfer per command
3011 * supported by the FW.
3014 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3017 struct mrsas_mfi_cmd *cmd;
3018 struct mrsas_dcmd_frame *dcmd;
3020 cmd = mrsas_get_mfi_cmd(sc);
3023 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3026 dcmd = &cmd->frame->dcmd;
/* DMAable scratch buffer for the FW to write the ctrl_info into. */
3028 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3029 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3030 mrsas_release_mfi_cmd(cmd);
3033 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Build the DCMD: FW -> host read of sizeof(struct mrsas_ctrl_info). */
3035 dcmd->cmd = MFI_CMD_DCMD;
3036 dcmd->cmd_status = 0xFF;
3037 dcmd->sge_count = 1;
3038 dcmd->flags = MFI_FRAME_DIR_READ;
3041 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
3042 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
3043 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
3044 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
/* On success, copy the DMA buffer into the softc-held ctrl_info. */
3046 if (!mrsas_issue_polled(sc, cmd))
3047 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3051 mrsas_update_ext_vd_details(sc);
3053 mrsas_free_ctlr_info_cmd(sc);
3054 mrsas_release_mfi_cmd(cmd);
3059 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3061 * sc - Controller's softc
3064 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3066 sc->max256vdSupport =
3067 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3068 /* Below is additional check to address future FW enhancement */
3069 if (sc->ctrl_info->max_lds > 64)
3070 sc->max256vdSupport = 1;
/* Driver-side limits are fixed by channel/device constants. */
3072 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3073 * MRSAS_MAX_DEV_PER_CHANNEL;
3074 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3075 * MRSAS_MAX_DEV_PER_CHANNEL;
/* FW-side limits depend on whether extended (256) VDs are supported. */
3076 if (sc->max256vdSupport) {
3077 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3078 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3080 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3081 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
/*
 * RAID-map sizes: the structs embed one MR_LD_SPAN_MAP, hence the
 * "(count - 1)" when adding space for the remaining spans.
 */
3084 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3085 (sizeof(MR_LD_SPAN_MAP) *
3086 (sc->fw_supported_vd_count - 1));
3087 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3088 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3089 (sizeof(MR_LD_SPAN_MAP) *
3090 (sc->drv_supported_vd_count - 1));
3092 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3094 if (sc->max256vdSupport)
3095 sc->current_map_sz = sc->new_map_sz;
3097 sc->current_map_sz = sc->old_map_sz;
3101 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3102 * input: Adapter soft state
3104 * Allocates DMAable memory for the controller info internal command.
3107 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3111 /* Allocate get controller info command */
3112 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
/* DMA tag limited to 32-bit addresses (sge32 SGL carries the address). */
3113 if (bus_dma_tag_create(sc->mrsas_parent_tag,
3115 BUS_SPACE_MAXADDR_32BIT,
3123 &sc->ctlr_info_tag)) {
3124 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3127 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3128 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3129 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
/* mrsas_addr_cb stores the loaded bus address in ctlr_info_phys_addr. */
3132 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3133 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3134 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3135 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3138 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3143 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3144 * input: Adapter soft state
3146 * Deallocates memory of the get controller info cmd.
3149 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
/* Teardown mirrors the allocation order: unload, free, destroy tag. */
3151 if (sc->ctlr_info_phys_addr)
3152 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3153 if (sc->ctlr_info_mem != NULL)
3154 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3155 if (sc->ctlr_info_tag != NULL)
3156 bus_dma_tag_destroy(sc->ctlr_info_tag);
3160 * mrsas_issue_polled: Issues a polling command
3161 * inputs: Adapter soft state
3162 * Command packet to be issued
3164 * This function is for posting of internal commands to Firmware. MFI requires
3165 * the cmd_status to be set to 0xFF before posting. The maximun wait time of
3166 * the poll response timer is 180 seconds.
3169 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3171 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3172 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
/* 0xFF sentinel: FW overwrites cmd_status when the command completes. */
3175 frame_hdr->cmd_status = 0xFF;
3176 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3178 /* Issue the frame using inbound queue port */
3179 if (mrsas_issue_dcmd(sc, cmd)) {
3180 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3184 * Poll response timer to wait for Firmware response. While this
3185 * timer with the DELAY call could block CPU, the time interval for
3186 * this is only 1 millisecond.
3188 if (frame_hdr->cmd_status == 0xFF) {
3189 for (i = 0; i < (max_wait * 1000); i++) {
3190 if (frame_hdr->cmd_status == 0xFF)
/* Distinguish timeout (still 0xFF) from a FW-reported failure code. */
3196 if (frame_hdr->cmd_status != 0) {
3197 if (frame_hdr->cmd_status == 0xFF)
3198 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
3200 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
3207 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3208 * input: Adapter soft state mfi cmd pointer
3210 * This function is called by mrsas_issued_blocked_cmd() and
3211 * mrsas_issued_polled(), to build the MPT command and then fire the command
3215 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3217 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
/* Wrap the MFI frame in an MPT passthru command, then post it to FW. */
3219 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3221 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3224 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3230 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3231 * input: Adapter soft state mfi cmd to build
3233 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3234 * command and prepares the MPT command to send to Firmware.
3236 MRSAS_REQUEST_DESCRIPTOR_UNION *
3237 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3239 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3242 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3243 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
/* SMIDs are 1-based; the request-descriptor array is 0-based. */
3246 index = cmd->cmd_id.context.smid;
3248 req_desc = mrsas_get_request_desc(sc, index - 1);
3252 req_desc->addr.Words = 0;
3253 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3255 req_desc->SCSIIO.SMID = index;
3261 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3262 * input: Adapter soft state mfi cmd pointer
3264 * The MPT command and the io_request are setup as a passthru command. The SGE
3265 * chain address is set to frame_phys_addr of the MFI command.
3268 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3270 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3271 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3272 struct mrsas_mpt_cmd *mpt_cmd;
3273 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3275 mpt_cmd = mrsas_get_mpt_cmd(sc);
3279 /* Save the smid. To be used for returning the cmd */
3280 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
/* Cross-link MPT command back to the MFI command for completion. */
3282 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3285 * For cmds where the flag is set, store the flag and check on
3286 * completion. For cmds with this flag, don't call
3287 * mrsas_complete_cmd.
3290 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3291 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3293 io_req = mpt_cmd->io_request;
/* Invader/Fury require the last main-frame SGE's flags cleared. */
3295 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
3296 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3298 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3299 sgl_ptr_end->Flags = 0;
3301 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3303 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3304 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3305 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
/* The chain SGE points at the MFI frame itself, not at data buffers. */
3307 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3309 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3310 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3312 mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
3318 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3319 * input: Adapter soft state Command to be issued
3321 * This function waits on an event for the command to be returned from the ISR.
3322 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3323 * internal and ioctl commands.
3326 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3328 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3329 unsigned long total_time = 0;
3332 /* Initialize cmd_status */
/* ECONNREFUSED sentinel: mrsas_wakeup() replaces it on completion. */
3333 cmd->cmd_status = ECONNREFUSED;
3335 /* Build MPT-MFI command for issue to FW */
3336 if (mrsas_issue_dcmd(sc, cmd)) {
3337 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3340 sc->chan = (void *)&cmd;
/* Sleep in 1-second slices so the timeout can be tracked in seconds. */
3343 if (cmd->cmd_status == ECONNREFUSED) {
3344 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3348 if (total_time >= max_wait) {
3349 device_printf(sc->mrsas_dev,
3350 "Internal command timed out after %d seconds.\n", max_wait);
3359 * mrsas_complete_mptmfi_passthru: Completes a command
3360 * input: @sc: Adapter soft state
3361 * @cmd: Command to be completed
3362 * @status: cmd completion status
3364 * This function is called from mrsas_complete_cmd() after an interrupt is
3365 * received from Firmware, and io_request->Function is
3366 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3369 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
3372 struct mrsas_header *hdr = &cmd->frame->hdr;
3373 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
3375 /* Reset the retry counter for future re-tries */
3376 cmd->retry_for_fw_reset = 0;
3379 cmd->ccb_ptr = NULL;
/* Dispatch on the MFI command opcode in the frame header. */
3382 case MFI_CMD_INVALID:
3383 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
3385 case MFI_CMD_PD_SCSI_IO:
3386 case MFI_CMD_LD_SCSI_IO:
3388 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3389 * issued either through an IO path or an IOCTL path. If it
3390 * was via IOCTL, we will send it to internal completion.
3392 if (cmd->sync_cmd) {
3394 mrsas_wakeup(sc, cmd);
3400 /* Check for LD map update */
/* mbox.b[1] == 1 marks this DCMD as a pending-map (sync map) request. */
3401 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
3402 (cmd->frame->dcmd.mbox.b[1] == 1)) {
/* Disable fast-path IO until the new map has been validated. */
3403 sc->fast_path_io = 0;
3404 mtx_lock(&sc->raidmap_lock);
3405 if (cmd_status != 0) {
3406 if (cmd_status != MFI_STAT_NOT_FOUND)
3407 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
3409 mrsas_release_mfi_cmd(cmd);
3410 mtx_unlock(&sc->raidmap_lock);
3415 mrsas_release_mfi_cmd(cmd);
3416 if (MR_ValidateMapInfo(sc))
3417 sc->fast_path_io = 0;
3419 sc->fast_path_io = 1;
3420 mrsas_sync_map_info(sc);
3421 mtx_unlock(&sc->raidmap_lock);
3424 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3425 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
3426 sc->mrsas_aen_triggered = 0;
3428 /* See if got an event notification */
3429 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
3430 mrsas_complete_aen(sc, cmd);
3432 mrsas_wakeup(sc, cmd);
3435 /* Command issued to abort another cmd return */
3436 mrsas_complete_abort(sc, cmd);
3439 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
3445 * mrsas_wakeup: Completes an internal command
3446 * input: Adapter soft state
3447 * Command to be completed
3449 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3450 * timer is started. This function is called from
3451 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3452 * from the command wait.
3455 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3457 cmd->cmd_status = cmd->frame->io.cmd_status;
/* ECONNREFUSED is the "still waiting" sentinel; never report it as FW status. */
3459 if (cmd->cmd_status == ECONNREFUSED)
3460 cmd->cmd_status = 0;
/* Wake the sleeper in mrsas_issue_blocked_cmd() on the same channel. */
3462 sc->chan = (void *)&cmd;
3463 wakeup_one((void *)&sc->chan);
3468 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
3469 * Adapter soft state Shutdown/Hibernate
3471 * This function issues a DCMD internal command to Firmware to initiate shutdown
3472 * of the controller.
3475 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3477 struct mrsas_mfi_cmd *cmd;
3478 struct mrsas_dcmd_frame *dcmd;
/* Nothing to do if the adapter has already been declared dead. */
3480 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3483 cmd = mrsas_get_mfi_cmd(sc);
3485 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
/* Abort any in-flight AEN and map-update commands before shutdown. */
3489 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3491 if (sc->map_update_cmd)
3492 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3494 dcmd = &cmd->frame->dcmd;
3495 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Data-less DCMD; opcode selects shutdown vs. hibernate behavior. */
3497 dcmd->cmd = MFI_CMD_DCMD;
3498 dcmd->cmd_status = 0x0;
3499 dcmd->sge_count = 0;
3500 dcmd->flags = MFI_FRAME_DIR_NONE;
3503 dcmd->data_xfer_len = 0;
3504 dcmd->opcode = opcode;
3506 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3508 mrsas_issue_blocked_cmd(sc, cmd);
3509 mrsas_release_mfi_cmd(cmd);
3515 * mrsas_flush_cache: Requests FW to flush all its caches input:
3516 * Adapter soft state
3518 * This function is issues a DCMD internal command to Firmware to initiate
3519 * flushing of all caches.
3522 mrsas_flush_cache(struct mrsas_softc *sc)
3524 struct mrsas_mfi_cmd *cmd;
3525 struct mrsas_dcmd_frame *dcmd;
/* Skip the flush entirely if the adapter is in critical error state. */
3527 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3530 cmd = mrsas_get_mfi_cmd(sc);
3532 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
/* Zero-length DCMD: flush targets are encoded in mailbox byte 0. */
3535 dcmd = &cmd->frame->dcmd;
3536 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3538 dcmd->cmd = MFI_CMD_DCMD;
3539 dcmd->cmd_status = 0x0;
3540 dcmd->sge_count = 0;
3541 dcmd->flags = MFI_FRAME_DIR_NONE;
3544 dcmd->data_xfer_len = 0;
3545 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
/* Flush both the controller cache and the disk write caches. */
3546 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
/* Wait for firmware completion, then free the command slot. */
3548 mrsas_issue_blocked_cmd(sc, cmd);
3549 mrsas_release_mfi_cmd(cmd);
3555 * mrsas_get_map_info: Load and validate RAID map input:
3556 * Adapter instance soft state
3558 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3559 * and validate RAID map. It returns 0 if successful, 1 otherwise.
3562 mrsas_get_map_info(struct mrsas_softc *sc)
3564 uint8_t retcode = 0;
/* Disable fast-path I/O until a freshly fetched RAID map validates. */
3566 sc->fast_path_io = 0;
3567 if (!mrsas_get_ld_map_info(sc)) {
/* MR_ValidateMapInfo() returns 0 for a valid map; only then is the
 * fast path re-enabled. NOTE(review): the else/return lines are missing
 * from this extraction (numbering jumps 3568 -> 3570). */
3568 retcode = MR_ValidateMapInfo(sc);
3570 sc->fast_path_io = 1;
3578 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
3579 * Adapter instance soft state
3581 * Issues an internal command (DCMD) to get the FW's controller PD list
3585 mrsas_get_ld_map_info(struct mrsas_softc *sc)
/* Fetch the firmware's LD RAID map into the inactive half of the
 * double-buffered raidmap_mem[] (selected by map_id & 1) via a polled
 * MR_DCMD_LD_MAP_GET_INFO DCMD. */
3588 struct mrsas_mfi_cmd *cmd;
3589 struct mrsas_dcmd_frame *dcmd;
3591 bus_addr_t map_phys_addr = 0;
3593 cmd = mrsas_get_mfi_cmd(sc);
3595 device_printf(sc->mrsas_dev,
3596 "Cannot alloc for ld map info cmd.\n");
3599 dcmd = &cmd->frame->dcmd;
3601 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3602 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3604 device_printf(sc->mrsas_dev,
3605 "Failed to alloc mem for ld map info.\n");
3606 mrsas_release_mfi_cmd(cmd);
/* BUGFIX: was sizeof(sc->max_map_sz), which zeroes only sizeof(u_int32_t)
 * bytes of the map buffer instead of the full max_map_sz bytes. */
3609 memset(map, 0, sc->max_map_sz);
3610 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* DMA-read DCMD: firmware writes current_map_sz bytes into the buffer. */
3612 dcmd->cmd = MFI_CMD_DCMD;
3613 dcmd->cmd_status = 0xFF;
3614 dcmd->sge_count = 1;
3615 dcmd->flags = MFI_FRAME_DIR_READ;
3618 dcmd->data_xfer_len = sc->current_map_sz;
3619 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3620 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3621 dcmd->sgl.sge32[0].length = sc->current_map_sz;
/* Issue synchronously (polled); mrsas_issue_polled() returns 0 on success. */
3623 if (!mrsas_issue_polled(sc, cmd))
3626 device_printf(sc->mrsas_dev,
3627 "Fail to send get LD map info cmd.\n");
3630 mrsas_release_mfi_cmd(cmd)
3636 * mrsas_sync_map_info: Get FW's ld_map structure input:
3637 * Adapter instance soft state
3639 * Issues an internal command (DCMD) to get the FW's controller PD list
3643 mrsas_sync_map_info(struct mrsas_softc *sc)
3646 struct mrsas_mfi_cmd *cmd;
3647 struct mrsas_dcmd_frame *dcmd;
3648 uint32_t size_sync_info, num_lds;
3649 MR_LD_TARGET_SYNC *target_map = NULL;
3650 MR_DRV_RAID_MAP_ALL *map;
3652 MR_LD_TARGET_SYNC *ld_sync;
3653 bus_addr_t map_phys_addr = 0;
3655 cmd = mrsas_get_mfi_cmd(sc);
3657 device_printf(sc->mrsas_dev,
3658 "Cannot alloc for sync map info cmd\n");
/* Read LD count from the currently active driver map. */
3661 map = sc->ld_drv_map[sc->map_id & 1];
3662 num_lds = map->raidMap.ldCount;
3664 dcmd = &cmd->frame->dcmd;
/* NOTE(review): size_sync_info is computed but not used in the visible
 * lines; confirm against upstream whether intervening uses were dropped. */
3665 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3666 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Build the per-LD sync records in the INACTIVE map buffer
 * ((map_id - 1) & 1) so the active map stays untouched. */
3668 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3669 memset(target_map, 0, sc->max_map_sz);
3671 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3673 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
/* One targetId/seqNum record per logical drive. */
3675 for (i = 0; i < num_lds; i++, ld_sync++) {
3676 raid = MR_LdRaidGet(i, map);
3677 ld_sync->targetId = MR_GetLDTgtId(i, map);
3678 ld_sync->seqNum = raid->seqNum;
/* DMA-write DCMD; PEND_FLAG in mbox.b[1] keeps it pending in firmware
 * until the map actually changes (completion triggers a map refresh). */
3681 dcmd->cmd = MFI_CMD_DCMD;
3682 dcmd->cmd_status = 0xFF;
3683 dcmd->sge_count = 1;
3684 dcmd->flags = MFI_FRAME_DIR_WRITE;
3687 dcmd->data_xfer_len = sc->current_map_sz;
3688 dcmd->mbox.b[0] = num_lds;
3689 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3690 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3691 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3692 dcmd->sgl.sge32[0].length = sc->current_map_sz;
/* The command stays outstanding; remember it so it can be aborted later
 * (e.g. from mrsas_shutdown_ctlr()). It is NOT released here. */
3694 sc->map_update_cmd = cmd;
3695 if (mrsas_issue_dcmd(sc, cmd)) {
3696 device_printf(sc->mrsas_dev,
3697 "Fail to send sync map info command.\n");
3704 * mrsas_get_pd_list: Returns FW's PD list structure input:
3705 * Adapter soft state
3707 * Issues an internal command (DCMD) to get the FW's controller PD list
3708 * structure. This information is mainly used to find out about system
3709 * supported by Firmware.
3712 mrsas_get_pd_list(struct mrsas_softc *sc)
3714 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
3715 struct mrsas_mfi_cmd *cmd;
3716 struct mrsas_dcmd_frame *dcmd;
3717 struct MR_PD_LIST *pd_list_mem;
3718 struct MR_PD_ADDRESS *pd_addr;
3719 bus_addr_t pd_list_phys_addr = 0;
3720 struct mrsas_tmp_dcmd *tcmd;
3722 cmd = mrsas_get_mfi_cmd(sc);
3724 device_printf(sc->mrsas_dev,
3725 "Cannot alloc for get PD list cmd\n");
3728 dcmd = &cmd->frame->dcmd;
3730 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3731 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3732 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3733 device_printf(sc->mrsas_dev,
3734 "Cannot alloc dmamap for get PD list cmd\n");
3735 mrsas_release_mfi_cmd(cmd);
3738 pd_list_mem = tcmd->tmp_dcmd_mem;
3739 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3741 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3743 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3744 dcmd->mbox.b[1] = 0;
3745 dcmd->cmd = MFI_CMD_DCMD;
3746 dcmd->cmd_status = 0xFF;
3747 dcmd->sge_count = 1;
3748 dcmd->flags = MFI_FRAME_DIR_READ;
3751 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3752 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3753 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3754 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3756 if (!mrsas_issue_polled(sc, cmd))
3761 /* Get the instance PD list */
3762 pd_count = MRSAS_MAX_PD;
3763 pd_addr = pd_list_mem->addr;
3764 if (retcode == 0 && pd_list_mem->count < pd_count) {
3765 memset(sc->local_pd_list, 0,
3766 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3767 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3768 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3769 sc->local_pd_list[pd_addr->deviceId].driveType =
3770 pd_addr->scsiDevType;
3771 sc->local_pd_list[pd_addr->deviceId].driveState =
3777 * Use mutext/spinlock if pd_list component size increase more than
3780 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3781 mrsas_free_tmp_dcmd(tcmd);
3782 mrsas_release_mfi_cmd(cmd);
3783 free(tcmd, M_MRSAS);
3788 * mrsas_get_ld_list: Returns FW's LD list structure input:
3789 * Adapter soft state
3791 * Issues an internal command (DCMD) to get the FW's controller PD list
3792 * structure. This information is mainly used to find out about the LDs supported by
3796 mrsas_get_ld_list(struct mrsas_softc *sc)
3798 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3799 struct mrsas_mfi_cmd *cmd;
3800 struct mrsas_dcmd_frame *dcmd;
3801 struct MR_LD_LIST *ld_list_mem;
3802 bus_addr_t ld_list_phys_addr = 0;
3803 struct mrsas_tmp_dcmd *tcmd;
3805 cmd = mrsas_get_mfi_cmd(sc);
3807 device_printf(sc->mrsas_dev,
3808 "Cannot alloc for get LD list cmd\n");
3811 dcmd = &cmd->frame->dcmd;
3813 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3814 ld_list_size = sizeof(struct MR_LD_LIST);
3815 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3816 device_printf(sc->mrsas_dev,
3817 "Cannot alloc dmamap for get LD list cmd\n");
3818 mrsas_release_mfi_cmd(cmd);
3821 ld_list_mem = tcmd->tmp_dcmd_mem;
3822 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3824 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3826 if (sc->max256vdSupport)
3827 dcmd->mbox.b[0] = 1;
3829 dcmd->cmd = MFI_CMD_DCMD;
3830 dcmd->cmd_status = 0xFF;
3831 dcmd->sge_count = 1;
3832 dcmd->flags = MFI_FRAME_DIR_READ;
3834 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3835 dcmd->opcode = MR_DCMD_LD_GET_LIST;
3836 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3837 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3840 if (!mrsas_issue_polled(sc, cmd))
3846 printf("Number of LDs %d\n", ld_list_mem->ldCount);
3849 /* Get the instance LD list */
3850 if ((retcode == 0) &&
3851 (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) {
3852 sc->CurLdCount = ld_list_mem->ldCount;
3853 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
3854 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3855 if (ld_list_mem->ldList[ld_index].state != 0) {
3856 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3857 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3861 mrsas_free_tmp_dcmd(tcmd);
3862 mrsas_release_mfi_cmd(cmd);
3863 free(tcmd, M_MRSAS);
3868 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
3869 * Adapter soft state Temp command Size of alloction
3871 * Allocates DMAable memory for a temporary internal command. The allocated
3872 * memory is initialized to all zeros upon successful loading of the dma
3876 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
3877 struct mrsas_tmp_dcmd *tcmd, int size)
/* NOTE(review): several bus_dma_tag_create() arguments are missing from this
 * extraction (numbering jumps 3881 -> 3889); compare with upstream mrsas.c. */
/* Create a 32-bit-addressable DMA tag for the temporary buffer. */
3879 if (bus_dma_tag_create(sc->mrsas_parent_tag,
3881 BUS_SPACE_MAXADDR_32BIT,
3889 &tcmd->tmp_dcmd_tag)) {
3890 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
/* Allocate DMA-safe memory and its map under the new tag. */
3893 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3894 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3895 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
/* Load the map; mrsas_addr_cb deposits the bus address in
 * tmp_dcmd_phys_addr for use in the DCMD scatter/gather entry. */
3898 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3899 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3900 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3901 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
/* Hand back a zeroed buffer, as the DCMD consumers expect. */
3904 memset(tcmd->tmp_dcmd_mem, 0, size);
3909 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
3910 * temporary dcmd pointer
3912 * Deallocates memory of the temporary command for use in the construction of
3913 * the internal DCMD.
3916 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
/* Tear down in reverse order of mrsas_alloc_tmp_dcmd(): unload the DMA map,
 * free the DMA memory, then destroy the tag. Each step is guarded so a
 * partially-constructed tcmd can be freed safely. */
3918 if (tmp->tmp_dcmd_phys_addr)
3919 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3920 if (tmp->tmp_dcmd_mem != NULL)
3921 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3922 if (tmp->tmp_dcmd_tag != NULL)
3923 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3927 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
3928 * Adapter soft state Previously issued cmd to be aborted
3930 * This function is used to abort previously issued commands, such as AEN and
3931 * RAID map sync map commands. The abort command is sent as a DCMD internal
3932 * command and subsequently the driver will wait for a return status. The
3933 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3936 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3937 struct mrsas_mfi_cmd *cmd_to_abort)
3939 struct mrsas_mfi_cmd *cmd;
3940 struct mrsas_abort_frame *abort_fr;
3941 u_int8_t retcode = 0;
3942 unsigned long total_time = 0;
3943 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3945 cmd = mrsas_get_mfi_cmd(sc);
3947 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3950 abort_fr = &cmd->frame->abort;
3952 /* Prepare and issue the abort frame */
3953 abort_fr->cmd = MFI_CMD_ABORT;
3954 abort_fr->cmd_status = 0xFF;
3955 abort_fr->flags = 0;
/* Identify the victim by its command index and frame bus address. */
3956 abort_fr->abort_context = cmd_to_abort->index;
3957 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3958 abort_fr->abort_mfi_phys_addr_hi = 0;
/* 0xFF is the "still pending" sentinel cleared by mrsas_complete_abort(). */
3961 cmd->cmd_status = 0xFF;
3963 if (mrsas_issue_dcmd(sc, cmd)) {
3964 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3967 /* Wait for this cmd to complete */
3968 sc->chan = (void *)&cmd;
/* Sleep in 1-second (hz) ticks until the status changes or we time out.
 * NOTE(review): the loop construct itself is missing from this extraction
 * (numbering jumps 3968 -> 3970); verify against upstream. */
3970 if (cmd->cmd_status == 0xFF) {
3971 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3975 if (total_time >= max_wait) {
3976 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3983 mrsas_release_mfi_cmd(cmd);
3988 * mrsas_complete_abort: Completes aborting a command input:
3989 * Adapter soft state Cmd that was issued to abort another cmd
3991 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
3992 * change after sending the command. This function is called from
3993 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3996 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/* Only synchronous (blocked) aborts have a sleeper to wake. */
3998 if (cmd->sync_cmd) {
/* Clear the 0xFF "pending" sentinel so the waiter's status check passes,
 * then wake the thread sleeping on sc->chan in
 * mrsas_issue_blocked_abort_cmd(). */
4000 cmd->cmd_status = 0;
4001 sc->chan = (void *)&cmd;
4002 wakeup_one((void *)&sc->chan);
4008 * mrsas_aen_handler: AEN processing callback function from thread context
4009 * input: Adapter soft state
4011 * Asynchronous event handler
4014 mrsas_aen_handler(struct mrsas_softc *sc)
4016 union mrsas_evt_class_locale class_locale;
4022 device_printf(sc->mrsas_dev, "invalid instance!\n");
4025 if (sc->evt_detail_mem) {
/* Dispatch on the firmware event code; refresh device lists and rescan
 * the affected CAM SIM (sim_1 = physical drives, sim_0 = logical drives,
 * per the calls below). */
4026 switch (sc->evt_detail_mem->code) {
4027 case MR_EVT_PD_INSERTED:
4028 mrsas_get_pd_list(sc);
4029 mrsas_bus_scan_sim(sc, sc->sim_1);
4032 case MR_EVT_PD_REMOVED:
4033 mrsas_get_pd_list(sc);
4034 mrsas_bus_scan_sim(sc, sc->sim_1);
4037 case MR_EVT_LD_OFFLINE:
4038 case MR_EVT_CFG_CLEARED:
4039 case MR_EVT_LD_DELETED:
4040 mrsas_bus_scan_sim(sc, sc->sim_0);
4043 case MR_EVT_LD_CREATED:
4044 mrsas_get_ld_list(sc);
4045 mrsas_bus_scan_sim(sc, sc->sim_0);
/* Broad configuration changes: fall through to a full refresh of both
 * PD and LD lists and a rescan of both SIMs below. */
4048 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4049 case MR_EVT_FOREIGN_CFG_IMPORTED:
4050 case MR_EVT_LD_STATE_CHANGE:
4058 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4062 mrsas_get_pd_list(sc);
4063 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4064 mrsas_bus_scan_sim(sc, sc->sim_1);
4065 mrsas_get_ld_list(sc);
4066 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4067 mrsas_bus_scan_sim(sc, sc->sim_0);
/* Re-arm: register for the next event after the one just handled. */
4069 seq_num = sc->evt_detail_mem->seq_num + 1;
4071 /* Register AEN with FW for latest sequence number plus 1 */
4072 class_locale.members.reserved = 0;
4073 class_locale.members.locale = MR_EVT_LOCALE_ALL;
4074 class_locale.members.class = MR_EVT_CLASS_DEBUG;
4076 if (sc->aen_cmd != NULL)
4079 mtx_lock(&sc->aen_lock);
4080 error = mrsas_register_aen(sc, seq_num,
4082 mtx_unlock(&sc->aen_lock);
4085 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4091 * mrsas_complete_aen: Completes AEN command
4092 * input: Adapter soft state
4093 * Cmd that was issued to abort another cmd
4095 * This function will be called from ISR and will continue event processing from
4096 * thread context by enqueuing task in ev_tq (callback function
4097 * "mrsas_aen_handler").
4100 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4103 * Don't signal app if it is just an aborted previously registered
/* Only a genuine (non-aborted) AEN on a live adapter notifies pollers. */
4106 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4107 sc->mrsas_aen_triggered = 1;
4108 mtx_lock(&sc->aen_lock);
/* Wake any userland poll()/select() waiter on the control device. */
4109 if (sc->mrsas_poll_waiting) {
4110 sc->mrsas_poll_waiting = 0;
4111 selwakeup(&sc->mrsas_select);
4113 mtx_unlock(&sc->aen_lock);
4118 mrsas_release_mfi_cmd(cmd);
/* Continue event processing in thread context via the ev_tq taskqueue
 * (callback: mrsas_aen_handler). */
4120 if (!sc->remove_in_progress)
4121 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
/* Newbus glue: device method table, driver declaration, and module
 * registration for the mrsas(4) PCI driver. */
4126 static device_method_t mrsas_methods[] = {
4127 DEVMETHOD(device_probe, mrsas_probe),
4128 DEVMETHOD(device_attach, mrsas_attach),
4129 DEVMETHOD(device_detach, mrsas_detach),
4130 DEVMETHOD(device_suspend, mrsas_suspend),
4131 DEVMETHOD(device_resume, mrsas_resume),
4132 DEVMETHOD(bus_print_child, bus_generic_print_child),
4133 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
4137 static driver_t mrsas_driver = {
4140 sizeof(struct mrsas_softc)
4143 static devclass_t mrsas_devclass;
/* Attach the driver to the pci bus and declare the CAM dependency. */
4145 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
4146 MODULE_DEPEND(mrsas, cam, 1, 1, 1);