2 * Copyright (c) 2014, LSI Corp.
5 * Support: freebsdraid@lsi.com
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies, either expressed or implied, of the FreeBSD Project.
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 * ATTN: MegaRaid FreeBSD
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <dev/mrsas/mrsas.h>
48 #include <dev/mrsas/mrsas_ioctl.h>
51 #include <cam/cam_ccb.h>
53 #include <sys/sysctl.h>
54 #include <sys/types.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
62 static d_open_t mrsas_open;
63 static d_close_t mrsas_close;
64 static d_read_t mrsas_read;
65 static d_write_t mrsas_write;
66 static d_ioctl_t mrsas_ioctl;
68 static struct mrsas_ident *mrsas_find_ident(device_t);
69 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
70 static void mrsas_flush_cache(struct mrsas_softc *sc);
71 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
72 static void mrsas_ocr_thread(void *arg);
73 static int mrsas_get_map_info(struct mrsas_softc *sc);
74 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
75 static int mrsas_sync_map_info(struct mrsas_softc *sc);
76 static int mrsas_get_pd_list(struct mrsas_softc *sc);
77 static int mrsas_get_ld_list(struct mrsas_softc *sc);
78 static int mrsas_setup_irq(struct mrsas_softc *sc);
79 static int mrsas_alloc_mem(struct mrsas_softc *sc);
80 static int mrsas_init_fw(struct mrsas_softc *sc);
81 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
82 static int mrsas_complete_cmd(struct mrsas_softc *sc);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
85 struct mrsas_ctrl_info *ctrl_info);
86 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
87 struct mrsas_mfi_cmd *cmd_to_abort);
88 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
89 u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
90 struct mrsas_mfi_cmd *mfi_cmd);
91 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
92 int mrsas_init_adapter(struct mrsas_softc *sc);
93 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
94 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
95 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
96 int mrsas_ioc_init(struct mrsas_softc *sc);
97 int mrsas_bus_scan(struct mrsas_softc *sc);
98 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
99 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
100 int mrsas_reset_ctrl(struct mrsas_softc *sc);
101 int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
102 int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
103 struct mrsas_mfi_cmd *cmd);
104 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
106 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
107 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
108 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 void mrsas_disable_intr(struct mrsas_softc *sc);
111 void mrsas_enable_intr(struct mrsas_softc *sc);
112 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
113 void mrsas_free_mem(struct mrsas_softc *sc);
114 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
115 void mrsas_isr(void *arg);
116 void mrsas_teardown_intr(struct mrsas_softc *sc);
117 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
118 void mrsas_kill_hba (struct mrsas_softc *sc);
119 void mrsas_aen_handler(struct mrsas_softc *sc);
120 void mrsas_write_reg(struct mrsas_softc *sc, int offset,
122 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
123 u_int32_t req_desc_hi);
124 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
125 void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
126 struct mrsas_mfi_cmd *cmd, u_int8_t status);
127 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
129 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
130 MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
131 struct mrsas_mfi_cmd *cmd);
133 extern int mrsas_cam_attach(struct mrsas_softc *sc);
134 extern void mrsas_cam_detach(struct mrsas_softc *sc);
135 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
136 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
137 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
138 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
139 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
140 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
141 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
142 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
143 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
144 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
145 extern void mrsas_xpt_release(struct mrsas_softc *sc);
146 extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
148 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
149 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
150 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
/*
 * NOTE(review): this file is a line-numbered listing extract; the leading
 * integers are listing numbers and some original lines are elided.
 */
/* Root sysctl node: hw.mrsas.* */
151 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
155 * PCI device struct and table
/* PCI ID match entry; struct members and table terminator are elided here. */
158 typedef struct mrsas_ident {
166 MRSAS_CTLR_ID device_table[] = {
167 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
168 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
169 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
174 * Character device entry points
/* Switch table wiring /dev/mrsasN to the cdev handlers declared above. */
177 static struct cdevsw mrsas_cdevsw = {
178 .d_version = D_VERSION,
179 .d_open = mrsas_open,
180 .d_close = mrsas_close,
181 .d_read = mrsas_read,
182 .d_write = mrsas_write,
183 .d_ioctl = mrsas_ioctl,
/* Malloc type used for all driver allocations via free(..., M_MRSAS). */
187 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
190 * In the cdevsw routines, we find our softc by using the si_drv1 member
191 * of struct cdev. We set this variable to point to our softc in our
192 * attach routine when we create the /dev entry.
/*
 * Minimal cdev entry points; the bodies (softc lookup / return) are elided
 * from this extract.
 */
195 mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
197 struct mrsas_softc *sc;
204 mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
206 struct mrsas_softc *sc;
213 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
215 struct mrsas_softc *sc;
221 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
223 struct mrsas_softc *sc;
230 * Register Read/Write Functions
/*
 * 32-bit MMIO accessors over the BAR mapped in mrsas_attach() (bus_tag /
 * bus_handle cached in the softc).  The 'value' parameter line of
 * mrsas_write_reg is elided from this extract.
 */
234 mrsas_write_reg(struct mrsas_softc *sc, int offset,
237 bus_space_tag_t bus_tag = sc->bus_tag;
238 bus_space_handle_t bus_handle = sc->bus_handle;
240 bus_space_write_4(bus_tag, bus_handle, offset, value);
/* Read a 32-bit controller register at 'offset' into the register window. */
244 mrsas_read_reg(struct mrsas_softc *sc, int offset)
246 bus_space_tag_t bus_tag = sc->bus_tag;
247 bus_space_handle_t bus_handle = sc->bus_handle;
249 return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
254 * Interrupt Disable/Enable/Clear Functions
/* Mask all controller interrupts; the read-back forces a PCI flush. */
257 void mrsas_disable_intr(struct mrsas_softc *sc)
259 u_int32_t mask = 0xFFFFFFFF;
262 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
263 /* Dummy read to force pci flush */
264 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * Enable reply interrupts: first clear any pending status, then unmask
 * the Fusion reply-interrupt bits.  Read-backs flush the writes.
 */
267 void mrsas_enable_intr(struct mrsas_softc *sc)
269 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
272 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
273 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
275 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
276 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
/*
 * Acknowledge a pending interrupt.  Handles FW state-change interrupts
 * (waking the OCR thread on a FAULT) and distinguishes reply interrupts
 * from ones that are not ours; return-value lines are elided here.
 */
279 static int mrsas_clear_intr(struct mrsas_softc *sc)
281 u_int32_t status, fw_status, fw_state;
283 /* Read received interrupt */
284 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
286 /* If FW state change interrupt is received, write to it again to clear */
287 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
288 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
289 outbound_scratch_pad));
290 fw_state = fw_status & MFI_STATE_MASK;
291 if (fw_state == MFI_STATE_FAULT) {
292 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
293 if(sc->ocr_thread_active)
294 wakeup(&sc->ocr_chan);
296 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
297 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
301 /* Not our interrupt, so just return */
302 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
305 /* We got a reply interrupt */
310 * PCI Support Functions
/*
 * Walk device_table for an entry matching this device's PCI IDs; 0xffff
 * sub-IDs act as wildcards.  Match/return lines are elided in this extract.
 */
313 static struct mrsas_ident * mrsas_find_ident(device_t dev)
315 struct mrsas_ident *pci_device;
317 for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
319 if ((pci_device->vendor == pci_get_vendor(dev)) &&
320 (pci_device->device == pci_get_device(dev)) &&
321 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
322 (pci_device->subvendor == 0xffff)) &&
323 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
324 (pci_device->subdevice == 0xffff)))
/*
 * newbus probe: claim the device when its IDs are in device_table, print
 * the driver banner once (first_ctrl), and set the device description.
 */
330 static int mrsas_probe(device_t dev)
332 static u_int8_t first_ctrl = 1;
333 struct mrsas_ident *id;
335 if ((id = mrsas_find_ident(dev)) != NULL) {
337 printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION);
340 device_set_desc(dev, id->desc);
341 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
348 * mrsas_setup_sysctl: setup sysctl values for mrsas
349 * input: Adapter instance soft state
351 * Setup sysctl entries for mrsas driver.
354 mrsas_setup_sysctl(struct mrsas_softc *sc)
356 struct sysctl_ctx_list *sysctl_ctx = NULL;
357 struct sysctl_oid *sysctl_tree = NULL;
358 char tmpstr[80], tmpstr2[80];
361 * Setup the sysctl variable so the user can change the debug level
364 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
365 device_get_unit(sc->mrsas_dev));
366 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
/* Prefer the newbus-provided sysctl context/tree for this device ... */
368 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
369 if (sysctl_ctx != NULL)
370 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
/* ... otherwise create our own node under hw.mrsas.<unit>. */
372 if (sysctl_tree == NULL) {
373 sysctl_ctx_init(&sc->sysctl_ctx);
374 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
375 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
376 CTLFLAG_RD, 0, tmpstr);
377 if (sc->sysctl_tree == NULL)
379 sysctl_ctx = &sc->sysctl_ctx;
380 sysctl_tree = sc->sysctl_tree;
/* Per-controller tunables and read-only counters exported below. */
382 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
383 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
384 "Disable the use of OCR");
386 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
387 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
388 strlen(MRSAS_VERSION), "driver version");
390 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
391 OID_AUTO, "reset_count", CTLFLAG_RD,
392 &sc->reset_count, 0, "number of ocr from start of the day");
394 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
395 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
396 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
398 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
399 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
400 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
402 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
403 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
404 "Driver debug level");
406 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
407 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
408 0, "Driver IO timeout value in mili-second.");
410 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
411 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
412 &sc->mrsas_fw_fault_check_delay,
413 0, "FW fault check thread delay in seconds. <default is 1 sec>");
415 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
416 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
417 &sc->reset_in_progress, 0, "ocr in progress status");
422 * mrsas_get_tunables: get tunable parameters.
423 * input: Adapter instance soft state
425 * Get tunable parameters. This will help to debug driver at boot time.
428 mrsas_get_tunables(struct mrsas_softc *sc)
/* Defaults first; loader tunables below may override them. */
432 /* XXX default to some debugging for now */
433 sc->mrsas_debug = MRSAS_FAULT;
434 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
435 sc->mrsas_fw_fault_check_delay = 1;
437 sc->reset_in_progress = 0;
440 * Grab the global variables.
442 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
444 /* Grab the unit-instance variables */
/* dev.mrsas.<unit>.debug_level takes precedence over the global knob. */
445 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
446 device_get_unit(sc->mrsas_dev));
447 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
451 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
452 * Used to get sequence number at driver load time.
453 * input: Adapter soft state
455 * Allocates DMAable memory for the event log info internal command.
457 int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
461 /* Allocate get event log info command */
462 el_info_size = sizeof(struct mrsas_evt_log_info);
/* Tag -> memory -> load: standard busdma triple for a small DMA buffer. */
463 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
464 1, 0, // algnmnt, boundary
465 BUS_SPACE_MAXADDR_32BIT,// lowaddr
466 BUS_SPACE_MAXADDR, // highaddr
467 NULL, NULL, // filter, filterarg
468 el_info_size, // maxsize
470 el_info_size, // maxsegsize
471 BUS_DMA_ALLOCNOW, // flags
472 NULL, NULL, // lockfunc, lockarg
474 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
477 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
478 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
479 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
/* mrsas_addr_cb stashes the single segment's bus address. */
482 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
483 sc->el_info_mem, el_info_size, mrsas_addr_cb,
484 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
485 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
489 memset(sc->el_info_mem, 0, el_info_size);
494 * mrsas_free_evt_info_cmd: Free memory for Event log info command
495 * input: Adapter soft state
497 * Deallocates memory for the event log info internal command.
499 void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
/* Unload/free/destroy in reverse order of allocation; each step guarded. */
501 if (sc->el_info_phys_addr)
502 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
503 if (sc->el_info_mem != NULL)
504 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
505 if (sc->el_info_tag != NULL)
506 bus_dma_tag_destroy(sc->el_info_tag);
510 * mrsas_get_seq_num: Get latest event sequence number
511 * @sc: Adapter soft state
512 * @eli: Firmware event log sequence number information.
513 * Firmware maintains a log of all events in a non-volatile area.
514 * Driver get the sequence number using DCMD
515 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
519 mrsas_get_seq_num(struct mrsas_softc *sc,
520 struct mrsas_evt_log_info *eli)
522 struct mrsas_mfi_cmd *cmd;
523 struct mrsas_dcmd_frame *dcmd;
525 cmd = mrsas_get_mfi_cmd(sc);
528 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
532 dcmd = &cmd->frame->dcmd;
/* DMA buffer for the firmware to write the event-log info into. */
534 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
535 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
536 mrsas_release_mfi_cmd(cmd);
/* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame (direction: FW -> host). */
540 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
542 dcmd->cmd = MFI_CMD_DCMD;
543 dcmd->cmd_status = 0x0;
545 dcmd->flags = MFI_FRAME_DIR_READ;
548 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
549 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
550 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
551 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
/* Blocking issue; on return the DMA buffer holds the log info. */
553 mrsas_issue_blocked_cmd(sc, cmd);
556 * Copy the data back into callers buffer
558 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
559 mrsas_free_evt_log_info_cmd(sc);
560 mrsas_release_mfi_cmd(cmd);
567 * mrsas_register_aen: Register for asynchronous event notification
568 * @sc: Adapter soft state
569 * @seq_num: Starting sequence number
570 * @class_locale: Class of the event
571 * This function subscribes for events beyond the @seq_num
572 * and type @class_locale.
576 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
577 u_int32_t class_locale_word)
580 struct mrsas_mfi_cmd *cmd;
581 struct mrsas_dcmd_frame *dcmd;
582 union mrsas_evt_class_locale curr_aen;
583 union mrsas_evt_class_locale prev_aen;
586 * If there an AEN pending already (aen_cmd), check if the
587 * class_locale of that pending AEN is inclusive of the new
588 * AEN request we currently have. If it is, then we don't have
589 * to do anything. In other words, whichever events the current
590 * AEN request is subscribing to, have already been subscribed
592 * If the old_cmd is _not_ inclusive, then we have to abort
593 * that command, form a class_locale that is superset of both
594 * old and current and re-issue to the FW
597 curr_aen.word = class_locale_word;
601 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
604 * A class whose enum value is smaller is inclusive of all
605 * higher values. If a PROGRESS (= -1) was previously
606 * registered, then a new registration requests for higher
607 * classes need not be sent to FW. They are automatically
609 * Locale numbers don't have such hierarchy. They are bitmap values
611 if ((prev_aen.members.class <= curr_aen.members.class) &&
612 !((prev_aen.members.locale & curr_aen.members.locale) ^
613 curr_aen.members.locale)) {
615 * Previously issued event registration includes
616 * current request. Nothing to do.
/* Merge locales (bitmap OR) and take the smaller (broader) class. */
620 curr_aen.members.locale |= prev_aen.members.locale;
622 if (prev_aen.members.class < curr_aen.members.class)
623 curr_aen.members.class = prev_aen.members.class;
/* Abort the pending AEN so the merged registration can be issued. */
625 sc->aen_cmd->abort_aen = 1;
626 ret_val = mrsas_issue_blocked_abort_cmd(sc,
630 printf("mrsas: Failed to abort "
631 "previous AEN command\n");
637 cmd = mrsas_get_mfi_cmd(sc);
642 dcmd = &cmd->frame->dcmd;
644 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
647 * Prepare DCMD for aen registration
649 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
651 dcmd->cmd = MFI_CMD_DCMD;
652 dcmd->cmd_status = 0x0;
654 dcmd->flags = MFI_FRAME_DIR_READ;
657 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
658 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
659 dcmd->mbox.w[0] = seq_num;
660 sc->last_seq_num = seq_num;
661 dcmd->mbox.w[1] = curr_aen.word;
662 dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
663 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
/* Another AEN raced in while we were building this one; back off. */
665 if (sc->aen_cmd != NULL) {
666 mrsas_release_mfi_cmd(cmd);
671 * Store reference to the cmd used to register for AEN. When an
672 * application wants us to register for AEN, we have to abort this
673 * cmd and re-register with a new EVENT LOCALE supplied by that app
678 Issue the aen registration frame
680 if (mrsas_issue_dcmd(sc, cmd)){
681 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
688 * mrsas_start_aen - Subscribes to AEN during driver load time
689 * @instance: Adapter soft state
691 static int mrsas_start_aen(struct mrsas_softc *sc)
693 struct mrsas_evt_log_info eli;
694 union mrsas_evt_class_locale class_locale;
697 /* Get the latest sequence number from FW*/
699 memset(&eli, 0, sizeof(eli));
701 if (mrsas_get_seq_num(sc, &eli))
704 /* Register AEN with FW for latest sequence number plus 1*/
705 class_locale.members.reserved = 0;
706 class_locale.members.locale = MR_EVT_LOCALE_ALL;
707 class_locale.members.class = MR_EVT_CLASS_DEBUG;
709 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
714 * mrsas_attach: PCI entry point
715 * input: device struct pointer
717 * Performs setup of PCI and registers, initializes mutexes and
718 * linked lists, registers interrupts and CAM, and initializes
719 * the adapter/controller to its proper state.
721 static int mrsas_attach(device_t dev)
723 struct mrsas_softc *sc = device_get_softc(dev);
724 uint32_t cmd, bar, error;
726 /* Look up our softc and initialize its fields. */
728 sc->device_id = pci_get_device(dev);
730 mrsas_get_tunables(sc);
733 * Set up PCI and registers
735 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
736 if ( (cmd & PCIM_CMD_PORTEN) == 0) {
739 /* Force the busmaster enable bit on. */
740 cmd |= PCIM_CMD_BUSMASTEREN;
741 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
743 //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
744 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
/* Map the controller register window (BAR1). */
746 sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
747 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
748 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
750 device_printf(dev, "Cannot allocate PCI registers\n");
753 sc->bus_tag = rman_get_bustag(sc->reg_res);
754 sc->bus_handle = rman_get_bushandle(sc->reg_res);
756 /* Initialize mutexes */
757 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
758 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
759 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
760 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
761 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
762 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
763 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
764 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
766 /* Initialize linked list */
767 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
768 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
770 atomic_set(&sc->fw_outstanding,0);
772 sc->io_cmds_highwater = 0;
774 /* Create a /dev entry for this device. */
775 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
776 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
777 device_get_unit(dev));
779 sc->mrsas_cdev->si_drv1 = sc;
781 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
782 sc->UnevenSpanSupport = 0;
784 /* Initialize Firmware */
785 if (mrsas_init_fw(sc) != SUCCESS) {
789 /* Register SCSI mid-layer */
790 if ((mrsas_cam_attach(sc) != SUCCESS)) {
791 goto attach_fail_cam;
795 if (mrsas_setup_irq(sc) != SUCCESS) {
796 goto attach_fail_irq;
799 /* Enable Interrupts */
800 mrsas_enable_intr(sc);
/* Kernel thread that performs online controller reset (OCR) recovery. */
802 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
803 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
804 device_get_unit(sc->mrsas_dev));
806 printf("Error %d starting rescan thread\n", error);
807 goto attach_fail_irq;
810 mrsas_setup_sysctl(sc);
812 /* Initiate AEN (Asynchronous Event Notification)*/
814 if (mrsas_start_aen(sc)) {
815 printf("Error: start aen failed\n");
/* Error unwind: release resources in reverse order of acquisition. */
823 mrsas_teardown_intr(sc);
825 mrsas_cam_detach(sc);
827 //attach_fail_raidmap:
829 mtx_destroy(&sc->sim_lock);
830 mtx_destroy(&sc->aen_lock);
831 mtx_destroy(&sc->pci_lock);
832 mtx_destroy(&sc->io_lock);
833 mtx_destroy(&sc->ioctl_lock);
834 mtx_destroy(&sc->mpt_cmd_pool_lock);
835 mtx_destroy(&sc->mfi_cmd_pool_lock);
836 mtx_destroy(&sc->raidmap_lock);
838 destroy_dev(sc->mrsas_cdev);
840 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
841 sc->reg_res_id, sc->reg_res);
847 * mrsas_detach: De-allocates and teardown resources
848 * input: device struct pointer
850 * This function is the entry point for device disconnect and detach. It
851 * performs memory de-allocations, shutdown of the controller and various
852 * teardown and destroy resource functions.
854 static int mrsas_detach(device_t dev)
856 struct mrsas_softc *sc;
859 sc = device_get_softc(dev);
860 sc->remove_in_progress = 1;
/* Wake the OCR thread so it can notice remove_in_progress and exit. */
861 if(sc->ocr_thread_active)
862 wakeup(&sc->ocr_chan);
/* Wait for any in-flight OCR to finish before tearing anything down. */
863 while(sc->reset_in_progress){
865 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
866 mrsas_dprint(sc, MRSAS_INFO,
867 "[%2d]waiting for ocr to be finished\n",i);
869 pause("mr_shutdown", hz);
/* Then wait for the OCR thread itself to terminate. */
872 while(sc->ocr_thread_active){
874 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
875 mrsas_dprint(sc, MRSAS_INFO,
877 "mrsas_ocr thread to quit ocr %d\n",i,
878 sc->ocr_thread_active);
880 pause("mr_shutdown", hz);
/* Quiesce the controller, then release resources in reverse order. */
882 mrsas_flush_cache(sc);
883 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
884 mrsas_disable_intr(sc);
885 mrsas_cam_detach(sc);
886 mrsas_teardown_intr(sc);
888 mtx_destroy(&sc->sim_lock);
889 mtx_destroy(&sc->aen_lock);
890 mtx_destroy(&sc->pci_lock);
891 mtx_destroy(&sc->io_lock);
892 mtx_destroy(&sc->ioctl_lock);
893 mtx_destroy(&sc->mpt_cmd_pool_lock);
894 mtx_destroy(&sc->mfi_cmd_pool_lock);
895 mtx_destroy(&sc->raidmap_lock);
897 bus_release_resource(sc->mrsas_dev,
898 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
900 destroy_dev(sc->mrsas_cdev);
901 if (sc->sysctl_tree != NULL)
902 sysctl_ctx_free(&sc->sysctl_ctx);
907 * mrsas_free_mem: Frees allocated memory
908 * input: Adapter instance soft state
910 * This function is called from mrsas_detach() to free previously allocated
913 void mrsas_free_mem(struct mrsas_softc *sc)
917 struct mrsas_mfi_cmd *mfi_cmd;
918 struct mrsas_mpt_cmd *mpt_cmd;
921 * Free RAID map memory
/* Two raid-map buffers (double-buffered across map updates). */
923 for (i=0; i < 2; i++)
925 if (sc->raidmap_phys_addr[i])
926 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
927 if (sc->raidmap_mem[i] != NULL)
928 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
929 if (sc->raidmap_tag[i] != NULL)
930 bus_dma_tag_destroy(sc->raidmap_tag[i]);
934 * Free version buffer memory
936 if (sc->verbuf_phys_addr)
937 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
938 if (sc->verbuf_mem != NULL)
939 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
940 if (sc->verbuf_tag != NULL)
941 bus_dma_tag_destroy(sc->verbuf_tag);
945 * Free sense buffer memory
947 if (sc->sense_phys_addr)
948 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
949 if (sc->sense_mem != NULL)
950 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
951 if (sc->sense_tag != NULL)
952 bus_dma_tag_destroy(sc->sense_tag);
955 * Free chain frame memory
957 if (sc->chain_frame_phys_addr)
958 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
959 if (sc->chain_frame_mem != NULL)
960 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
961 if (sc->chain_frame_tag != NULL)
962 bus_dma_tag_destroy(sc->chain_frame_tag);
965 * Free IO Request memory
967 if (sc->io_request_phys_addr)
968 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
969 if (sc->io_request_mem != NULL)
970 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
971 if (sc->io_request_tag != NULL)
972 bus_dma_tag_destroy(sc->io_request_tag);
975 * Free Reply Descriptor memory
977 if (sc->reply_desc_phys_addr)
978 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
979 if (sc->reply_desc_mem != NULL)
980 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
981 if (sc->reply_desc_tag != NULL)
982 bus_dma_tag_destroy(sc->reply_desc_tag);
985 * Free event detail memory
987 if (sc->evt_detail_phys_addr)
988 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
989 if (sc->evt_detail_mem != NULL)
990 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
991 if (sc->evt_detail_tag != NULL)
992 bus_dma_tag_destroy(sc->evt_detail_tag);
/* Free the MFI frames first, then (below) the command structures. */
997 if (sc->mfi_cmd_list) {
998 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
999 mfi_cmd = sc->mfi_cmd_list[i];
1000 mrsas_free_frame(sc, mfi_cmd);
1003 if (sc->mficmd_frame_tag != NULL)
1004 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1007 * Free MPT internal command list
1009 max_cmd = sc->max_fw_cmds;
1010 if (sc->mpt_cmd_list) {
1011 for (i = 0; i < max_cmd; i++) {
1012 mpt_cmd = sc->mpt_cmd_list[i];
1013 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1014 free(sc->mpt_cmd_list[i], M_MRSAS);
1016 free(sc->mpt_cmd_list, M_MRSAS);
1017 sc->mpt_cmd_list = NULL;
1021 * Free MFI internal command list
1024 if (sc->mfi_cmd_list) {
1025 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1026 free(sc->mfi_cmd_list[i], M_MRSAS);
1028 free(sc->mfi_cmd_list, M_MRSAS);
1029 sc->mfi_cmd_list = NULL;
1033 * Free request descriptor memory
1035 free(sc->req_desc, M_MRSAS);
1036 sc->req_desc = NULL;
1039 * Destroy parent tag
1041 if (sc->mrsas_parent_tag != NULL)
1042 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1046 * mrsas_teardown_intr: Teardown interrupt
1047 * input: Adapter instance soft state
1049 * This function is called from mrsas_detach() to teardown and release
1050 * bus interrupt resource.
1052 void mrsas_teardown_intr(struct mrsas_softc *sc)
1054 if (sc->intr_handle)
1055 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
1056 if (sc->mrsas_irq != NULL)
1057 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
1058 sc->intr_handle = NULL;
1062 * mrsas_suspend: Suspend entry point
1063 * input: Device struct pointer
1065 * This function is the entry point for system suspend from the OS.
/* NOTE(review): suspend/resume bodies beyond the softc lookup are elided. */
1067 static int mrsas_suspend(device_t dev)
1069 struct mrsas_softc *sc;
1071 sc = device_get_softc(dev);
1076 * mrsas_resume: Resume entry point
1077 * input: Device struct pointer
1079 * This function is the entry point for system resume from the OS.
1081 static int mrsas_resume(device_t dev)
1083 struct mrsas_softc *sc;
1085 sc = device_get_softc(dev);
1090 * mrsas_ioctl: IOCtl commands entry point.
1092 * This function is the entry point for IOCtls from the OS. It calls the
1093 * appropriate function for processing depending on the command received.
1096 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1098 struct mrsas_softc *sc;
1101 sc = (struct mrsas_softc *)(dev->si_drv1);
/* Refuse new ioctls once detach/shutdown has begun. */
1103 if (sc->remove_in_progress) {
1104 mrsas_dprint(sc, MRSAS_INFO,
1105 "Driver remove or shutdown called.\n");
/* Check OCR state under the spin lock; fast path when no reset pending. */
1109 mtx_lock_spin(&sc->ioctl_lock);
1110 if (!sc->reset_in_progress) {
1111 mtx_unlock_spin(&sc->ioctl_lock);
1115 /* Release ioctl_lock, and wait for OCR
1117 mtx_unlock_spin(&sc->ioctl_lock);
1118 while(sc->reset_in_progress){
1120 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1121 mrsas_dprint(sc, MRSAS_INFO,
1123 "OCR to be finished %d\n",i,
1124 sc->ocr_thread_active);
1126 pause("mr_ioctl", hz);
/* Dispatch on the ioctl command code. */
1131 case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
1132 ret = mrsas_passthru(sc, (void *)arg);
1134 case MRSAS_IOC_SCAN_BUS:
1135 ret = mrsas_bus_scan(sc);
1143 * mrsas_setup_irq: Set up interrupt.
1144 * input: Adapter instance soft state
1146 * This function sets up interrupts as a bus resource, with flags indicating
1147 * resource permitting contemporaneous sharing and for resource to activate
1150 static int mrsas_setup_irq(struct mrsas_softc *sc)
1153 sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
1154 &sc->irq_id, RF_SHAREABLE | RF_ACTIVE);
1155 if (sc->mrsas_irq == NULL){
1156 device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
/* Hook mrsas_isr as an MPSAFE ithread handler (no filter routine). */
1159 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE|INTR_TYPE_CAM,
1160 NULL, mrsas_isr, sc, &sc->intr_handle)) {
1161 device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
1169 * mrsas_isr: ISR entry point
1170 * input: argument pointer
1172 * This function is the interrupt service routine entry point. There
1173 * are two types of interrupts, state change interrupt and response
1174 * interrupt. If an interrupt is not ours, we just return.
1176 void mrsas_isr(void *arg)
1178 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1181 /* Clear FW state change interrupt */
1182 status = mrsas_clear_intr(sc);
1184 /* Not our interrupt */
1188 /* If we are resetting, bail */
1189 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1190 printf(" Entered into ISR when OCR is going active. \n");
1191 mrsas_clear_intr(sc);
1194 /* Process for reply request and clear response interrupt */
1195 if (mrsas_complete_cmd(sc) != SUCCESS)
1196 mrsas_clear_intr(sc);
1202 * mrsas_complete_cmd: Process reply request
1203 * input: Adapter instance soft state
1205 * This function is called from mrsas_isr() to process reply request and
1206 * clear response interrupt. Processing of the reply request entails
1207 * walking through the reply descriptor array for the command request
1208 * pended from Firmware. We look at the Function field to determine
1209 * the command type and perform the appropriate action. Before we
1210 * return, we clear the response interrupt.
1212 static int mrsas_complete_cmd(struct mrsas_softc *sc)
1214 Mpi2ReplyDescriptorsUnion_t *desc;
1215 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1216 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1217 struct mrsas_mpt_cmd *cmd_mpt;
1218 struct mrsas_mfi_cmd *cmd_mfi;
1219 u_int8_t arm, reply_descript_type;
1220 u_int16_t smid, num_completed;
1221 u_int8_t status, extStatus;
1222 union desc_value desc_val;
1223 PLD_LOAD_BALANCE_INFO lbinfo;
1224 u_int32_t device_id;
1225 int threshold_reply_count = 0;
1228 /* If we have a hardware error, not need to continue */
1229 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1232 desc = sc->reply_desc_mem;
1233 desc += sc->last_reply_idx;
1235 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1237 desc_val.word = desc->Words;
1240 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1242 /* Find our reply descriptor for the command and process */
1243 while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
1245 smid = reply_desc->SMID;
1246 cmd_mpt = sc->mpt_cmd_list[smid -1];
1247 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
1249 status = scsi_io_req->RaidContext.status;
1250 extStatus = scsi_io_req->RaidContext.exStatus;
1252 switch (scsi_io_req->Function)
1254 case MPI2_FUNCTION_SCSI_IO_REQUEST : /*Fast Path IO.*/
1255 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1256 lbinfo = &sc->load_balance_info[device_id];
1257 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1258 arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
1259 atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
1260 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1262 //Fall thru and complete IO
1263 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1264 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1265 mrsas_cmd_done(sc, cmd_mpt);
1266 scsi_io_req->RaidContext.status = 0;
1267 scsi_io_req->RaidContext.exStatus = 0;
1268 atomic_dec(&sc->fw_outstanding);
1270 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
1271 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1272 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1274 mrsas_release_mpt_cmd(cmd_mpt);
1278 sc->last_reply_idx++;
1279 if (sc->last_reply_idx >= sc->reply_q_depth)
1280 sc->last_reply_idx = 0;
1282 desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
1284 threshold_reply_count++;
1286 /* Get the next reply descriptor */
1287 if (!sc->last_reply_idx)
1288 desc = sc->reply_desc_mem;
1292 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1293 desc_val.word = desc->Words;
1295 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1297 if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1301 * Write to reply post index after completing threshold reply count
1302 * and still there are more replies in reply queue pending to be
1305 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1306 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
1307 sc->last_reply_idx);
1308 threshold_reply_count = 0;
1312 /* No match, just return */
1313 if (num_completed == 0)
1316 /* Clear response interrupt */
1317 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
1323 * mrsas_map_mpt_cmd_status: Maps FW command status to a CAM CCB status.
1324 * input: Adapter instance soft state
1326 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1327 * It checks the command status and maps the appropriate CAM status for the CCB.
1329 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1331 struct mrsas_softc *sc = cmd->sc;
1332 u_int8_t *sense_data;
1336 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1338 case MFI_STAT_SCSI_IO_FAILED:
1339 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1340 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1341 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1343 /* For now just copy 18 bytes back */
1344 memcpy(sense_data, cmd->sense, 18);
1345 cmd->ccb_ptr->csio.sense_len = 18;
1346 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1349 case MFI_STAT_LD_OFFLINE:
1350 case MFI_STAT_DEVICE_NOT_FOUND:
1351 if (cmd->ccb_ptr->ccb_h.target_lun)
1352 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1354 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1356 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1357 /*send status to CAM layer to retry sending command without
1358 * decrementing retry counter*/
1359 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1362 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1363 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1364 cmd->ccb_ptr->csio.scsi_status = status;
1370 * mrsas_alloc_mem: Allocate DMAable memory.
1371 * input: Adapter instance soft state
1373 * This function creates the parent DMA tag and allocates DMAable memory.
1374 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
1375 * into Kernel virtual address. Callback argument is physical memory address.
1377 static int mrsas_alloc_mem(struct mrsas_softc *sc)
1379 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1380 chain_frame_size, evt_detail_size;
1383 * Allocate parent DMA tag
1385 if (bus_dma_tag_create(NULL, /* parent */
1388 BUS_SPACE_MAXADDR, /* lowaddr */
1389 BUS_SPACE_MAXADDR, /* highaddr */
1390 NULL, NULL, /* filter, filterarg */
1391 MRSAS_MAX_IO_SIZE,/* maxsize */
1392 MRSAS_MAX_SGL, /* nsegments */
1393 MRSAS_MAX_IO_SIZE,/* maxsegsize */
1395 NULL, NULL, /* lockfunc, lockarg */
1396 &sc->mrsas_parent_tag /* tag */
1398 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1403 * Allocate for version buffer
1405 verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
1406 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1407 1, 0, // algnmnt, boundary
1408 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1409 BUS_SPACE_MAXADDR, // highaddr
1410 NULL, NULL, // filter, filterarg
1411 verbuf_size, // maxsize
1413 verbuf_size, // maxsegsize
1414 BUS_DMA_ALLOCNOW, // flags
1415 NULL, NULL, // lockfunc, lockarg
1417 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1420 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1421 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1422 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1425 bzero(sc->verbuf_mem, verbuf_size);
1426 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1427 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
1428 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1433 * Allocate IO Request Frames
1435 io_req_size = sc->io_frames_alloc_sz;
1436 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1437 16, 0, // algnmnt, boundary
1438 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1439 BUS_SPACE_MAXADDR, // highaddr
1440 NULL, NULL, // filter, filterarg
1441 io_req_size, // maxsize
1443 io_req_size, // maxsegsize
1444 BUS_DMA_ALLOCNOW, // flags
1445 NULL, NULL, // lockfunc, lockarg
1446 &sc->io_request_tag)) {
1447 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1450 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1451 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1452 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1455 bzero(sc->io_request_mem, io_req_size);
1456 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1457 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1458 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1459 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1464 * Allocate Chain Frames
1466 chain_frame_size = sc->chain_frames_alloc_sz;
1467 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1468 4, 0, // algnmnt, boundary
1469 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1470 BUS_SPACE_MAXADDR, // highaddr
1471 NULL, NULL, // filter, filterarg
1472 chain_frame_size, // maxsize
1474 chain_frame_size, // maxsegsize
1475 BUS_DMA_ALLOCNOW, // flags
1476 NULL, NULL, // lockfunc, lockarg
1477 &sc->chain_frame_tag)) {
1478 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1481 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1482 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1483 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1486 bzero(sc->chain_frame_mem, chain_frame_size);
1487 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1488 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1489 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1490 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1495 * Allocate Reply Descriptor Array
1497 reply_desc_size = sc->reply_alloc_sz;
1498 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1499 16, 0, // algnmnt, boundary
1500 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1501 BUS_SPACE_MAXADDR, // highaddr
1502 NULL, NULL, // filter, filterarg
1503 reply_desc_size, // maxsize
1505 reply_desc_size, // maxsegsize
1506 BUS_DMA_ALLOCNOW, // flags
1507 NULL, NULL, // lockfunc, lockarg
1508 &sc->reply_desc_tag)) {
1509 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1512 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1513 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1514 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1517 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1518 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1519 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1520 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1525 * Allocate Sense Buffer Array. Keep in lower 4GB
1527 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1528 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1529 64, 0, // algnmnt, boundary
1530 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1531 BUS_SPACE_MAXADDR, // highaddr
1532 NULL, NULL, // filter, filterarg
1533 sense_size, // maxsize
1535 sense_size, // maxsegsize
1536 BUS_DMA_ALLOCNOW, // flags
1537 NULL, NULL, // lockfunc, lockarg
1539 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1542 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1543 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1544 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1547 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1548 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1550 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1555 * Allocate for Event detail structure
1557 evt_detail_size = sizeof(struct mrsas_evt_detail);
1558 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1559 1, 0, // algnmnt, boundary
1560 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1561 BUS_SPACE_MAXADDR, // highaddr
1562 NULL, NULL, // filter, filterarg
1563 evt_detail_size, // maxsize
1565 evt_detail_size, // maxsegsize
1566 BUS_DMA_ALLOCNOW, // flags
1567 NULL, NULL, // lockfunc, lockarg
1568 &sc->evt_detail_tag)) {
1569 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1572 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1573 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1574 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1577 bzero(sc->evt_detail_mem, evt_detail_size);
1578 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1579 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1580 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1581 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1587 * Create a dma tag for data buffers; size will be the maximum
1588 * possible I/O size (280kB).
1590 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1593 BUS_SPACE_MAXADDR, // lowaddr
1594 BUS_SPACE_MAXADDR, // highaddr
1595 NULL, NULL, // filter, filterarg
1596 MRSAS_MAX_IO_SIZE, // maxsize
1597 MRSAS_MAX_SGL, // nsegments
1598 MRSAS_MAX_IO_SIZE, // maxsegsize
1599 BUS_DMA_ALLOCNOW, // flags
1600 busdma_lock_mutex, // lockfunc
1601 &sc->io_lock, // lockfuncarg
1603 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1611 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1612 * input: callback argument,
1613 * machine dependent type that describes DMA segments,
1614 * number of segments,
1617 * This function is for the driver to receive mapping information resultant
1618 * of the bus_dmamap_load(). The information is actually not being used,
1619 * but the address is saved anyway.
1622 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1627 *addr = segs[0].ds_addr;
1631 * mrsas_setup_raidmap: Set up RAID map.
1632 * input: Adapter instance soft state
1634 * Allocate DMA memory for the RAID maps and perform setup.
1636 static int mrsas_setup_raidmap(struct mrsas_softc *sc)
1638 sc->map_sz = sizeof(MR_FW_RAID_MAP) +
1639 (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
1641 for (int i=0; i < 2; i++)
1643 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1644 4, 0, // algnmnt, boundary
1645 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1646 BUS_SPACE_MAXADDR, // highaddr
1647 NULL, NULL, // filter, filterarg
1648 sc->map_sz, // maxsize
1650 sc->map_sz, // maxsegsize
1651 BUS_DMA_ALLOCNOW, // flags
1652 NULL, NULL, // lockfunc, lockarg
1653 &sc->raidmap_tag[i])) {
1654 device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
1657 if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
1658 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1659 device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
1662 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1663 sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1665 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1668 if (!sc->raidmap_mem[i]) {
1669 device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
1674 if (!mrsas_get_map_info(sc))
1675 mrsas_sync_map_info(sc);
1681 * mrsas_init_fw: Initialize Firmware
1682 * input: Adapter soft state
1684 * Calls transition_to_ready() to make sure Firmware is in operational
1685 * state and calls mrsas_init_adapter() to send IOC_INIT command to
1686 * Firmware. It issues internal commands to get the controller info
1687 * after the IOC_INIT command response is received by Firmware.
1688 * Note: code relating to get_pdlist, get_ld_list and max_sectors
1689 * are currently not being used, it is left here as placeholder.
1691 static int mrsas_init_fw(struct mrsas_softc *sc)
1693 u_int32_t max_sectors_1;
1694 u_int32_t max_sectors_2;
1695 u_int32_t tmp_sectors;
1696 struct mrsas_ctrl_info *ctrl_info;
1701 /* Make sure Firmware is ready */
1702 ret = mrsas_transition_to_ready(sc, ocr);
1703 if (ret != SUCCESS) {
1707 /* Get operational params, sge flags, send init cmd to ctlr */
1708 if (mrsas_init_adapter(sc) != SUCCESS){
1709 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
1713 /* Allocate internal commands for pass-thru */
1714 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
1715 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
1719 if (mrsas_setup_raidmap(sc) != SUCCESS) {
1720 device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
1724 /* For pass-thru, get PD/LD list and controller info */
1725 memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
1726 mrsas_get_pd_list(sc);
1728 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
1729 mrsas_get_ld_list(sc);
1731 //memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));
1733 ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
1736 * Compute the max allowed sectors per IO: The controller info has two
1737 * limits on max sectors. Driver should use the minimum of these two.
1739 * 1 << stripe_sz_ops.min = max sectors per strip
1741 * Note that older firmwares ( < FW ver 30) didn't report information
1742 * to calculate max_sectors_1. So the number ended up as zero always.
1745 if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
1746 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1747 ctrl_info->max_strips_per_io;
1748 max_sectors_2 = ctrl_info->max_request_size;
1749 tmp_sectors = min(max_sectors_1 , max_sectors_2);
1750 sc->disableOnlineCtrlReset =
1751 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
1752 sc->UnevenSpanSupport =
1753 ctrl_info->adapterOperations2.supportUnevenSpans;
1754 if(sc->UnevenSpanSupport) {
1755 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
1756 sc->UnevenSpanSupport);
1757 if (MR_ValidateMapInfo(sc))
1758 sc->fast_path_io = 1;
1760 sc->fast_path_io = 0;
1764 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
1766 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
1767 sc->max_sectors_per_req = tmp_sectors;
1770 free(ctrl_info, M_MRSAS);
1776 * mrsas_init_adapter: Initializes the adapter/controller
1777 * input: Adapter soft state
1779 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
1780 * ROC/controller. The FW register is read to determined the number of
1781 * commands that is supported. All memory allocations for IO is based on
1782 * max_cmd. Appropriate calculations are performed in this function.
1784 int mrsas_init_adapter(struct mrsas_softc *sc)
1790 /* Read FW status register */
1791 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
1793 /* Get operational params from status register */
1794 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
1796 /* Decrement the max supported by 1, to correlate with FW */
1797 sc->max_fw_cmds = sc->max_fw_cmds-1;
1798 max_cmd = sc->max_fw_cmds;
1800 /* Determine allocation size of command frames */
1801 sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
1802 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
1803 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
1804 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
1805 sc->chain_frames_alloc_sz = 1024 * max_cmd;
1806 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1807 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
1809 sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
1810 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
1812 /* Used for pass thru MFI frame (DCMD) */
1813 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
1815 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1816 sizeof(MPI2_SGE_IO_UNION))/16;
1818 sc->last_reply_idx = 0;
1820 ret = mrsas_alloc_mem(sc);
1824 ret = mrsas_alloc_mpt_cmds(sc);
1828 ret = mrsas_ioc_init(sc);
1837 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
1838 * input: Adapter soft state
1840 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
1842 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
1846 /* Allocate IOC INIT command */
1847 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
1848 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1849 1, 0, // algnmnt, boundary
1850 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1851 BUS_SPACE_MAXADDR, // highaddr
1852 NULL, NULL, // filter, filterarg
1853 ioc_init_size, // maxsize
1855 ioc_init_size, // maxsegsize
1856 BUS_DMA_ALLOCNOW, // flags
1857 NULL, NULL, // lockfunc, lockarg
1858 &sc->ioc_init_tag)) {
1859 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
1862 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
1863 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
1864 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
1867 bzero(sc->ioc_init_mem, ioc_init_size);
1868 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
1869 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
1870 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
1871 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
1879 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
1880 * input: Adapter soft state
1882 * Deallocates memory of the IOC Init cmd.
1884 void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
1886 if (sc->ioc_init_phys_mem)
1887 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
1888 if (sc->ioc_init_mem != NULL)
1889 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
1890 if (sc->ioc_init_tag != NULL)
1891 bus_dma_tag_destroy(sc->ioc_init_tag);
1895 * mrsas_ioc_init: Sends IOC Init command to FW
1896 * input: Adapter soft state
1898 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
1900 int mrsas_ioc_init(struct mrsas_softc *sc)
1902 struct mrsas_init_frame *init_frame;
1903 pMpi2IOCInitRequest_t IOCInitMsg;
1904 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
1905 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
1906 bus_addr_t phys_addr;
1909 /* Allocate memory for the IOC INIT command */
1910 if (mrsas_alloc_ioc_cmd(sc)) {
1911 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
1915 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
1916 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
1917 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1918 IOCInitMsg->MsgVersion = MPI2_VERSION;
1919 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
1920 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
1921 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
1922 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
1923 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
1925 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
1926 init_frame->cmd = MFI_CMD_INIT;
1927 init_frame->cmd_status = 0xFF;
1928 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1930 if (sc->verbuf_mem) {
1931 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
1933 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
1934 init_frame->driver_ver_hi = 0;
1937 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
1938 init_frame->queue_info_new_phys_addr_lo = phys_addr;
1939 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
1941 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
1942 req_desc.MFAIo.RequestFlags =
1943 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1945 mrsas_disable_intr(sc);
1946 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
1947 //device_printf(sc->mrsas_dev, "Issuing IOC INIT command to FW.\n");del?
1948 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
1951 * Poll response timer to wait for Firmware response. While this
1952 * timer with the DELAY call could block CPU, the time interval for
1953 * this is only 1 millisecond.
1955 if (init_frame->cmd_status == 0xFF) {
1956 for (i=0; i < (max_wait * 1000); i++){
1957 if (init_frame->cmd_status == 0xFF)
1964 if (init_frame->cmd_status == 0)
1965 mrsas_dprint(sc, MRSAS_OCR,
1966 "IOC INIT response received from FW.\n");
1967 //device_printf(sc->mrsas_dev, "IOC INIT response received from FW.\n");del?
1970 if (init_frame->cmd_status == 0xFF)
1971 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
1973 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
1977 mrsas_free_ioc_cmd(sc);
1982 * mrsas_alloc_mpt_cmds: Allocates the command packets
1983 * input: Adapter instance soft state
1985 * This function allocates the internal commands for IOs. Each command that is
1986 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
1987 * An array is allocated with mrsas_mpt_cmd context. The free commands are
1988 * maintained in a linked list (cmd pool). SMID value range is from 1 to
1991 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
1995 struct mrsas_mpt_cmd *cmd;
1996 pMpi2ReplyDescriptorsUnion_t reply_desc;
1997 u_int32_t offset, chain_offset, sense_offset;
1998 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
1999 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2001 max_cmd = sc->max_fw_cmds;
2003 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2004 if (!sc->req_desc) {
2005 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2008 memset(sc->req_desc, 0, sc->request_alloc_sz);
2011 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
2012 * dynamic array first and then allocate individual commands.
2014 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
2015 if (!sc->mpt_cmd_list) {
2016 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2019 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
2020 for (i = 0; i < max_cmd; i++) {
2021 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2023 if (!sc->mpt_cmd_list[i]) {
2024 for (j = 0; j < i; j++)
2025 free(sc->mpt_cmd_list[j],M_MRSAS);
2026 free(sc->mpt_cmd_list, M_MRSAS);
2027 sc->mpt_cmd_list = NULL;
2032 io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2033 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2034 chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
2035 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2036 sense_base = (u_int8_t*)sc->sense_mem;
2037 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2038 for (i = 0; i < max_cmd; i++) {
2039 cmd = sc->mpt_cmd_list[i];
2040 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2041 chain_offset = 1024 * i;
2042 sense_offset = MRSAS_SENSE_LEN * i;
2043 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2045 cmd->ccb_ptr = NULL;
2046 callout_init(&cmd->cm_callout, 0);
2047 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2049 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2050 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2051 cmd->io_request_phys_addr = io_req_base_phys + offset;
2052 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2053 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2054 cmd->sense = sense_base + sense_offset;
2055 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2056 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2059 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2062 /* Initialize reply descriptor array to 0xFFFFFFFF */
2063 reply_desc = sc->reply_desc_mem;
2064 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2065 reply_desc->Words = MRSAS_ULONG_MAX;
2071 * mrsas_fire_cmd: Sends command to FW
2072 * input: Adapter soft state
2073 * request descriptor address low
2074 * request descriptor address high
2076 * This functions fires the command to Firmware by writing to the
2077 * inbound_low_queue_port and inbound_high_queue_port.
2079 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2080 u_int32_t req_desc_hi)
2082 mtx_lock(&sc->pci_lock);
2083 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2085 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2087 mtx_unlock(&sc->pci_lock);
2091 * mrsas_transition_to_ready: Move FW to Ready state
2092 * input: Adapter instance soft state
2094 * During the initialization, FW passes can potentially be in any one of
2095 * several possible states. If the FW in operational, waiting-for-handshake
2096 * states, driver must take steps to bring it to ready state. Otherwise, it
2097 * has to wait for the ready state.
2099 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2103 u_int32_t val, fw_state;
2104 u_int32_t cur_state;
2105 u_int32_t abs_state, curr_abs_state;
2107 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2108 fw_state = val & MFI_STATE_MASK;
2109 max_wait = MRSAS_RESET_WAIT_TIME;
2111 if (fw_state != MFI_STATE_READY)
2112 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2114 while (fw_state != MFI_STATE_READY) {
2115 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2117 case MFI_STATE_FAULT:
2118 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2120 cur_state = MFI_STATE_FAULT;
2125 case MFI_STATE_WAIT_HANDSHAKE:
2126 /* Set the CLR bit in inbound doorbell */
2127 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2128 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
2129 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2131 case MFI_STATE_BOOT_MESSAGE_PENDING:
2132 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2134 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2136 case MFI_STATE_OPERATIONAL:
2137 /* Bring it to READY state; assuming max wait 10 secs */
2138 mrsas_disable_intr(sc);
2139 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
2140 for (i=0; i < max_wait * 1000; i++) {
2141 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2146 cur_state = MFI_STATE_OPERATIONAL;
2148 case MFI_STATE_UNDEFINED:
2149 /* This state should not last for more than 2 seconds */
2150 cur_state = MFI_STATE_UNDEFINED;
2152 case MFI_STATE_BB_INIT:
2153 cur_state = MFI_STATE_BB_INIT;
2155 case MFI_STATE_FW_INIT:
2156 cur_state = MFI_STATE_FW_INIT;
2158 case MFI_STATE_FW_INIT_2:
2159 cur_state = MFI_STATE_FW_INIT_2;
2161 case MFI_STATE_DEVICE_SCAN:
2162 cur_state = MFI_STATE_DEVICE_SCAN;
2164 case MFI_STATE_FLUSH_CACHE:
2165 cur_state = MFI_STATE_FLUSH_CACHE;
2168 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2173 * The cur_state should not last for more than max_wait secs
2175 for (i = 0; i < (max_wait * 1000); i++) {
2176 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2177 outbound_scratch_pad))& MFI_STATE_MASK);
2178 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2179 outbound_scratch_pad));
2180 if (abs_state == curr_abs_state)
2187 * Return error if fw_state hasn't changed after max_wait
2189 if (curr_abs_state == abs_state) {
2190 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2191 "in %d secs\n", fw_state, max_wait);
2195 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2196 //device_printf(sc->mrsas_dev, "FW now in Ready state\n");del?
2201 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2202 * input: Adapter soft state
2204 * This function removes an MFI command from the command list.
2206 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2208 struct mrsas_mfi_cmd *cmd = NULL;
2210 mtx_lock(&sc->mfi_cmd_pool_lock);
2211 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
2212 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2213 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2215 mtx_unlock(&sc->mfi_cmd_pool_lock);
2221 * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
2222 * input: Adapter Context.
2224 * This function will check FW status register and flag
2225 * do_timeout_reset flag. It will do OCR/Kill adapter if
2226 * FW is in fault state or IO timed out has trigger reset.
2229 mrsas_ocr_thread(void *arg)
2231 struct mrsas_softc *sc;
2232 u_int32_t fw_status, fw_state;
2234 sc = (struct mrsas_softc *)arg;
2236 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2238 sc->ocr_thread_active = 1;
2239 mtx_lock(&sc->sim_lock);
2241 /* Sleep for 1 second and check the queue status*/
2242 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2243 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2244 if (sc->remove_in_progress) {
2245 mrsas_dprint(sc, MRSAS_OCR,
2246 "Exit due to shutdown from %s\n", __func__);
2249 fw_status = mrsas_read_reg(sc,
2250 offsetof(mrsas_reg_set, outbound_scratch_pad));
2251 fw_state = fw_status & MFI_STATE_MASK;
2252 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2253 device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
2254 sc->do_timedout_reset?"IO Timeout":
2255 "FW fault detected");
2256 mtx_lock_spin(&sc->ioctl_lock);
2257 sc->reset_in_progress = 1;
2259 mtx_unlock_spin(&sc->ioctl_lock);
2260 mrsas_xpt_freeze(sc);
2261 mrsas_reset_ctrl(sc);
2262 mrsas_xpt_release(sc);
2263 sc->reset_in_progress = 0;
2264 sc->do_timedout_reset = 0;
2267 mtx_unlock(&sc->sim_lock);
2268 sc->ocr_thread_active = 0;
2269 mrsas_kproc_exit(0);
2273 * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
2274 * input: Adapter Context.
2276 * This function will clear reply descriptor so that post OCR
2277 * driver and FW will lost old history.
2279 void mrsas_reset_reply_desc(struct mrsas_softc *sc)
2282 pMpi2ReplyDescriptorsUnion_t reply_desc;
2284 sc->last_reply_idx = 0;
2285 reply_desc = sc->reply_desc_mem;
2286 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2287 reply_desc->Words = MRSAS_ULONG_MAX;
2292 * mrsas_reset_ctrl Core function to OCR/Kill adapter.
2293 * input: Adapter Context.
2295 * This function will run from thread context so that it can sleep.
2296 * 1. Do not handle OCR if FW is in HW critical error.
2297 * 2. Wait for outstanding command to complete for 180 seconds.
2298 * 3. If #2 does not find any outstanding command Controller is in working
2299 * state, so skip OCR.
2300 * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
2301 * 4. Start of the OCR, return all SCSI command back to CAM layer which has
2303 * 5. Post OCR, Re-fire Managment command and move Controller to Operation
/*
 * NOTE(review): this fragment is missing lines from extraction (several
 * gotos/labels, braces and early returns).  Kept byte-identical; only
 * annotations added.  Confirm against the canonical mrsas.c before edits.
 */
2306 int mrsas_reset_ctrl(struct mrsas_softc *sc)
2308 int retval = SUCCESS, i, j, retry = 0;
2309 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2311 struct mrsas_mfi_cmd *mfi_cmd;
2312 struct mrsas_mpt_cmd *mpt_cmd;
2313 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
/* Refuse OCR entirely once the HBA has been declared dead. */
2315 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2316 device_printf(sc->mrsas_dev,
2317 "mrsas: Hardware critical error, returning FAIL.\n");
/* Mark reset-in-progress and quiesce interrupts before touching HW. */
2321 set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2322 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2323 mrsas_disable_intr(sc);
2326 /* First try waiting for commands to complete */
2327 if (mrsas_wait_for_outstanding(sc)) {
2328 mrsas_dprint(sc, MRSAS_OCR,
2329 "resetting adapter from %s.\n",
2331 /* Now return commands back to the CAM layer */
2332 for (i = 0 ; i < sc->max_fw_cmds; i++) {
2333 mpt_cmd = sc->mpt_cmd_list[i];
2334 if (mpt_cmd->ccb_ptr) {
2335 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2336 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2337 mrsas_cmd_done(sc, mpt_cmd);
2338 atomic_dec(&sc->fw_outstanding);
/* Decide between chip reset and kill based on FW fault/reset bits. */
2342 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2343 outbound_scratch_pad));
2344 abs_state = status_reg & MFI_STATE_MASK;
2345 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2346 if (sc->disableOnlineCtrlReset ||
2347 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2348 /* Reset not supported, kill adapter */
2349 mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
2351 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
2356 /* Now try to reset the chip */
2357 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
/* Magic 6-key write sequence unlocks the fusion diag register. */
2358 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2359 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2360 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2361 MPI2_WRSEQ_1ST_KEY_VALUE);
2362 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2363 MPI2_WRSEQ_2ND_KEY_VALUE);
2364 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2365 MPI2_WRSEQ_3RD_KEY_VALUE);
2366 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2367 MPI2_WRSEQ_4TH_KEY_VALUE);
2368 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2369 MPI2_WRSEQ_5TH_KEY_VALUE);
2370 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2371 MPI2_WRSEQ_6TH_KEY_VALUE);
2373 /* Check that the diag write enable (DRWE) bit is on */
2374 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2377 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2379 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2381 if (retry++ == 100) {
2382 mrsas_dprint(sc, MRSAS_OCR,
2383 "Host diag unlock failed!\n");
2387 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2390 /* Send chip reset command */
2391 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2392 host_diag | HOST_DIAG_RESET_ADAPTER);
2395 /* Make sure reset adapter bit is cleared */
2396 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2399 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2401 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2403 if (retry++ == 1000) {
2404 mrsas_dprint(sc, MRSAS_OCR,
2405 "Diag reset adapter never cleared!\n");
2409 if (host_diag & HOST_DIAG_RESET_ADAPTER)
/* Poll FW state until it climbs past FW_INIT, then retry loop if not. */
2412 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2413 outbound_scratch_pad)) & MFI_STATE_MASK;
2416 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2418 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2419 outbound_scratch_pad)) & MFI_STATE_MASK;
2421 if (abs_state <= MFI_STATE_FW_INIT) {
2422 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2423 " state = 0x%x\n", abs_state);
2427 /* Wait for FW to become ready */
2428 if (mrsas_transition_to_ready(sc, 1)) {
2429 mrsas_dprint(sc, MRSAS_OCR,
2430 "mrsas: Failed to transition controller to ready.\n");
/* Re-initialize the IOC and reply queue after the chip came back. */
2434 mrsas_reset_reply_desc(sc);
2435 if (mrsas_ioc_init(sc)) {
2436 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2440 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2441 mrsas_enable_intr(sc);
2442 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2444 /* Re-fire management commands */
2445 for (j = 0 ; j < sc->max_fw_cmds; j++) {
2446 mpt_cmd = sc->mpt_cmd_list[j];
2447 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2448 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
/* Pending map-update DCMDs are dropped, not re-fired: a fresh
 * map sync is issued below via mrsas_get_map_info(). */
2449 if (mfi_cmd->frame->dcmd.opcode ==
2450 MR_DCMD_LD_MAP_GET_INFO) {
2451 mrsas_release_mfi_cmd(mfi_cmd);
2452 mrsas_release_mpt_cmd(mpt_cmd);
2454 req_desc = mrsas_get_request_desc(sc,
2455 mfi_cmd->cmd_id.context.smid - 1);
2456 mrsas_dprint(sc, MRSAS_OCR,
2457 "Re-fire command DCMD opcode 0x%x index %d\n ",
2458 mfi_cmd->frame->dcmd.opcode, j);
2460 device_printf(sc->mrsas_dev,
2461 "Cannot build MPT cmd.\n");
2463 mrsas_fire_cmd(sc, req_desc->addr.u.low,
2464 req_desc->addr.u.high);
2469 /* Reset load balance info */
2470 memset(sc->load_balance_info, 0,
2471 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
2473 if (!mrsas_get_map_info(sc))
2474 mrsas_sync_map_info(sc);
2476 /* Adapter reset completed successfully */
2477 device_printf(sc->mrsas_dev, "Reset successful\n");
2481 /* Reset failed, kill the adapter */
2482 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
/* Path taken when no outstanding commands were found: no OCR needed. */
2486 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2487 mrsas_enable_intr(sc);
2488 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2491 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2492 mrsas_dprint(sc, MRSAS_OCR,
2493 "Reset Exit with %d.\n", retval);
2498 * mrsas_kill_hba Kill HBA when OCR is not supported.
2499 * input: Adapter Context.
2501 * This function will kill HBA when OCR is not supported.
2503 void mrsas_kill_hba (struct mrsas_softc *sc)
2505 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2506 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2509 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2513 * mrsas_wait_for_outstanding Wait for outstanding commands
2514 * input: Adapter Context.
2516 * This function will wait for 180 seconds for outstanding
2517 * commands to be completed.
2519 int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2521 int i, outstanding, retval = 0;
2524 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2525 if (sc->remove_in_progress) {
2526 mrsas_dprint(sc, MRSAS_OCR,
2527 "Driver remove or shutdown called.\n");
2531 /* Check if firmware is in fault state */
2532 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2533 outbound_scratch_pad)) & MFI_STATE_MASK;
2534 if (fw_state == MFI_STATE_FAULT) {
2535 mrsas_dprint(sc, MRSAS_OCR,
2536 "Found FW in FAULT state, will reset adapter.\n");
2540 outstanding = atomic_read(&sc->fw_outstanding);
2544 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2545 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2546 "commands to complete\n",i,outstanding);
2547 mrsas_complete_cmd(sc);
2552 if (atomic_read(&sc->fw_outstanding)) {
2553 mrsas_dprint(sc, MRSAS_OCR,
2554 " pending commands remain after waiting,"
2555 " will reset adapter.\n");
2563 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2564 * input: Command packet for return to free cmd pool
2566 * This function returns the MFI command to the command list.
2568 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2570 struct mrsas_softc *sc = cmd->sc;
2572 mtx_lock(&sc->mfi_cmd_pool_lock);
2573 cmd->ccb_ptr = NULL;
2574 cmd->cmd_id.frame_count = 0;
2575 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2576 mtx_unlock(&sc->mfi_cmd_pool_lock);
2582 * mrsas_get_controller_info - Returns FW's controller structure
2583 * input: Adapter soft state
2584 * Controller information structure
2586 * Issues an internal command (DCMD) to get the FW's controller structure.
2587 * This information is mainly used to find out the maximum IO transfer per
2588 * command supported by the FW.
2590 static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
2591 struct mrsas_ctrl_info *ctrl_info)
2594 struct mrsas_mfi_cmd *cmd;
2595 struct mrsas_dcmd_frame *dcmd;
2597 cmd = mrsas_get_mfi_cmd(sc);
2600 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
2603 dcmd = &cmd->frame->dcmd;
2605 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
2606 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
2607 mrsas_release_mfi_cmd(cmd);
2610 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2612 dcmd->cmd = MFI_CMD_DCMD;
2613 dcmd->cmd_status = 0xFF;
2614 dcmd->sge_count = 1;
2615 dcmd->flags = MFI_FRAME_DIR_READ;
2618 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
2619 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2620 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
2621 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
2623 if (!mrsas_issue_polled(sc, cmd))
2624 memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
2628 mrsas_free_ctlr_info_cmd(sc);
2629 mrsas_release_mfi_cmd(cmd);
2634 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
2635 * input: Adapter soft state
2637 * Allocates DMAable memory for the controller info internal command.
2639 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2643 /* Allocate get controller info command */
2644 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
2645 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
2646 1, 0, // algnmnt, boundary
2647 BUS_SPACE_MAXADDR_32BIT,// lowaddr
2648 BUS_SPACE_MAXADDR, // highaddr
2649 NULL, NULL, // filter, filterarg
2650 ctlr_info_size, // maxsize
2652 ctlr_info_size, // maxsegsize
2653 BUS_DMA_ALLOCNOW, // flags
2654 NULL, NULL, // lockfunc, lockarg
2655 &sc->ctlr_info_tag)) {
2656 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
2659 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
2660 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
2661 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
2664 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
2665 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
2666 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
2667 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
2671 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
2676 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
2677 * input: Adapter soft state
2679 * Deallocates memory of the get controller info cmd.
2681 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
2683 if (sc->ctlr_info_phys_addr)
2684 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
2685 if (sc->ctlr_info_mem != NULL)
2686 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
2687 if (sc->ctlr_info_tag != NULL)
2688 bus_dma_tag_destroy(sc->ctlr_info_tag);
2692 * mrsas_issue_polled: Issues a polling command
2693 * inputs: Adapter soft state
2694 * Command packet to be issued
2696 * This function is for posting of internal commands to Firmware. MFI
2697 * requires the cmd_status to be set to 0xFF before posting. The maximun
2698 * wait time of the poll response timer is 180 seconds.
2700 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2702 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
2703 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2706 frame_hdr->cmd_status = 0xFF;
2707 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2709 /* Issue the frame using inbound queue port */
2710 if (mrsas_issue_dcmd(sc, cmd)) {
2711 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2716 * Poll response timer to wait for Firmware response. While this
2717 * timer with the DELAY call could block CPU, the time interval for
2718 * this is only 1 millisecond.
2720 if (frame_hdr->cmd_status == 0xFF) {
2721 for (i=0; i < (max_wait * 1000); i++){
2722 if (frame_hdr->cmd_status == 0xFF)
2728 if (frame_hdr->cmd_status != 0)
2730 if (frame_hdr->cmd_status == 0xFF)
2731 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
2733 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
2740 * mrsas_issue_dcmd - Issues a MFI Pass thru cmd
2741 * input: Adapter soft state
2744 * This function is called by mrsas_issued_blocked_cmd() and
2745 * mrsas_issued_polled(), to build the MPT command and then fire the
2746 * command to Firmware.
2749 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2751 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2753 req_desc = mrsas_build_mpt_cmd(sc, cmd);
2755 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
2759 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
2765 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
2766 * input: Adapter soft state
2769 * This function is called by mrsas_issue_cmd() to build the MPT-MFI
2770 * passthru command and prepares the MPT command to send to Firmware.
2772 MRSAS_REQUEST_DESCRIPTOR_UNION *
2773 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2775 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2778 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
2779 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
2783 index = cmd->cmd_id.context.smid;
2785 req_desc = mrsas_get_request_desc(sc, index-1);
2789 req_desc->addr.Words = 0;
2790 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2792 req_desc->SCSIIO.SMID = index;
2798 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
2799 * input: Adapter soft state
2802 * The MPT command and the io_request are setup as a passthru command.
2803 * The SGE chain address is set to frame_phys_addr of the MFI command.
2806 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
2808 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2809 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
2810 struct mrsas_mpt_cmd *mpt_cmd;
2811 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
2813 mpt_cmd = mrsas_get_mpt_cmd(sc);
2817 /* Save the smid. To be used for returning the cmd */
2818 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
2820 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
2823 * For cmds where the flag is set, store the flag and check
2824 * on completion. For cmds with this flag, don't call
2825 * mrsas_complete_cmd.
2828 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
2829 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2831 io_req = mpt_cmd->io_request;
2833 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
2834 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
2835 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
2836 sgl_ptr_end->Flags = 0;
2839 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2841 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2842 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
2843 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
2845 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
2847 mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2848 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2850 mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
/*
 * mrsas_issue_blocked_cmd:  Synchronous wrapper around regular FW cmds
 * input:                    Adapter soft state
 *                           Command to be issued
 *
 * Issues the command and sleeps on sc->chan until the ISR completion
 * path (mrsas_wakeup) changes cmd_status away from the ECONNREFUSED
 * sentinel.  Max wait is MRSAS_INTERNAL_CMD_WAIT_TIME (180) seconds in
 * 1-second ticks.  Used for internal and ioctl commands.
 *
 * Returns 0 on completion, 1 on issue failure or timeout.
 */
int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
    unsigned long total_time = 0;
    int retcode = 0;

    /* Initialize cmd_status: sentinel meaning "not completed yet" */
    cmd->cmd_status = ECONNREFUSED;

    /* Build MPT-MFI command for issue to FW */
    if (mrsas_issue_dcmd(sc, cmd)) {
        device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
        return (1);
    }

    sc->chan = (void*)&cmd;

    /* Sleep in 1-second slices until completed or timed out */
    while (1) {
        if (cmd->cmd_status == ECONNREFUSED) {
            tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
        } else
            break;
        total_time++;
        if (total_time >= max_wait) {
            device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
            retcode = 1;
            break;
        }
    }
    return (retcode);
}
2902 * mrsas_complete_mptmfi_passthru - Completes a command
2903 * input: sc: Adapter soft state
2904 * cmd: Command to be completed
2905 * status: cmd completion status
2907 * This function is called from mrsas_complete_cmd() after an interrupt
2908 * is received from Firmware, and io_request->Function is
2909 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
/*
 * NOTE(review): fragment is missing lines (switch header, some case
 * labels and break statements).  Kept byte-identical; only annotations
 * added.  Dispatch is presumably on hdr->cmd — confirm against the
 * canonical mrsas.c.
 */
2912 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
2915 struct mrsas_header *hdr = &cmd->frame->hdr;
2916 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
2918 /* Reset the retry counter for future re-tries */
2919 cmd->retry_for_fw_reset = 0;
2922 cmd->ccb_ptr = NULL;
2925 case MFI_CMD_INVALID:
2926 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
2928 case MFI_CMD_PD_SCSI_IO:
2929 case MFI_CMD_LD_SCSI_IO:
2931 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
2932 * issued either through an IO path or an IOCTL path. If it
2933 * was via IOCTL, we will send it to internal completion.
2935 if (cmd->sync_cmd) {
2937 mrsas_wakeup(sc, cmd);
2943 /* Check for LD map update */
2944 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
2945 (cmd->frame->dcmd.mbox.b[1] == 1)) {
/* Map-sync completion: revalidate RAID map under raidmap_lock. */
2946 sc->fast_path_io = 0;
2947 mtx_lock(&sc->raidmap_lock);
2948 if (cmd_status != 0) {
2949 if (cmd_status != MFI_STAT_NOT_FOUND)
2950 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
2952 mrsas_release_mfi_cmd(cmd);
2953 mtx_unlock(&sc->raidmap_lock);
/* Success path: flip map_id, validate, re-arm the next map sync. */
2959 mrsas_release_mfi_cmd(cmd);
2960 if (MR_ValidateMapInfo(sc))
2961 sc->fast_path_io = 0;
2963 sc->fast_path_io = 1;
2964 mrsas_sync_map_info(sc);
2965 mtx_unlock(&sc->raidmap_lock);
2968 #if 0 //currently not supporting event handling, so commenting out
2969 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
2970 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
2971 mrsas_poll_wait_aen = 0;
2974 /* See if got an event notification */
2975 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
2976 mrsas_complete_aen(sc, cmd);
/* Non-AEN DCMDs: wake the blocked issuer (mrsas_issue_blocked_cmd). */
2978 mrsas_wakeup(sc, cmd);
2981 /* Command issued to abort another cmd return */
2982 mrsas_complete_abort(sc, cmd);
2985 device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
/*
 * mrsas_wakeup:  Completes an internal command
 * input:         Adapter soft state
 *                Command to be completed
 *
 * Called from mrsas_complete_mptmfi_passthru() to wake the thread
 * sleeping in mrsas_issue_blocked_cmd().  Copies the FW completion
 * status into cmd->cmd_status, mapping the ECONNREFUSED sentinel to 0
 * so the sleeper's "still pending" test cannot collide with a real
 * status, then signals sc->chan.
 */
void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
    cmd->cmd_status = cmd->frame->io.cmd_status;

    if (cmd->cmd_status == ECONNREFUSED)
        cmd->cmd_status = 0;

    sc->chan = (void*)&cmd;
    wakeup_one((void *)&sc->chan);
    return;
}
3016 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
3017 * input: Adapter soft state
3018 * Shutdown/Hibernate
3020 * This function issues a DCMD internal command to Firmware to initiate
3021 * shutdown of the controller.
3023 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3025 struct mrsas_mfi_cmd *cmd;
3026 struct mrsas_dcmd_frame *dcmd;
3028 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3031 cmd = mrsas_get_mfi_cmd(sc);
3033 device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
3038 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3040 if (sc->map_update_cmd)
3041 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3043 dcmd = &cmd->frame->dcmd;
3044 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3046 dcmd->cmd = MFI_CMD_DCMD;
3047 dcmd->cmd_status = 0x0;
3048 dcmd->sge_count = 0;
3049 dcmd->flags = MFI_FRAME_DIR_NONE;
3052 dcmd->data_xfer_len = 0;
3053 dcmd->opcode = opcode;
3055 device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
3057 mrsas_issue_blocked_cmd(sc, cmd);
3058 mrsas_release_mfi_cmd(cmd);
3064 * mrsas_flush_cache: Requests FW to flush all its caches
3065 * input: Adapter soft state
3067 * This function is issues a DCMD internal command to Firmware to initiate
3068 * flushing of all caches.
3070 static void mrsas_flush_cache(struct mrsas_softc *sc)
3072 struct mrsas_mfi_cmd *cmd;
3073 struct mrsas_dcmd_frame *dcmd;
3075 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3078 cmd = mrsas_get_mfi_cmd(sc);
3080 device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
3084 dcmd = &cmd->frame->dcmd;
3085 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3087 dcmd->cmd = MFI_CMD_DCMD;
3088 dcmd->cmd_status = 0x0;
3089 dcmd->sge_count = 0;
3090 dcmd->flags = MFI_FRAME_DIR_NONE;
3093 dcmd->data_xfer_len = 0;
3094 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3095 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3097 mrsas_issue_blocked_cmd(sc, cmd);
3098 mrsas_release_mfi_cmd(cmd);
3104 * mrsas_get_map_info: Load and validate RAID map
3105 * input: Adapter instance soft state
3107 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
3108 * to load and validate RAID map. It returns 0 if successful, 1 other-
3111 static int mrsas_get_map_info(struct mrsas_softc *sc)
3113 uint8_t retcode = 0;
3115 sc->fast_path_io = 0;
3116 if (!mrsas_get_ld_map_info(sc)) {
3117 retcode = MR_ValidateMapInfo(sc);
3119 sc->fast_path_io = 1;
3127 * mrsas_get_ld_map_info: Get FW's ld_map structure
3128 * input: Adapter instance soft state
3130 * Issues an internal command (DCMD) to get the FW's controller PD
3133 static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
3136 struct mrsas_mfi_cmd *cmd;
3137 struct mrsas_dcmd_frame *dcmd;
3138 MR_FW_RAID_MAP_ALL *map;
3139 bus_addr_t map_phys_addr = 0;
3141 cmd = mrsas_get_mfi_cmd(sc);
3143 device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
3147 dcmd = &cmd->frame->dcmd;
3149 map = sc->raidmap_mem[(sc->map_id & 1)];
3150 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3152 device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
3153 mrsas_release_mfi_cmd(cmd);
3156 memset(map, 0, sizeof(*map));
3157 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3159 dcmd->cmd = MFI_CMD_DCMD;
3160 dcmd->cmd_status = 0xFF;
3161 dcmd->sge_count = 1;
3162 dcmd->flags = MFI_FRAME_DIR_READ;
3165 dcmd->data_xfer_len = sc->map_sz;
3166 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3167 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3168 dcmd->sgl.sge32[0].length = sc->map_sz;
3169 if (!mrsas_issue_polled(sc, cmd))
3173 device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
3176 mrsas_release_mfi_cmd(cmd);
3181 * mrsas_sync_map_info: Get FW's ld_map structure
3182 * input: Adapter instance soft state
3184 * Issues an internal command (DCMD) to get the FW's controller PD
3187 static int mrsas_sync_map_info(struct mrsas_softc *sc)
3190 struct mrsas_mfi_cmd *cmd;
3191 struct mrsas_dcmd_frame *dcmd;
3192 uint32_t size_sync_info, num_lds;
3193 MR_LD_TARGET_SYNC *target_map = NULL;
3194 MR_FW_RAID_MAP_ALL *map;
3196 MR_LD_TARGET_SYNC *ld_sync;
3197 bus_addr_t map_phys_addr = 0;
3199 cmd = mrsas_get_mfi_cmd(sc);
3201 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
3205 map = sc->raidmap_mem[sc->map_id & 1];
3206 num_lds = map->raidMap.ldCount;
3208 dcmd = &cmd->frame->dcmd;
3209 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3210 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3212 target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
3213 memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
3215 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3217 ld_sync = (MR_LD_TARGET_SYNC *)target_map;
3219 for (i = 0; i < num_lds; i++, ld_sync++) {
3220 raid = MR_LdRaidGet(i, map);
3221 ld_sync->targetId = MR_GetLDTgtId(i, map);
3222 ld_sync->seqNum = raid->seqNum;
3225 dcmd->cmd = MFI_CMD_DCMD;
3226 dcmd->cmd_status = 0xFF;
3227 dcmd->sge_count = 1;
3228 dcmd->flags = MFI_FRAME_DIR_WRITE;
3231 dcmd->data_xfer_len = sc->map_sz;
3232 dcmd->mbox.b[0] = num_lds;
3233 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3234 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3235 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3236 dcmd->sgl.sge32[0].length = sc->map_sz;
3238 sc->map_update_cmd = cmd;
3239 if (mrsas_issue_dcmd(sc, cmd)) {
3240 device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
3247 * mrsas_get_pd_list: Returns FW's PD list structure
3248 * input: Adapter soft state
3250 * Issues an internal command (DCMD) to get the FW's controller PD
3251 * list structure. This information is mainly used to find out about
3252 * system supported by Firmware.
3254 static int mrsas_get_pd_list(struct mrsas_softc *sc)
3256 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
3257 struct mrsas_mfi_cmd *cmd;
3258 struct mrsas_dcmd_frame *dcmd;
3259 struct MR_PD_LIST *pd_list_mem;
3260 struct MR_PD_ADDRESS *pd_addr;
3261 bus_addr_t pd_list_phys_addr = 0;
3262 struct mrsas_tmp_dcmd *tcmd;
3264 cmd = mrsas_get_mfi_cmd(sc);
3266 device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
3270 dcmd = &cmd->frame->dcmd;
3272 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3273 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3274 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3275 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
3276 mrsas_release_mfi_cmd(cmd);
3280 pd_list_mem = tcmd->tmp_dcmd_mem;
3281 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3283 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3285 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3286 dcmd->mbox.b[1] = 0;
3287 dcmd->cmd = MFI_CMD_DCMD;
3288 dcmd->cmd_status = 0xFF;
3289 dcmd->sge_count = 1;
3290 dcmd->flags = MFI_FRAME_DIR_READ;
3293 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3294 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3295 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3296 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3298 if (!mrsas_issue_polled(sc, cmd))
3303 /* Get the instance PD list */
3304 pd_count = MRSAS_MAX_PD;
3305 pd_addr = pd_list_mem->addr;
3306 if (retcode == 0 && pd_list_mem->count < pd_count) {
3307 memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3308 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3309 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3310 sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
3311 sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
3316 /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */
3317 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3318 mrsas_free_tmp_dcmd(tcmd);
3319 mrsas_release_mfi_cmd(cmd);
3320 free(tcmd, M_MRSAS);
3325 * mrsas_get_ld_list: Returns FW's LD list structure
3326 * input: Adapter soft state
3328 * Issues an internal command (DCMD) to get the FW's controller PD
3329 * list structure. This information is mainly used to find out about
3330 * supported by the FW.
/*
 * Issues MR_DCMD_LD_GET_LIST as a polled internal DCMD and records the
 * firmware's logical-drive (LD) target IDs in sc->ld_ids and the LD count
 * in sc->CurLdCount.
 */
3332 static int mrsas_get_ld_list(struct mrsas_softc *sc)
3334 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3335 struct mrsas_mfi_cmd *cmd;
3336 struct mrsas_dcmd_frame *dcmd;
3337 struct MR_LD_LIST *ld_list_mem;
3338 bus_addr_t ld_list_phys_addr = 0;
3339 struct mrsas_tmp_dcmd *tcmd;
/* Obtain a free internal MFI command slot. */
3341 cmd = mrsas_get_mfi_cmd(sc);
3343 device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
3347 dcmd = &cmd->frame->dcmd;
/* Allocate a DMA-able scratch buffer sized for the full LD list. */
3349 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3350 ld_list_size = sizeof(struct MR_LD_LIST);
3351 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3352 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
3353 mrsas_release_mfi_cmd(cmd);
3357 ld_list_mem = tcmd->tmp_dcmd_mem;
3358 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
/* Build the DCMD frame: a single-SGE read of the LD list into the buffer. */
3360 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3362 dcmd->cmd = MFI_CMD_DCMD;
3363 dcmd->cmd_status = 0xFF;
3364 dcmd->sge_count = 1;
3365 dcmd->flags = MFI_FRAME_DIR_READ;
3367 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3368 dcmd->opcode = MR_DCMD_LD_GET_LIST;
3369 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3370 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
/* Issue synchronously; mrsas_issue_polled() returns 0 on success. */
3373 if (!mrsas_issue_polled(sc, cmd))
3378 /* Get the instance LD list */
3379 if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
3380 sc->CurLdCount = ld_list_mem->ldCount;
/* 0xff marks an id slot unused before repopulating from firmware data. */
3381 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
3382 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
/* Skip entries whose state is 0; record the rest by target id. */
3383 if (ld_list_mem->ldList[ld_index].state != 0) {
3384 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3385 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
/* Release the DMA scratch buffer and the MFI command slot. */
3390 mrsas_free_tmp_dcmd(tcmd);
3391 mrsas_release_mfi_cmd(cmd);
3392 free(tcmd, M_MRSAS);
3397 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
3398 * input: Adapter soft state
3402 * Allocates DMAable memory for a temporary internal command. The allocated
3403 * memory is initialized to all zeros upon successful loading of the DMA map.
/*
 * Creates a 32-bit-addressable DMA tag, allocates 'size' bytes of DMA-safe
 * memory for a temporary internal DCMD, loads the map (the physical address
 * is delivered to tmp_dcmd_phys_addr via mrsas_addr_cb) and zeroes the
 * buffer so no stale data reaches the firmware.
 */
3406 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
3409 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
3410 1, 0, // algnmnt, boundary
3411 BUS_SPACE_MAXADDR_32BIT,// lowaddr
3412 BUS_SPACE_MAXADDR, // highaddr
3413 NULL, NULL, // filter, filterarg
3417 BUS_DMA_ALLOCNOW, // flags
3418 NULL, NULL, // lockfunc, lockarg
3419 &tcmd->tmp_dcmd_tag)) {
3420 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3423 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3424 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3425 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
/* Loading resolves the virtual buffer to a bus address for the HW SGE. */
3428 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3429 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3430 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3431 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
/* Hand firmware a clean, zeroed buffer. */
3435 memset(tcmd->tmp_dcmd_mem, 0, size);
3440 * mrsas_free_tmp_dcmd: Free memory for temporary command
3441 * input: temporary dcmd pointer
3443 * Deallocates memory of the temporary command for use in the construction
3444 * of the internal DCMD.
/*
 * Tears down a temporary DCMD buffer in reverse order of allocation:
 * unload the DMA map, free the DMA memory, destroy the tag. Each step is
 * guarded so a partially-constructed tcmd can also be freed safely.
 */
3446 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3448 if (tmp->tmp_dcmd_phys_addr)
3449 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3450 if (tmp->tmp_dcmd_mem != NULL)
3451 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3452 if (tmp->tmp_dcmd_tag != NULL)
3453 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3457 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
3458 * input: Adapter soft state
3459 * Previously issued cmd to be aborted
3461 * This function is used to abort previously issued commands, such as AEN and
3462 * RAID map sync map commands. The abort command is sent as a DCMD internal
3463 * command and subsequently the driver will wait for a return status. The
3464 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
/*
 * Builds an MFI_CMD_ABORT frame that targets cmd_to_abort (identified by
 * its command index and MFI frame physical address), issues it, then
 * sleeps in 1-second (hz-tick) intervals until the completion path clears
 * cmd_status or MRSAS_INTERNAL_CMD_WAIT_TIME seconds elapse.
 */
3466 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3467 struct mrsas_mfi_cmd *cmd_to_abort)
3469 struct mrsas_mfi_cmd *cmd;
3470 struct mrsas_abort_frame *abort_fr;
3471 u_int8_t retcode = 0;
3472 unsigned long total_time = 0;
3473 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
/* A fresh internal command carries the abort frame. */
3475 cmd = mrsas_get_mfi_cmd(sc);
3477 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3481 abort_fr = &cmd->frame->abort;
3483 /* Prepare and issue the abort frame */
3484 abort_fr->cmd = MFI_CMD_ABORT;
3485 abort_fr->cmd_status = 0xFF;
3486 abort_fr->flags = 0;
/* Identify the victim command by index and MFI frame bus address. */
3487 abort_fr->abort_context = cmd_to_abort->index;
3488 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3489 abort_fr->abort_mfi_phys_addr_hi = 0;
/* 0xFF sentinel: the completion handler overwrites this when done. */
3492 cmd->cmd_status = 0xFF;
3494 if (mrsas_issue_dcmd(sc, cmd)) {
3495 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3499 /* Wait for this cmd to complete */
3500 sc->chan = (void*)&cmd;
3502 if (cmd->cmd_status == 0xFF){
/* Sleep up to one second per iteration waiting for the wakeup. */
3503 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3508 if (total_time >= max_wait) {
3509 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3516 mrsas_release_mfi_cmd(cmd);
3521 * mrsas_complete_abort: Completes aborting a command
3522 * input: Adapter soft state
3523 * Cmd that was issued to abort another cmd
3525 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
3526 * to change after sending the command. This function is called from
3527 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
/*
 * Completion hook for an abort command: clears cmd_status (the 0xFF
 * sentinel the blocked issuer polls) and wakes the thread sleeping on
 * sc->chan in mrsas_issue_blocked_abort_cmd().
 */
3529 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/* Only synchronous (blocking) commands have a sleeper to wake. */
3531 if (cmd->sync_cmd) {
3533 cmd->cmd_status = 0;
3534 sc->chan = (void*)&cmd;
3535 wakeup_one((void *)&sc->chan);
3541 * mrsas_aen_handler: Callback function for AEN processing from thread context.
3542 * input: Adapter soft state
/*
 * Thread-context AEN processor: dispatches on the firmware event code in
 * sc->evt_detail_mem, refreshes the affected PD/LD lists, rescans the
 * matching CAM SIM, and finally re-registers for the next AEN sequence.
 */
3545 void mrsas_aen_handler(struct mrsas_softc *sc)
3547 union mrsas_evt_class_locale class_locale;
3553 device_printf(sc->mrsas_dev, "invalid instance!\n");
/* Dispatch on the event code the firmware wrote into evt_detail_mem. */
3557 if (sc->evt_detail_mem) {
3558 switch (sc->evt_detail_mem->code) {
/* PD hot-plug events: refresh the PD list and rescan sim_1. */
3559 case MR_EVT_PD_INSERTED:
3560 mrsas_get_pd_list(sc);
3561 mrsas_bus_scan_sim(sc, sc->sim_1);
3564 case MR_EVT_PD_REMOVED:
3565 mrsas_get_pd_list(sc);
3566 mrsas_bus_scan_sim(sc, sc->sim_1);
/* LD removal/offline: rescan sim_0 so CAM drops stale devices. */
3569 case MR_EVT_LD_OFFLINE:
3570 case MR_EVT_CFG_CLEARED:
3571 case MR_EVT_LD_DELETED:
3572 mrsas_bus_scan_sim(sc, sc->sim_0);
/* New LD: refresh the LD id table, then rescan sim_0. */
3575 case MR_EVT_LD_CREATED:
3576 mrsas_get_ld_list(sc);
3577 mrsas_bus_scan_sim(sc, sc->sim_0);
3580 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3581 case MR_EVT_FOREIGN_CFG_IMPORTED:
3582 case MR_EVT_LD_STATE_CHANGE:
3590 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
/* Full refresh path: re-read both PD and LD lists and rescan both SIMs. */
3594 mrsas_get_pd_list(sc);
3595 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3596 mrsas_bus_scan_sim(sc, sc->sim_1);
3597 mrsas_get_ld_list(sc);
3598 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3599 mrsas_bus_scan_sim(sc, sc->sim_0);
/* Re-arm AEN for the event following the one just processed. */
3602 seq_num = sc->evt_detail_mem->seq_num + 1;
3604 // Register AEN with FW for latest sequence number plus 1
3605 class_locale.members.reserved = 0;
3606 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3607 class_locale.members.class = MR_EVT_CLASS_DEBUG;
/* NOTE(review): presumably guards against a duplicate registration while
 * an AEN command is still outstanding — confirm against full source. */
3609 if (sc->aen_cmd != NULL )
/* aen_lock serializes registration against concurrent AEN completion. */
3612 mtx_lock(&sc->aen_lock);
3613 error = mrsas_register_aen(sc, seq_num,
3615 mtx_unlock(&sc->aen_lock);
3618 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3624 * mrsas_complete_aen: Completes AEN command
3625 * input: Adapter soft state
3626 * Cmd that was issued to register for AEN events
3628 * This function will be called from ISR and will continue
3629 * event processing from thread context by enqueuing task
3630 * in ev_tq (callback function "mrsas_aen_handler").
/*
 * ISR-side completion of the AEN command: releases the MFI command and,
 * unless the driver is being removed, defers the actual event processing
 * to thread context by enqueuing ev_task on ev_tq (mrsas_aen_handler).
 */
3632 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3635 * Don't signal app if it is just an aborted previously registered aen
3637 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3644 mrsas_release_mfi_cmd(cmd);
/* Hand off to the taskqueue so heavy work runs outside the ISR. */
3646 if (!sc->remove_in_progress)
3647 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
/* newbus device-method table: wires the mrsas entry points into the
 * device interface; bus methods fall through to the generic handlers. */
3652 static device_method_t mrsas_methods[] = {
3653 DEVMETHOD(device_probe, mrsas_probe),
3654 DEVMETHOD(device_attach, mrsas_attach),
3655 DEVMETHOD(device_detach, mrsas_detach),
3656 DEVMETHOD(device_suspend, mrsas_suspend),
3657 DEVMETHOD(device_resume, mrsas_resume),
3658 DEVMETHOD(bus_print_child, bus_generic_print_child),
3659 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* Driver definition (softc size lets newbus allocate per-device state)
 * and module registration on the PCI bus. */
3663 static driver_t mrsas_driver = {
3666 sizeof(struct mrsas_softc)
3669 static devclass_t mrsas_devclass;
3670 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
/* mrsas depends on the CAM SCSI subsystem. */
3671 MODULE_DEPEND(mrsas, cam, 1,1,1);