2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies, either expressed or implied, of the FreeBSD Project.
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
47 #include <cam/cam_ccb.h>
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92 struct mrsas_mfi_cmd *cmd_to_abort);
94 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
95 static struct mrsas_softc *
96 mrsas_get_softc_instance(struct cdev *dev,
97 u_long cmd, caddr_t arg);
99 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
100 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
102 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
103 struct mrsas_mfi_cmd *mfi_cmd);
104 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
105 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
106 int mrsas_init_adapter(struct mrsas_softc *sc);
107 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
108 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
109 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
110 int mrsas_ioc_init(struct mrsas_softc *sc);
111 int mrsas_bus_scan(struct mrsas_softc *sc);
112 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
113 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
114 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
115 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
116 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
117 int mrsas_reset_targets(struct mrsas_softc *sc);
119 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
120 struct mrsas_mfi_cmd *cmd);
122 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
124 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
125 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
126 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
127 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
128 void mrsas_disable_intr(struct mrsas_softc *sc);
129 void mrsas_enable_intr(struct mrsas_softc *sc);
130 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
131 void mrsas_free_mem(struct mrsas_softc *sc);
132 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
133 void mrsas_isr(void *arg);
134 void mrsas_teardown_intr(struct mrsas_softc *sc);
135 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
136 void mrsas_kill_hba(struct mrsas_softc *sc);
137 void mrsas_aen_handler(struct mrsas_softc *sc);
139 mrsas_write_reg(struct mrsas_softc *sc, int offset,
142 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
143 u_int32_t req_desc_hi);
144 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
146 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
147 struct mrsas_mfi_cmd *cmd, u_int8_t status);
148 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
150 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
151 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
153 extern int mrsas_cam_attach(struct mrsas_softc *sc);
154 extern void mrsas_cam_detach(struct mrsas_softc *sc);
155 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
156 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
157 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
158 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
159 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
160 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
161 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
163 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
164 extern void mrsas_xpt_release(struct mrsas_softc *sc);
165 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
166 mrsas_get_request_desc(struct mrsas_softc *sc,
168 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
169 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
170 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
171 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
173 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
174 union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
175 u_int32_t data_length, u_int8_t *sense);
177 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
178 u_int32_t req_desc_hi);
181 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
182 "MRSAS Driver Parameters");
185 * PCI device struct and table
188 typedef struct mrsas_ident {
196 MRSAS_CTLR_ID device_table[] = {
197 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
198 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
199 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
200 {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
201 {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
202 {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
203 {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
204 {0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
205 {0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
206 {0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
207 {0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
208 {0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
209 {0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
210 {0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
211 {0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
212 {0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
213 {0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
214 {0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
215 {0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
216 {0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
217 {0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
222 * Character device entry points
225 static struct cdevsw mrsas_cdevsw = {
226 .d_version = D_VERSION,
227 .d_open = mrsas_open,
228 .d_close = mrsas_close,
229 .d_read = mrsas_read,
230 .d_write = mrsas_write,
231 .d_ioctl = mrsas_ioctl,
232 .d_poll = mrsas_poll,
236 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
239 * In the cdevsw routines, we find our softc by using the si_drv1 member of
240 * struct cdev. We set this variable to point to our softc in our attach
241 * routine when we create the /dev entry.
244 mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
246 struct mrsas_softc *sc;
253 mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
255 struct mrsas_softc *sc;
262 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
264 struct mrsas_softc *sc;
270 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
272 struct mrsas_softc *sc;
279 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
281 u_int32_t i = 0, ret_val;
285 ret_val = mrsas_read_reg(sc, offset);
287 } while(ret_val == 0 && i < 3);
289 ret_val = mrsas_read_reg(sc, offset);
295 * Register Read/Write Functions
299 mrsas_write_reg(struct mrsas_softc *sc, int offset,
302 bus_space_tag_t bus_tag = sc->bus_tag;
303 bus_space_handle_t bus_handle = sc->bus_handle;
305 bus_space_write_4(bus_tag, bus_handle, offset, value);
309 mrsas_read_reg(struct mrsas_softc *sc, int offset)
311 bus_space_tag_t bus_tag = sc->bus_tag;
312 bus_space_handle_t bus_handle = sc->bus_handle;
314 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
319 * Interrupt Disable/Enable/Clear Functions
323 mrsas_disable_intr(struct mrsas_softc *sc)
325 u_int32_t mask = 0xFFFFFFFF;
328 sc->mask_interrupts = 1;
329 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
330 /* Dummy read to force pci flush */
331 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
335 mrsas_enable_intr(struct mrsas_softc *sc)
337 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
340 sc->mask_interrupts = 0;
341 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
342 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
344 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
345 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
349 mrsas_clear_intr(struct mrsas_softc *sc)
353 /* Read received interrupt */
354 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
356 /* Not our interrupt, so just return */
357 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
360 /* We got a reply interrupt */
365 * PCI Support Functions
368 static struct mrsas_ident *
369 mrsas_find_ident(device_t dev)
371 struct mrsas_ident *pci_device;
373 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
374 if ((pci_device->vendor == pci_get_vendor(dev)) &&
375 (pci_device->device == pci_get_device(dev)) &&
376 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
377 (pci_device->subvendor == 0xffff)) &&
378 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
379 (pci_device->subdevice == 0xffff)))
386 mrsas_probe(device_t dev)
388 static u_int8_t first_ctrl = 1;
389 struct mrsas_ident *id;
391 if ((id = mrsas_find_ident(dev)) != NULL) {
393 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
397 device_set_desc(dev, id->desc);
398 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
405 * mrsas_setup_sysctl: setup sysctl values for mrsas
406 * input: Adapter instance soft state
408 * Setup sysctl entries for mrsas driver.
411 mrsas_setup_sysctl(struct mrsas_softc *sc)
413 struct sysctl_ctx_list *sysctl_ctx = NULL;
414 struct sysctl_oid *sysctl_tree = NULL;
415 char tmpstr[80], tmpstr2[80];
418 * Setup the sysctl variable so the user can change the debug level
421 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
422 device_get_unit(sc->mrsas_dev));
423 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
425 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
426 if (sysctl_ctx != NULL)
427 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
429 if (sysctl_tree == NULL) {
430 sysctl_ctx_init(&sc->sysctl_ctx);
431 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
432 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
433 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
434 if (sc->sysctl_tree == NULL)
436 sysctl_ctx = &sc->sysctl_ctx;
437 sysctl_tree = sc->sysctl_tree;
439 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
440 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
441 "Disable the use of OCR");
443 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
444 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
445 strlen(MRSAS_VERSION), "driver version");
447 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
448 OID_AUTO, "reset_count", CTLFLAG_RD,
449 &sc->reset_count, 0, "number of ocr from start of the day");
451 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
452 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
453 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
455 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
456 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
457 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
459 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
460 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
461 "Driver debug level");
463 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
464 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
465 0, "Driver IO timeout value in mili-second.");
467 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
468 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
469 &sc->mrsas_fw_fault_check_delay,
470 0, "FW fault check thread delay in seconds. <default is 1 sec>");
472 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
473 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
474 &sc->reset_in_progress, 0, "ocr in progress status");
476 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
477 OID_AUTO, "block_sync_cache", CTLFLAG_RW,
478 &sc->block_sync_cache, 0,
479 "Block SYNC CACHE at driver. <default: 0, send it to FW>");
480 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
481 OID_AUTO, "stream detection", CTLFLAG_RW,
482 &sc->drv_stream_detection, 0,
483 "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
484 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
485 OID_AUTO, "prp_count", CTLFLAG_RD,
486 &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
487 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
488 OID_AUTO, "SGE holes", CTLFLAG_RD,
489 &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
493 * mrsas_get_tunables: get tunable parameters.
494 * input: Adapter instance soft state
496 * Get tunable parameters. This will help to debug driver at boot time.
499 mrsas_get_tunables(struct mrsas_softc *sc)
503 /* XXX default to some debugging for now */
505 (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
506 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
507 sc->mrsas_fw_fault_check_delay = 1;
509 sc->reset_in_progress = 0;
510 sc->block_sync_cache = 0;
511 sc->drv_stream_detection = 1;
514 * Grab the global variables.
516 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
519 * Grab the global variables.
521 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
523 /* Grab the unit-instance variables */
524 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
525 device_get_unit(sc->mrsas_dev));
526 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
530 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
531 * Used to get sequence number at driver load time.
532 * input: Adapter soft state
534 * Allocates DMAable memory for the event log info internal command.
537 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
541 /* Allocate get event log info command */
542 el_info_size = sizeof(struct mrsas_evt_log_info);
543 if (bus_dma_tag_create(sc->mrsas_parent_tag,
545 BUS_SPACE_MAXADDR_32BIT,
554 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
557 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
558 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
559 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
562 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
563 sc->el_info_mem, el_info_size, mrsas_addr_cb,
564 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
565 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
568 memset(sc->el_info_mem, 0, el_info_size);
573 * mrsas_free_evt_info_cmd: Free memory for Event log info command
574 * input: Adapter soft state
576 * Deallocates memory for the event log info internal command.
579 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
581 if (sc->el_info_phys_addr)
582 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
583 if (sc->el_info_mem != NULL)
584 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
585 if (sc->el_info_tag != NULL)
586 bus_dma_tag_destroy(sc->el_info_tag);
590 * mrsas_get_seq_num: Get latest event sequence number
591 * @sc: Adapter soft state
592 * @eli: Firmware event log sequence number information.
594 * Firmware maintains a log of all events in a non-volatile area.
595 * Driver get the sequence number using DCMD
596 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
600 mrsas_get_seq_num(struct mrsas_softc *sc,
601 struct mrsas_evt_log_info *eli)
603 struct mrsas_mfi_cmd *cmd;
604 struct mrsas_dcmd_frame *dcmd;
605 u_int8_t do_ocr = 1, retcode = 0;
607 cmd = mrsas_get_mfi_cmd(sc);
610 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
613 dcmd = &cmd->frame->dcmd;
615 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
616 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
617 mrsas_release_mfi_cmd(cmd);
620 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
622 dcmd->cmd = MFI_CMD_DCMD;
623 dcmd->cmd_status = 0x0;
625 dcmd->flags = MFI_FRAME_DIR_READ;
628 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
629 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
630 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
631 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
633 retcode = mrsas_issue_blocked_cmd(sc, cmd);
634 if (retcode == ETIMEDOUT)
639 * Copy the data back into callers buffer
641 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
642 mrsas_free_evt_log_info_cmd(sc);
646 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
648 mrsas_release_mfi_cmd(cmd);
655 * mrsas_register_aen: Register for asynchronous event notification
656 * @sc: Adapter soft state
657 * @seq_num: Starting sequence number
658 * @class_locale: Class of the event
660 * This function subscribes for events beyond the @seq_num
661 * and type @class_locale.
665 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
666 u_int32_t class_locale_word)
669 struct mrsas_mfi_cmd *cmd;
670 struct mrsas_dcmd_frame *dcmd;
671 union mrsas_evt_class_locale curr_aen;
672 union mrsas_evt_class_locale prev_aen;
675 * If there an AEN pending already (aen_cmd), check if the
676 * class_locale of that pending AEN is inclusive of the new AEN
677 * request we currently have. If it is, then we don't have to do
678 * anything. In other words, whichever events the current AEN request
679 * is subscribing to, have already been subscribed to. If the old_cmd
680 * is _not_ inclusive, then we have to abort that command, form a
681 * class_locale that is superset of both old and current and re-issue
685 curr_aen.word = class_locale_word;
689 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
692 * A class whose enum value is smaller is inclusive of all
693 * higher values. If a PROGRESS (= -1) was previously
694 * registered, then a new registration requests for higher
695 * classes need not be sent to FW. They are automatically
696 * included. Locale numbers don't have such hierarchy. They
699 if ((prev_aen.members.class <= curr_aen.members.class) &&
700 !((prev_aen.members.locale & curr_aen.members.locale) ^
701 curr_aen.members.locale)) {
703 * Previously issued event registration includes
704 * current request. Nothing to do.
708 curr_aen.members.locale |= prev_aen.members.locale;
710 if (prev_aen.members.class < curr_aen.members.class)
711 curr_aen.members.class = prev_aen.members.class;
713 sc->aen_cmd->abort_aen = 1;
714 ret_val = mrsas_issue_blocked_abort_cmd(sc,
718 printf("mrsas: Failed to abort previous AEN command\n");
724 cmd = mrsas_get_mfi_cmd(sc);
728 dcmd = &cmd->frame->dcmd;
730 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
733 * Prepare DCMD for aen registration
735 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
737 dcmd->cmd = MFI_CMD_DCMD;
738 dcmd->cmd_status = 0x0;
740 dcmd->flags = MFI_FRAME_DIR_READ;
743 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
744 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
745 dcmd->mbox.w[0] = seq_num;
746 sc->last_seq_num = seq_num;
747 dcmd->mbox.w[1] = curr_aen.word;
748 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
749 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
751 if (sc->aen_cmd != NULL) {
752 mrsas_release_mfi_cmd(cmd);
756 * Store reference to the cmd used to register for AEN. When an
757 * application wants us to register for AEN, we have to abort this
758 * cmd and re-register with a new EVENT LOCALE supplied by that app
763 * Issue the aen registration frame
765 if (mrsas_issue_dcmd(sc, cmd)) {
766 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
773 * mrsas_start_aen: Subscribes to AEN during driver load time
774 * @instance: Adapter soft state
777 mrsas_start_aen(struct mrsas_softc *sc)
779 struct mrsas_evt_log_info eli;
780 union mrsas_evt_class_locale class_locale;
783 /* Get the latest sequence number from FW */
785 memset(&eli, 0, sizeof(eli));
787 if (mrsas_get_seq_num(sc, &eli))
790 /* Register AEN with FW for latest sequence number plus 1 */
791 class_locale.members.reserved = 0;
792 class_locale.members.locale = MR_EVT_LOCALE_ALL;
793 class_locale.members.class = MR_EVT_CLASS_DEBUG;
795 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
801 * mrsas_setup_msix: Allocate MSI-x vectors
802 * @sc: adapter soft state
805 mrsas_setup_msix(struct mrsas_softc *sc)
809 for (i = 0; i < sc->msix_vectors; i++) {
810 sc->irq_context[i].sc = sc;
811 sc->irq_context[i].MSIxIndex = i;
812 sc->irq_id[i] = i + 1;
813 sc->mrsas_irq[i] = bus_alloc_resource_any
814 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
816 if (sc->mrsas_irq[i] == NULL) {
817 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
818 goto irq_alloc_failed;
820 if (bus_setup_intr(sc->mrsas_dev,
822 INTR_MPSAFE | INTR_TYPE_CAM,
823 NULL, mrsas_isr, &sc->irq_context[i],
824 &sc->intr_handle[i])) {
825 device_printf(sc->mrsas_dev,
826 "Cannot set up MSI-x interrupt handler\n");
827 goto irq_alloc_failed;
833 mrsas_teardown_intr(sc);
838 * mrsas_allocate_msix: Setup MSI-x vectors
839 * @sc: adapter soft state
842 mrsas_allocate_msix(struct mrsas_softc *sc)
844 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
845 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
846 " of vectors\n", sc->msix_vectors);
848 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
849 goto irq_alloc_failed;
854 mrsas_teardown_intr(sc);
859 * mrsas_attach: PCI entry point
860 * input: pointer to device struct
862 * Performs setup of PCI and registers, initializes mutexes and linked lists,
863 * registers interrupts and CAM, and initializes the adapter/controller to
867 mrsas_attach(device_t dev)
869 struct mrsas_softc *sc = device_get_softc(dev);
872 memset(sc, 0, sizeof(struct mrsas_softc));
874 /* Look up our softc and initialize its fields. */
876 sc->device_id = pci_get_device(dev);
878 switch (sc->device_id) {
882 case MRSAS_INTRUDER_24:
883 case MRSAS_CUTLASS_52:
884 case MRSAS_CUTLASS_53:
885 sc->mrsas_gen3_ctrl = 1;
891 case MRSAS_VENTURA_4PORT:
892 case MRSAS_CRUSADER_4PORT:
893 sc->is_ventura = true;
895 case MRSAS_AERO_10E1:
896 case MRSAS_AERO_10E5:
897 device_printf(dev, "Adapter is in configurable secure mode\n");
898 case MRSAS_AERO_10E2:
899 case MRSAS_AERO_10E6:
902 case MRSAS_AERO_10E0:
903 case MRSAS_AERO_10E3:
904 case MRSAS_AERO_10E4:
905 case MRSAS_AERO_10E7:
906 device_printf(dev, "Adapter is in non-secure mode\n");
911 mrsas_get_tunables(sc);
914 * Set up PCI and registers
916 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
917 if ((cmd & PCIM_CMD_PORTEN) == 0) {
920 /* Force the busmaster enable bit on. */
921 cmd |= PCIM_CMD_BUSMASTEREN;
922 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
924 /* For Ventura/Aero system registers are mapped to BAR0 */
925 if (sc->is_ventura || sc->is_aero)
926 sc->reg_res_id = PCIR_BAR(0); /* BAR0 offset */
928 sc->reg_res_id = PCIR_BAR(1); /* BAR1 offset */
930 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
931 &(sc->reg_res_id), RF_ACTIVE))
933 device_printf(dev, "Cannot allocate PCI registers\n");
936 sc->bus_tag = rman_get_bustag(sc->reg_res);
937 sc->bus_handle = rman_get_bushandle(sc->reg_res);
939 /* Intialize mutexes */
940 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
941 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
942 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
943 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
944 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
945 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
946 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
947 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
948 mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);
950 /* Intialize linked list */
951 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
952 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
954 mrsas_atomic_set(&sc->fw_outstanding, 0);
955 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
956 mrsas_atomic_set(&sc->prp_count, 0);
957 mrsas_atomic_set(&sc->sge_holes, 0);
959 sc->io_cmds_highwater = 0;
961 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
962 sc->UnevenSpanSupport = 0;
966 /* Initialize Firmware */
967 if (mrsas_init_fw(sc) != SUCCESS) {
970 /* Register mrsas to CAM layer */
971 if ((mrsas_cam_attach(sc) != SUCCESS)) {
972 goto attach_fail_cam;
975 if (mrsas_setup_irq(sc) != SUCCESS) {
976 goto attach_fail_irq;
978 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
979 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
980 device_get_unit(sc->mrsas_dev));
982 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
983 goto attach_fail_ocr_thread;
986 * After FW initialization and OCR thread creation
987 * we will defer the cdev creation, AEN setup on ICH callback
989 sc->mrsas_ich.ich_func = mrsas_ich_startup;
990 sc->mrsas_ich.ich_arg = sc;
991 if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
992 device_printf(sc->mrsas_dev, "Config hook is already established\n");
994 mrsas_setup_sysctl(sc);
997 attach_fail_ocr_thread:
998 if (sc->ocr_thread_active)
999 wakeup(&sc->ocr_chan);
1001 mrsas_teardown_intr(sc);
1003 mrsas_cam_detach(sc);
1005 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
1006 if (sc->msix_enable == 1)
1007 pci_release_msi(sc->mrsas_dev);
1009 mtx_destroy(&sc->sim_lock);
1010 mtx_destroy(&sc->aen_lock);
1011 mtx_destroy(&sc->pci_lock);
1012 mtx_destroy(&sc->io_lock);
1013 mtx_destroy(&sc->ioctl_lock);
1014 mtx_destroy(&sc->mpt_cmd_pool_lock);
1015 mtx_destroy(&sc->mfi_cmd_pool_lock);
1016 mtx_destroy(&sc->raidmap_lock);
1017 mtx_destroy(&sc->stream_lock);
1020 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
1021 sc->reg_res_id, sc->reg_res);
1027 * Interrupt config hook
/*
 * Deferred-startup body run via the interrupt config hook: creates the
 * /dev node(s), registers this controller with the global management
 * table, enables interrupts, queries system PDs and starts AEN.
 * NOTE(review): short lines (braces/blank lines) appear elided from this
 * extract — structure inferred from surrounding statements.
 */
1030 mrsas_ich_startup(void *arg)
1033 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1036 * Initialize a counting Semaphore to take care no. of concurrent IOCTLs
1038 sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
1039 IOCTL_SEMA_DESCRIPTION);
1041 /* Create a /dev entry for mrsas controller. */
1042 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
1043 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
1044 device_get_unit(sc->mrsas_dev));
/* Unit 0 additionally exposes a Linux-emulator compatible alias node. */
1046 if (device_get_unit(sc->mrsas_dev) == 0) {
1047 make_dev_alias_p(MAKEDEV_CHECKNAME,
1048 &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
1049 "megaraid_sas_ioctl_node");
/* Stash softc on the cdev so ioctl/poll handlers can recover it. */
1052 sc->mrsas_cdev->si_drv1 = sc;
1055 * Add this controller to mrsas_mgmt_info structure so that it can be
1056 * exported to management applications
/* First controller zeroes the shared table before registering itself. */
1058 if (device_get_unit(sc->mrsas_dev) == 0)
1059 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
1061 mrsas_mgmt_info.count++;
1062 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
1063 mrsas_mgmt_info.max_index++;
1065 /* Enable Interrupts */
1066 mrsas_enable_intr(sc);
1068 /* Call DCMD get_pd_info for all system PDs */
1069 for (i = 0; i < MRSAS_MAX_PD; i++) {
1070 if ((sc->target_list[i].target_id != 0xffff) &&
1072 mrsas_get_pd_info(sc, sc->target_list[i].target_id);
1075 /* Initiate AEN (Asynchronous Event Notification) */
1076 if (mrsas_start_aen(sc)) {
1077 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
1078 "Further events from the controller will not be communicated.\n"
1079 "Either there is some problem in the controller"
1080 "or the controller does not support AEN.\n"
1081 "Please contact to the SUPPORT TEAM if the problem persists\n");
/* Startup complete: tear down the config-intrhook so boot can proceed. */
1083 if (sc->mrsas_ich.ich_arg != NULL) {
1084 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
1085 config_intrhook_disestablish(&sc->mrsas_ich);
1086 sc->mrsas_ich.ich_arg = NULL;
1091 * mrsas_detach: De-allocates and teardown resources
1092 * input: pointer to device struct
1094 * This function is the entry point for device disconnect and detach.
1095 * It performs memory de-allocations, shutdown of the controller and various
1096 * teardown and destroy resource functions.
1099 mrsas_detach(device_t dev)
1101 struct mrsas_softc *sc;
1104 sc = device_get_softc(dev);
/* Flag first so in-flight paths (ioctl/ISR/OCR) see the teardown. */
1105 sc->remove_in_progress = 1;
1107 /* Destroy the character device so no other IOCTL will be handled */
1108 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1109 destroy_dev(sc->mrsas_linux_emulator_cdev);
1110 destroy_dev(sc->mrsas_cdev);
1113 * Take the instance off the instance array. Note that we will not
1114 * decrement the max_index. We let this array be sparse array
1116 for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1117 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1118 mrsas_mgmt_info.count--;
1119 mrsas_mgmt_info.sc_ptr[i] = NULL;
/* Kick the OCR thread awake, then wait out any in-progress reset. */
1124 if (sc->ocr_thread_active)
1125 wakeup(&sc->ocr_chan);
1126 while (sc->reset_in_progress) {
1128 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1129 mrsas_dprint(sc, MRSAS_INFO,
1130 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1132 pause("mr_shutdown", hz);
/* Wait for the OCR kthread itself to exit before tearing down state. */
1135 while (sc->ocr_thread_active) {
1137 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1138 mrsas_dprint(sc, MRSAS_INFO,
1140 "mrsas_ocr thread to quit ocr %d\n", i,
1141 sc->ocr_thread_active);
1143 pause("mr_shutdown", hz);
/* Quiesce firmware: flush cache, send shutdown DCMD, mask interrupts. */
1145 mrsas_flush_cache(sc);
1146 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1147 mrsas_disable_intr(sc);
/* Ventura/Aero only: free the per-LD stream-detection tracking array. */
1149 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
1150 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
1151 free(sc->streamDetectByLD[i], M_MRSAS);
1152 free(sc->streamDetectByLD, M_MRSAS);
1153 sc->streamDetectByLD = NULL;
1156 mrsas_cam_detach(sc);
1157 mrsas_teardown_intr(sc);
/* All users are gone; destroy every driver mutex. */
1159 mtx_destroy(&sc->sim_lock);
1160 mtx_destroy(&sc->aen_lock);
1161 mtx_destroy(&sc->pci_lock);
1162 mtx_destroy(&sc->io_lock);
1163 mtx_destroy(&sc->ioctl_lock);
1164 mtx_destroy(&sc->mpt_cmd_pool_lock);
1165 mtx_destroy(&sc->mfi_cmd_pool_lock);
1166 mtx_destroy(&sc->raidmap_lock);
1167 mtx_destroy(&sc->stream_lock);
1169 /* Wait for all the semaphores to be released */
1170 while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
1171 pause("mr_shutdown", hz);
1173 /* Destroy the counting semaphore created for Ioctl */
1174 sema_destroy(&sc->ioctl_count_sema);
1177 bus_release_resource(sc->mrsas_dev,
1178 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1180 if (sc->sysctl_tree != NULL)
1181 sysctl_ctx_free(&sc->sysctl_ctx);
/*
 * mrsas_shutdown: System-shutdown entry point.
 * Bounded variant of the detach quiesce path: waits up to 15 iterations
 * for an in-progress OCR, then flushes the controller cache, issues the
 * shutdown DCMD and masks interrupts. All waiting is skipped on panic.
 */
1187 mrsas_shutdown(device_t dev)
1189 struct mrsas_softc *sc;
1192 sc = device_get_softc(dev);
1193 sc->remove_in_progress = 1;
/* On panic, sleeping is not allowed — go straight to the quiesce. */
1194 if (!KERNEL_PANICKED()) {
1195 if (sc->ocr_thread_active)
1196 wakeup(&sc->ocr_chan);
/* Bounded wait (i < 15), unlike detach which waits indefinitely. */
1198 while (sc->reset_in_progress && i < 15) {
1200 if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
1201 mrsas_dprint(sc, MRSAS_INFO,
1202 "[%2d]waiting for OCR to be finished "
1203 "from %s\n", i, __func__);
1205 pause("mr_shutdown", hz);
1207 if (sc->reset_in_progress) {
1208 mrsas_dprint(sc, MRSAS_INFO,
1209 "gave up waiting for OCR to be finished\n");
1213 mrsas_flush_cache(sc);
1214 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1215 mrsas_disable_intr(sc);
1220 * mrsas_free_mem: Frees allocated memory
1221 * input: Adapter instance soft state
1223 * This function is called from mrsas_detach() to free previously allocated
/*
 * Each DMA region follows the same unwind order: unload the map if a
 * physical address was obtained, free the DMA memory if allocated, then
 * destroy the tag. Guards make each step safe against partial allocation.
 */
1227 mrsas_free_mem(struct mrsas_softc *sc)
1230 u_int32_t max_fw_cmds;
1231 struct mrsas_mfi_cmd *mfi_cmd;
1232 struct mrsas_mpt_cmd *mpt_cmd;
1235 * Free RAID map memory
/* Two copies (ping/pong) of the RAID map and local driver map. */
1237 for (i = 0; i < 2; i++) {
1238 if (sc->raidmap_phys_addr[i])
1239 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1240 if (sc->raidmap_mem[i] != NULL)
1241 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1242 if (sc->raidmap_tag[i] != NULL)
1243 bus_dma_tag_destroy(sc->raidmap_tag[i]);
1245 if (sc->ld_drv_map[i] != NULL)
1246 free(sc->ld_drv_map[i], M_MRSAS);
/* Two copies of the JBOD sequence-number map, same unwind pattern. */
1248 for (i = 0; i < 2; i++) {
1249 if (sc->jbodmap_phys_addr[i])
1250 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1251 if (sc->jbodmap_mem[i] != NULL)
1252 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1253 if (sc->jbodmap_tag[i] != NULL)
1254 bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1257 * Free version buffer memory
1259 if (sc->verbuf_phys_addr)
1260 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1261 if (sc->verbuf_mem != NULL)
1262 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1263 if (sc->verbuf_tag != NULL)
1264 bus_dma_tag_destroy(sc->verbuf_tag);
1268 * Free sense buffer memory
1270 if (sc->sense_phys_addr)
1271 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1272 if (sc->sense_mem != NULL)
1273 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1274 if (sc->sense_tag != NULL)
1275 bus_dma_tag_destroy(sc->sense_tag);
1278 * Free chain frame memory
1280 if (sc->chain_frame_phys_addr)
1281 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1282 if (sc->chain_frame_mem != NULL)
1283 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1284 if (sc->chain_frame_tag != NULL)
1285 bus_dma_tag_destroy(sc->chain_frame_tag);
1288 * Free IO Request memory
1290 if (sc->io_request_phys_addr)
1291 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1292 if (sc->io_request_mem != NULL)
1293 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1294 if (sc->io_request_tag != NULL)
1295 bus_dma_tag_destroy(sc->io_request_tag);
1298 * Free Reply Descriptor memory
1300 if (sc->reply_desc_phys_addr)
1301 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1302 if (sc->reply_desc_mem != NULL)
1303 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1304 if (sc->reply_desc_tag != NULL)
1305 bus_dma_tag_destroy(sc->reply_desc_tag);
1308 * Free event detail memory
1310 if (sc->evt_detail_phys_addr)
1311 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1312 if (sc->evt_detail_mem != NULL)
1313 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1314 if (sc->evt_detail_tag != NULL)
1315 bus_dma_tag_destroy(sc->evt_detail_tag);
1318 * Free PD info memory
1320 if (sc->pd_info_phys_addr)
1321 bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
1322 if (sc->pd_info_mem != NULL)
1323 bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
1324 if (sc->pd_info_tag != NULL)
1325 bus_dma_tag_destroy(sc->pd_info_tag);
/* Release each MFI command's DMA frame before destroying the frame tag. */
1330 if (sc->mfi_cmd_list) {
1331 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1332 mfi_cmd = sc->mfi_cmd_list[i];
1333 mrsas_free_frame(sc, mfi_cmd);
1336 if (sc->mficmd_frame_tag != NULL)
1337 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1340 * Free MPT internal command list
1342 max_fw_cmds = sc->max_fw_cmds;
1343 if (sc->mpt_cmd_list) {
1344 for (i = 0; i < max_fw_cmds; i++) {
1345 mpt_cmd = sc->mpt_cmd_list[i];
1346 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1347 free(sc->mpt_cmd_list[i], M_MRSAS);
1349 free(sc->mpt_cmd_list, M_MRSAS);
1350 sc->mpt_cmd_list = NULL;
1353 * Free MFI internal command list
1356 if (sc->mfi_cmd_list) {
1357 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1358 free(sc->mfi_cmd_list[i], M_MRSAS);
1360 free(sc->mfi_cmd_list, M_MRSAS);
1361 sc->mfi_cmd_list = NULL;
1364 * Free request descriptor memory
1366 free(sc->req_desc, M_MRSAS);
1367 sc->req_desc = NULL;
1370 * Destroy parent tag
/* Parent tag goes last: all child tags above were derived from it. */
1372 if (sc->mrsas_parent_tag != NULL)
1373 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1376 * Free ctrl_info memory
1378 if (sc->ctrl_info != NULL)
1379 free(sc->ctrl_info, M_MRSAS);
1383 * mrsas_teardown_intr: Teardown interrupt
1384 * input: Adapter instance soft state
1386 * This function is called from mrsas_detach() to teardown and release bus
1387 * interrupt resource.
/*
 * Legacy (INTx) path tears down the single vector; MSI-X path loops over
 * sc->msix_vectors and finally releases the MSI allocation from the PCI
 * layer.
 */
1390 mrsas_teardown_intr(struct mrsas_softc *sc)
1394 if (!sc->msix_enable) {
1395 if (sc->intr_handle[0])
1396 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1397 if (sc->mrsas_irq[0] != NULL)
1398 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1399 sc->irq_id[0], sc->mrsas_irq[0]);
/* Clear the handle so a repeated teardown is harmless. */
1400 sc->intr_handle[0] = NULL;
1402 for (i = 0; i < sc->msix_vectors; i++) {
1403 if (sc->intr_handle[i])
1404 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1405 sc->intr_handle[i]);
1407 if (sc->mrsas_irq[i] != NULL)
1408 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1409 sc->irq_id[i], sc->mrsas_irq[i]);
1411 sc->intr_handle[i] = NULL;
1413 pci_release_msi(sc->mrsas_dev);
1419 * mrsas_suspend: Suspend entry point
1420 * input: Device struct pointer
1422 * This function is the entry point for system suspend from the OS.
/* Intentionally a stub: suspend/hibernation is not yet implemented. */
1425 mrsas_suspend(device_t dev)
1427 /* This will be filled when the driver will have hibernation support */
1432 * mrsas_resume: Resume entry point
1433 * input: Device struct pointer
1435 * This function is the entry point for system resume from the OS.
/* Intentionally a stub: resume/hibernation is not yet implemented. */
1438 mrsas_resume(device_t dev)
1440 /* This will be filled when the driver will have hibernation support */
1445 * mrsas_get_softc_instance: Find softc instance based on cmd type
1447 * This function will return softc instance based on cmd type.
1448 * In some case, application fire ioctl on required management instance and
1449 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1450 * case, else get the softc instance from host_no provided by application in
/*
 * Returns NULL-initialised sc if lookup fails; callers must check.
 * NOTE(review): the branch using dev->si_drv1 for MRSAS_IOC_GET_PCI_INFO
 * appears elided from this extract — confirm against full source.
 */
1454 static struct mrsas_softc *
1455 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1457 struct mrsas_softc *sc = NULL;
1458 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1460 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1464 * get the Host number & the softc from data sent by the
/* Index the global management table by the caller-supplied host_no. */
1467 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1469 printf("There is no Controller number %d\n",
1471 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1472 mrsas_dprint(sc, MRSAS_FAULT,
1473 "Invalid Controller number %d\n", user_ioc->host_no);
1480 * mrsas_ioctl: IOCtl commands entry point.
1482 * This function is the entry point for IOCtls from the OS. It calls the
1483 * appropriate function for processing depending on the command received.
1486 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
1489 struct mrsas_softc *sc;
1491 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1493 sc = mrsas_get_softc_instance(dev, cmd, arg);
/* Refuse new ioctls once teardown has started or HW is unrecoverable. */
1497 if (sc->remove_in_progress ||
1498 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
1499 mrsas_dprint(sc, MRSAS_INFO,
1500 "Either driver remove or shutdown called or "
1501 "HW is in unrecoverable critical error state.\n")
1504 mtx_lock_spin(&sc->ioctl_lock);
1505 if (!sc->reset_in_progress) {
1506 mtx_unlock_spin(&sc->ioctl_lock);
1509 mtx_unlock_spin(&sc->ioctl_lock);
/* A reset is underway: block here until OCR finishes before dispatching. */
1510 while (sc->reset_in_progress) {
1512 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1513 mrsas_dprint(sc, MRSAS_INFO,
1514 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1516 pause("mr_ioctl", hz);
1521 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1522 #ifdef COMPAT_FREEBSD32
1523 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1526 * Decrement the Ioctl counting Semaphore before getting an
/* Bounds concurrent pass-through ioctls to MRSAS_MAX_IOCTL_CMDS. */
1529 sema_wait(&sc->ioctl_count_sema);
1531 ret = mrsas_passthru(sc, (void *)arg, cmd);
1533 /* Increment the Ioctl counting semaphore value */
1534 sema_post(&sc->ioctl_count_sema);
1537 case MRSAS_IOC_SCAN_BUS:
1538 ret = mrsas_bus_scan(sc);
1541 case MRSAS_IOC_GET_PCI_INFO:
/* Fill PCI location (bus/slot/function/domain) for the management app. */
1542 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1543 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1544 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1545 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1546 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1547 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1548 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1549 "pci device no: %d, pci function no: %d,"
1550 "pci domain ID: %d\n",
1551 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1552 pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1557 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1565 * mrsas_poll: poll entry point for mrsas driver fd
1567 * This function is the entry point for poll from the OS. It waits for some AEN
1568 * events to be triggered from the controller and notifies back.
1571 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1573 struct mrsas_softc *sc;
/* Readable immediately if an AEN has already fired. */
1578 if (poll_events & (POLLIN | POLLRDNORM)) {
1579 if (sc->mrsas_aen_triggered) {
1580 revents |= poll_events & (POLLIN | POLLRDNORM);
/* Otherwise register with selrecord() under aen_lock and wait. */
1584 if (poll_events & (POLLIN | POLLRDNORM)) {
1585 mtx_lock(&sc->aen_lock);
1586 sc->mrsas_poll_waiting = 1;
1587 selrecord(td, &sc->mrsas_select);
1588 mtx_unlock(&sc->aen_lock);
1595 * mrsas_setup_irq: Set up interrupt
1596 * input: Adapter instance soft state
1598 * This function sets up interrupts as a bus resource, with flags indicating
1599 * resource permitting contemporaneous sharing and for resource to activate
/*
 * Tries MSI-X first; on failure falls back to a single shared legacy
 * INTx vector allocated with RF_SHAREABLE | RF_ACTIVE.
 */
1603 mrsas_setup_irq(struct mrsas_softc *sc)
1605 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1606 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1609 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1610 sc->irq_context[0].sc = sc;
1611 sc->irq_context[0].MSIxIndex = 0;
1613 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1614 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1615 if (sc->mrsas_irq[0] == NULL) {
1616 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
/* Filter is NULL: mrsas_isr runs as an ithread handler (INTR_MPSAFE). */
1620 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1621 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1622 &sc->irq_context[0], &sc->intr_handle[0])) {
1623 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1632 * mrsas_isr: ISR entry point
1633 * input: argument pointer
1635 * This function is the interrupt service routine entry point. There are two
1636 * types of interrupts, state change interrupt and response interrupt. If an
1637 * interrupt is not ours, we just return.
1640 mrsas_isr(void *arg)
1642 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1643 struct mrsas_softc *sc = irq_context->sc;
/* Interrupts are administratively masked (e.g. during reset): ignore. */
1646 if (sc->mask_interrupts)
/* Legacy INTx only: read/clear status to check the interrupt is ours. */
1649 if (!sc->msix_vectors) {
1650 status = mrsas_clear_intr(sc);
1654 /* If we are resetting, bail */
1655 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1656 printf(" Entered into ISR when OCR is going active. \n");
1657 mrsas_clear_intr(sc);
1660 /* Process for reply request and clear response interrupt */
1661 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1662 mrsas_clear_intr(sc);
1668 * mrsas_complete_cmd: Process reply request
1669 * input: Adapter instance soft state
1671 * This function is called from mrsas_isr() to process reply request and clear
1672 * response interrupt. Processing of the reply request entails walking
1673 * through the reply descriptor array for the command request pended from
1674 * Firmware. We look at the Function field to determine the command type and
1675 * perform the appropriate action. Before we return, we clear the response
1679 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1681 Mpi2ReplyDescriptorsUnion_t *desc;
1682 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1683 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1684 struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
1685 struct mrsas_mfi_cmd *cmd_mfi;
1686 u_int8_t reply_descript_type, *sense;
1687 u_int16_t smid, num_completed;
1688 u_int8_t status, extStatus;
1689 union desc_value desc_val;
1690 PLD_LOAD_BALANCE_INFO lbinfo;
1691 u_int32_t device_id, data_length;
1692 int threshold_reply_count = 0;
1694 MR_TASK_MANAGE_REQUEST *mr_tm_req;
1695 MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
1698 /* If we have a hardware error, not need to continue */
1699 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Position at this queue's next unprocessed reply descriptor. */
1702 desc = sc->reply_desc_mem;
1703 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1704 + sc->last_reply_idx[MSIxIndex];
1706 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1708 desc_val.word = desc->Words;
1711 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1713 /* Find our reply descriptor for the command and process */
/* All-ones descriptor words mark an unused (not yet posted) slot. */
1714 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
/* SMID is 1-based; index into the MPT command list is SMID - 1. */
1715 smid = reply_desc->SMID;
1716 cmd_mpt = sc->mpt_cmd_list[smid - 1];
1717 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1719 status = scsi_io_req->RaidContext.raid_context.status;
1720 extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
1721 sense = cmd_mpt->sense;
1722 data_length = scsi_io_req->DataLength;
1724 switch (scsi_io_req->Function) {
1725 case MPI2_FUNCTION_SCSI_TASK_MGMT:
1727 mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
1728 mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
1729 &mr_tm_req->TmRequest;
1730 device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
1731 "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
/* Wake the thread blocked on the task-management completion. */
1733 wakeup_one((void *)&sc->ocr_chan);
1735 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
1736 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1737 lbinfo = &sc->load_balance_info[device_id];
1738 /* R1 load balancing for READ */
1739 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1740 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1741 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1743 /* Fall thru and complete IO */
1744 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
/* Non-RAID1-write path: complete the single command directly. */
1745 if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
1746 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
1747 extStatus, data_length, sense);
1748 mrsas_cmd_done(sc, cmd_mpt);
1749 mrsas_atomic_dec(&sc->fw_outstanding);
1752 * If the peer Raid 1/10 fast path failed,
1753 * mark IO as failed to the scsi layer.
1754 * Overwrite the current status by the failed status
1755 * and make sure that if any command fails,
1756 * driver returns fail status to CAM.
1758 cmd_mpt->cmd_completed = 1;
1759 r1_cmd = cmd_mpt->peer_cmd;
/* Complete to CAM only when BOTH mirror-arm commands have returned. */
1760 if (r1_cmd->cmd_completed) {
1761 if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
1762 status = r1_cmd->io_request->RaidContext.raid_context.status;
1763 extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
1764 data_length = r1_cmd->io_request->DataLength;
1765 sense = r1_cmd->sense;
1767 r1_cmd->ccb_ptr = NULL;
1768 if (r1_cmd->callout_owner) {
1769 callout_stop(&r1_cmd->cm_callout);
1770 r1_cmd->callout_owner = false;
/* Two decrements: one for the peer command, one for this command. */
1772 mrsas_release_mpt_cmd(r1_cmd);
1773 mrsas_atomic_dec(&sc->fw_outstanding);
1774 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
1775 extStatus, data_length, sense);
1776 mrsas_cmd_done(sc, cmd_mpt);
1777 mrsas_atomic_dec(&sc->fw_outstanding);
1781 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
1782 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1784 * Make sure NOT TO release the mfi command from the called
1785 * function's context if it is fired with issue_polled call.
1786 * And also make sure that the issue_polled call should only be
1787 * used if INTERRUPT IS DISABLED.
1789 if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
1790 mrsas_release_mfi_cmd(cmd_mfi);
1792 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
/* Advance (and wrap) this queue's consumer index. */
1796 sc->last_reply_idx[MSIxIndex]++;
1797 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1798 sc->last_reply_idx[MSIxIndex] = 0;
1800 desc->Words = ~((uint64_t)0x00); /* set it back to all
1803 threshold_reply_count++;
1805 /* Get the next reply descriptor */
1806 if (!sc->last_reply_idx[MSIxIndex]) {
1807 desc = sc->reply_desc_mem;
1808 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1812 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1813 desc_val.word = desc->Words;
1815 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1817 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1821 * Write to reply post index after completing threshold reply
1822 * count and still there are more replies in reply queue
1823 * pending to be completed.
/* Periodic index write-back keeps firmware posting while we drain. */
1825 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1826 if (sc->msix_enable) {
1827 if (sc->msix_combined)
1828 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1829 ((MSIxIndex & 0x7) << 24) |
1830 sc->last_reply_idx[MSIxIndex]);
1832 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1833 sc->last_reply_idx[MSIxIndex]);
1835 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1836 reply_post_host_index), sc->last_reply_idx[0]);
1838 threshold_reply_count = 0;
1842 /* No match, just return */
1844 if (num_completed == 0)
1846 /* Clear response interrupt */
/* Final index write-back acknowledges everything consumed above. */
1847 if (sc->msix_enable) {
1848 if (sc->msix_combined) {
1849 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1850 ((MSIxIndex & 0x7) << 24) |
1851 sc->last_reply_idx[MSIxIndex]);
1853 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1854 sc->last_reply_idx[MSIxIndex]);
1856 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1857 reply_post_host_index), sc->last_reply_idx[0]);
1863 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1864 * input: Adapter instance soft state
1866 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1867 * It checks the command status and maps the appropriate CAM status for the
/* NOTE(review): summary line above says "Allocate DMAable memory" but the
 * code maps firmware status to CAM status — header looks copy-pasted. */
1871 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
1872 u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
1874 struct mrsas_softc *sc = cmd->sc;
1875 u_int8_t *sense_data;
1879 ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1881 case MFI_STAT_SCSI_IO_FAILED:
1882 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1883 ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1884 sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
1886 /* For now just copy 18 bytes back */
1887 memcpy(sense_data, sense, 18);
1888 ccb_ptr->csio.sense_len = 18;
1889 ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1892 case MFI_STAT_LD_OFFLINE:
1893 case MFI_STAT_DEVICE_NOT_FOUND:
/* Nonzero LUN on a missing device reports invalid LUN, else no device. */
1894 if (ccb_ptr->ccb_h.target_lun)
1895 ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1897 ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1899 case MFI_STAT_CONFIG_SEQ_MISMATCH:
/* Stale config sequence: ask CAM to requeue and retry the I/O. */
1900 ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1903 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1904 ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1905 ccb_ptr->csio.scsi_status = status;
1911 * mrsas_alloc_mem: Allocate DMAable memory
1912 * input: Adapter instance soft state
1914 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1915 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1916 * Kernel virtual address. Callback argument is physical memory address.
/*
 * Each region follows the same triple: bus_dma_tag_create (child of the
 * parent tag, 32-bit constrained), bus_dmamem_alloc, bus_dmamap_load with
 * mrsas_addr_cb recording the bus address into the matching *_phys_addr.
 */
1919 mrsas_alloc_mem(struct mrsas_softc *sc)
1921 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
1922 evt_detail_size, count, pd_info_size;
1925 * Allocate parent DMA tag
1927 if (bus_dma_tag_create(NULL, /* parent */
1930 BUS_SPACE_MAXADDR, /* lowaddr */
1931 BUS_SPACE_MAXADDR, /* highaddr */
1932 NULL, NULL, /* filter, filterarg */
1933 MAXPHYS, /* maxsize */
1934 sc->max_num_sge, /* nsegments */
1935 MAXPHYS, /* maxsegsize */
1937 NULL, NULL, /* lockfunc, lockarg */
1938 &sc->mrsas_parent_tag /* tag */
1940 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1944 * Allocate for version buffer
1946 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1947 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1949 BUS_SPACE_MAXADDR_32BIT,
1958 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1961 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1962 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1963 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1966 bzero(sc->verbuf_mem, verbuf_size);
1967 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1968 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1970 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1974 * Allocate IO Request Frames
1976 io_req_size = sc->io_frames_alloc_sz;
1977 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1979 BUS_SPACE_MAXADDR_32BIT,
1987 &sc->io_request_tag)) {
1988 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1991 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1992 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1993 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1996 bzero(sc->io_request_mem, io_req_size);
1997 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1998 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1999 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
2000 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
2004 * Allocate Chain Frames
2006 chain_frame_size = sc->chain_frames_alloc_sz;
2007 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2009 BUS_SPACE_MAXADDR_32BIT,
2017 &sc->chain_frame_tag)) {
2018 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
2021 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
2022 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
2023 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
2026 bzero(sc->chain_frame_mem, chain_frame_size);
2027 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
2028 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
2029 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
2030 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
/* One reply ring per MSI-X vector (at least one for legacy INTx). */
2033 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2035 * Allocate Reply Descriptor Array
2037 reply_desc_size = sc->reply_alloc_sz * count;
2038 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2040 BUS_SPACE_MAXADDR_32BIT,
2048 &sc->reply_desc_tag)) {
2049 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
2052 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
2053 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
2054 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
2057 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
2058 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
2059 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
2060 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
2064 * Allocate Sense Buffer Array. Keep in lower 4GB
2066 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
2067 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2069 BUS_SPACE_MAXADDR_32BIT,
2078 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
2081 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
2082 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
2083 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
2086 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
2087 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
2089 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
2094 * Allocate for Event detail structure
2096 evt_detail_size = sizeof(struct mrsas_evt_detail);
2097 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2099 BUS_SPACE_MAXADDR_32BIT,
2107 &sc->evt_detail_tag)) {
2108 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
2111 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
2112 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
2113 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
2116 bzero(sc->evt_detail_mem, evt_detail_size);
2117 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
2118 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
2119 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
2120 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
2125 * Allocate for PD INFO structure
2127 pd_info_size = sizeof(struct mrsas_pd_info);
2128 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2130 BUS_SPACE_MAXADDR_32BIT,
2138 &sc->pd_info_tag)) {
2139 device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
2142 if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
2143 BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
2144 device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
2147 bzero(sc->pd_info_mem, pd_info_size);
2148 if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
2149 sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
2150 &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
2151 device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
2156 * Create a dma tag for data buffers; size will be the maximum
2157 * possible I/O size (280kB).
2159 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2166 sc->max_num_sge, /* nsegments */
2172 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
2179 * mrsas_addr_cb: Callback function of bus_dmamap_load()
2180 * input: callback argument, machine dependent type
2181 * that describes DMA segments, number of segments, error code
2183 * This function is for the driver to receive mapping information resultant of
2184 * the bus_dmamap_load(). The information is actually not being used, but the
2185 * address is saved anyway.
2188 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
/* Record the first segment's bus address into the caller-supplied slot. */
2193 *addr = segs[0].ds_addr;
2197 * mrsas_setup_raidmap: Set up RAID map.
2198 * input: Adapter instance soft state
2200 * Allocate DMA memory for the RAID maps and perform setup.
/*
 * Allocates two copies (ping/pong) of both the local driver map
 * (malloc'd) and the firmware RAID map (DMA, 32-bit addressable), then
 * pulls/syncs the map from firmware.
 */
2203 mrsas_setup_raidmap(struct mrsas_softc *sc)
2207 for (i = 0; i < 2; i++) {
2209 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2210 /* Do Error handling */
2211 if (!sc->ld_drv_map[i]) {
2212 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
/* Second-copy failure: release the first copy before aborting. */
2215 free(sc->ld_drv_map[0], M_MRSAS);
2216 /* ABORT driver initialization */
2221 for (int i = 0; i < 2; i++) {
2222 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2224 BUS_SPACE_MAXADDR_32BIT,
2232 &sc->raidmap_tag[i])) {
2233 device_printf(sc->mrsas_dev,
2234 "Cannot allocate raid map tag.\n");
2237 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2238 (void **)&sc->raidmap_mem[i],
2239 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2240 device_printf(sc->mrsas_dev,
2241 "Cannot allocate raidmap memory.\n");
2244 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2246 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2247 sc->raidmap_mem[i], sc->max_map_sz,
2248 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2250 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2253 if (!sc->raidmap_mem[i]) {
2254 device_printf(sc->mrsas_dev,
2255 "Cannot allocate memory for raid map.\n");
/* Fetch map from firmware; on success push our sync acknowledgement. */
2260 if (!mrsas_get_map_info(sc))
2261 mrsas_sync_map_info(sc);
2270 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2271 * @sc: Adapter soft state
2273 * Return 0 on success.
/*
 * Allocates two DMA copies of the PD config sequence-number map used for
 * JBOD fast-path, then syncs it with firmware. sc->use_seqnum_jbod_fp is
 * set only when the feature is supported and both syncs succeed; any
 * failure degrades gracefully by clearing the flag.
 */
2276 megasas_setup_jbod_map(struct mrsas_softc *sc)
2279 uint32_t pd_seq_map_sz;
/* Struct already contains one MR_PD_CFG_SEQ, hence the "- 1". */
2281 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2282 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2284 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2285 sc->use_seqnum_jbod_fp = 0;
/* Already allocated (e.g. re-init after OCR): skip straight to sync. */
2288 if (sc->jbodmap_mem[0])
2291 for (i = 0; i < 2; i++) {
2292 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2294 BUS_SPACE_MAXADDR_32BIT,
2302 &sc->jbodmap_tag[i])) {
2303 device_printf(sc->mrsas_dev,
2304 "Cannot allocate jbod map tag.\n");
2307 if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2308 (void **)&sc->jbodmap_mem[i],
2309 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2310 device_printf(sc->mrsas_dev,
2311 "Cannot allocate jbod map memory.\n");
2314 bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2316 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2317 sc->jbodmap_mem[i], pd_seq_map_sz,
2318 mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2320 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2323 if (!sc->jbodmap_mem[i]) {
2324 device_printf(sc->mrsas_dev,
2325 "Cannot allocate memory for jbod map.\n");
2326 sc->use_seqnum_jbod_fp = 0;
/* Enable fast-path only if both the read and write syncs succeed. */
2332 if (!megasas_sync_pd_seq_num(sc, false) &&
2333 !megasas_sync_pd_seq_num(sc, true))
2334 sc->use_seqnum_jbod_fp = 1;
2336 sc->use_seqnum_jbod_fp = 0;
2338 device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2342 * mrsas_init_fw: Initialize Firmware
2343 * input: Adapter soft state
2345 * Calls transition_to_ready() to make sure Firmware is in operational state and
2346 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2347 * issues internal commands to get the controller info after the IOC_INIT
2348 * command response is received by Firmware. Note: code relating to
2349 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2350 * is left here as placeholder.
2353 mrsas_init_fw(struct mrsas_softc *sc)
2356 int ret, loop, ocr = 0;
2357 u_int32_t max_sectors_1;
2358 u_int32_t max_sectors_2;
2359 u_int32_t tmp_sectors;
2360 u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
2361 int msix_enable = 0;
2362 int fw_msix_count = 0;
2365 /* Make sure Firmware is ready */
2366 ret = mrsas_transition_to_ready(sc, ocr);
2367 if (ret != SUCCESS) {
/* Ventura/Aero expose the supported RAID-map size in scratch pad 3. */
2370 if (sc->is_ventura || sc->is_aero) {
2371 scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
2373 device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
2375 sc->maxRaidMapSize = ((scratch_pad_3 >>
2376 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
2377 MR_MAX_RAID_MAP_SIZE_MASK);
2379 /* MSI-x index 0- reply post host index register */
2380 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2381 /* Check if MSI-X is supported while in ready state */
2382 msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2385 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2386 outbound_scratch_pad_2));
2388 /* Check max MSI-X vectors */
2389 if (sc->device_id == MRSAS_TBOLT) {
2390 sc->msix_vectors = (scratch_pad_2
2391 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2392 fw_msix_count = sc->msix_vectors;
2394 /* Invader/Fury supports 96 MSI-X vectors */
2395 sc->msix_vectors = ((scratch_pad_2
2396 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2397 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2398 fw_msix_count = sc->msix_vectors;
/* "Combined" mode: gen3 with >8 vectors, or Ventura/Aero with >16. */
2400 if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
2401 ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
2402 sc->msix_combined = true;
2404 * Save 1-15 reply post index
2405 * address to local memory Index 0
2406 * is already saved from reg offset
2407 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
2409 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2411 sc->msix_reg_offset[loop] =
2412 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2417 /* Don't bother allocating more MSI-X vectors than cpus */
2418 sc->msix_vectors = min(sc->msix_vectors,
2421 /* Allocate MSI-x vectors */
2422 if (mrsas_allocate_msix(sc) == SUCCESS)
2423 sc->msix_enable = 1;
2425 sc->msix_enable = 0;
2427 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2428 "Online CPU %d Current MSIX <%d>\n",
2429 fw_msix_count, mp_ncpus, sc->msix_vectors);
2432 * MSI-X host index 0 is common for all adapter.
2433 * It is used for all MPT based Adapters.
2435 if (sc->msix_combined) {
2436 sc->msix_reg_offset[0] =
2437 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
/* Sends IOC_INIT and sizes all command/reply pools. */
2439 if (mrsas_init_adapter(sc) != SUCCESS) {
2440 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2444 if (sc->is_ventura || sc->is_aero) {
2445 scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2446 outbound_scratch_pad_4));
2447 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
2448 sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
2450 device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
2453 /* Allocate internal commands for pass-thru */
2454 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2455 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2458 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2459 if (!sc->ctrl_info) {
2460 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2464 * Get the controller info from FW, so that the MAX VD support
2465 * availability can be decided.
2467 if (mrsas_get_ctrl_info(sc)) {
2468 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2471 sc->secure_jbod_support =
2472 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2474 if (sc->secure_jbod_support)
2475 device_printf(sc->mrsas_dev, "FW supports SED \n");
2477 if (sc->use_seqnum_jbod_fp)
2478 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2480 if (sc->support_morethan256jbod)
2481 device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
2483 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2484 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2485 "There seems to be some problem in the controller\n"
2486 "Please contact to the SUPPORT TEAM if the problem persists\n");
2488 megasas_setup_jbod_map(sc);
/* Invalidate the task-management target list before use. */
2491 memset(sc->target_list, 0,
2492 MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
2493 for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
2494 sc->target_list[i].target_id = 0xffff;
2496 /* For pass-thru, get PD/LD list and controller info */
2497 memset(sc->pd_list, 0,
2498 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2499 if (mrsas_get_pd_list(sc) != SUCCESS) {
2500 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2503 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2504 if (mrsas_get_ld_list(sc) != SUCCESS) {
/* Fixed typo in the error message: "lsit" -> "list". */
2505 device_printf(sc->mrsas_dev, "Get LD list failed.\n");
/* Optional per-LD stream-detection pool (Ventura/Aero only). */
2509 if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
2510 sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
2511 MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
2512 if (!sc->streamDetectByLD) {
2513 device_printf(sc->mrsas_dev,
2514 "unable to allocate stream detection for pool of LDs\n");
2517 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
2518 sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
2519 if (!sc->streamDetectByLD[i]) {
2520 device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
/* Unwind partially-built pool on allocation failure. */
2521 for (j = 0; j < i; ++j)
2522 free(sc->streamDetectByLD[j], M_MRSAS);
2523 free(sc->streamDetectByLD, M_MRSAS);
2524 sc->streamDetectByLD = NULL;
2527 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
2528 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
2533 * Compute the max allowed sectors per IO: The controller info has
2534 * two limits on max sectors. Driver should use the minimum of these
2537 * 1 << stripe_sz_ops.min = max sectors per strip
2539 * Note that older firmwares ( < FW ver 30) didn't report information to
2540 * calculate max_sectors_1. So the number ended up as zero always.
2543 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2544 sc->ctrl_info->max_strips_per_io;
2545 max_sectors_2 = sc->ctrl_info->max_request_size;
2546 tmp_sectors = min(max_sectors_1, max_sectors_2);
2547 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
/* Clamp to the FW-reported limit when FW actually reported one. */
2549 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2550 sc->max_sectors_per_req = tmp_sectors;
2552 sc->disableOnlineCtrlReset =
2553 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2554 sc->UnevenSpanSupport =
2555 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2556 if (sc->UnevenSpanSupport) {
2557 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2558 sc->UnevenSpanSupport);
/* fast_path_io is set only when the RAID map validates cleanly. */
2560 if (MR_ValidateMapInfo(sc))
2561 sc->fast_path_io = 1;
2563 sc->fast_path_io = 0;
2566 device_printf(sc->mrsas_dev, "max_fw_cmds: %u max_scsi_cmds: %u\n",
2567 sc->max_fw_cmds, sc->max_scsi_cmds);
2572 * mrsas_init_adapter: Initializes the adapter/controller
2573 * input: Adapter soft state
2575 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2576 * ROC/controller. The FW register is read to determined the number of
2577 * commands that is supported. All memory allocations for IO is based on
2578 * max_cmd. Appropriate calculations are performed in this function.
2581 mrsas_init_adapter(struct mrsas_softc *sc)
2584 u_int32_t scratch_pad_2;
2588 /* Read FW status register */
2589 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad))
2591 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2593 /* Decrement the max supported by 1, to correlate with FW */
2594 sc->max_fw_cmds = sc->max_fw_cmds - 1;
/* MFI (management) commands are carved out of the FW command pool. */
2595 sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;
2597 /* Determine allocation size of command frames */
/* Reply queue depth: (max_fw_cmds + 1) rounded up to 16, then doubled. */
2598 sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
2599 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
2600 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2601 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
2602 (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
2603 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2604 outbound_scratch_pad_2));
2606 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2607 * Firmware support extended IO chain frame which is 4 time more
2608 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2609 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
2611 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2612 sc->max_chain_frame_sz =
2613 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2616 sc->max_chain_frame_sz =
2617 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2620 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
/* Each SGE is 16 bytes; count how many fit in the main message... */
2621 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2622 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
/* ...and how many fit in one chain frame. */
2624 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
/* -2 accounts for the chain-element SGEs themselves. */
2625 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2627 mrsas_dprint(sc, MRSAS_INFO,
2628 "max sge: 0x%x, max chain frame size: 0x%x, "
2629 "max fw cmd: 0x%x\n", sc->max_num_sge,
2630 sc->max_chain_frame_sz, sc->max_fw_cmds);
2632 /* Used for pass thru MFI frame (DCMD) */
2633 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2635 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2636 sizeof(MPI2_SGE_IO_UNION)) / 16;
/* One reply index per MSI-X vector (at least one). */
2638 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2640 for (i = 0; i < count; i++)
2641 sc->last_reply_idx[i] = 0;
2643 ret = mrsas_alloc_mem(sc);
2647 ret = mrsas_alloc_mpt_cmds(sc);
2651 ret = mrsas_ioc_init(sc);
2659 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2660 * input: Adapter soft state
2662 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
/*
 * Creates a dedicated DMA tag, allocates and zeroes the IOC INIT buffer,
 * and loads it to obtain sc->ioc_init_phys_mem. The extra 1024 bytes in
 * front of the MPI2_IOC_INIT_REQUEST hold the legacy MFI init frame (see
 * mrsas_ioc_init, which places the IOCInitMsg at offset 1024).
 */
2665 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2669 /* Allocate IOC INIT command */
2670 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2671 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2673 BUS_SPACE_MAXADDR_32BIT,
2681 &sc->ioc_init_tag)) {
2682 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2685 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2686 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2687 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2690 bzero(sc->ioc_init_mem, ioc_init_size);
/* mrsas_addr_cb writes the bus address into sc->ioc_init_phys_mem. */
2691 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2692 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2693 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2694 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2701 * mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
2702 * input: Adapter soft state
2704 * Deallocates memory of the IOC Init cmd.
/* Tears down in reverse order of mrsas_alloc_ioc_cmd: unload the map,
 * free the DMA memory, then destroy the tag; each step is guarded so a
 * partially-initialized state is safe to free. */
2707 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2709 if (sc->ioc_init_phys_mem)
2710 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2711 if (sc->ioc_init_mem != NULL)
2712 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2713 if (sc->ioc_init_tag != NULL)
2714 bus_dma_tag_destroy(sc->ioc_init_tag);
2718 * mrsas_ioc_init: Sends IOC Init command to FW
2719 * input: Adapter soft state
2721 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
/*
 * Builds an MPI2 IOC INIT request (at offset 1024 of the DMA buffer) wrapped
 * in a legacy MFI init frame (at offset 0), fires it as an MFA descriptor,
 * then polls init_frame->cmd_status (0xFF = pending, 0 = success) for up to
 * max_wait seconds. Also latches FW sync-cache and atomic-descriptor
 * capabilities from scratch pad 2. The DMA buffer is freed before return.
 */
2724 mrsas_ioc_init(struct mrsas_softc *sc)
2726 struct mrsas_init_frame *init_frame;
2727 pMpi2IOCInitRequest_t IOCInitMsg;
2728 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
2729 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2730 bus_addr_t phys_addr;
2732 u_int32_t scratch_pad_2;
2734 /* Allocate memory for the IOC INIT command */
2735 if (mrsas_alloc_ioc_cmd(sc)) {
2736 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/* Unless sync-cache is administratively blocked, learn whether FW can
 * handle SYNCHRONIZE_CACHE itself (scratch pad 2 capability bit). */
2740 if (!sc->block_sync_cache) {
2741 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2742 outbound_scratch_pad_2));
2743 sc->fw_sync_cache_support = (scratch_pad_2 &
2744 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
/* MPI2 IOC INIT message lives 1024 bytes into the DMA buffer. */
2747 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
2748 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
2749 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2750 IOCInitMsg->MsgVersion = MPI2_VERSION;
2751 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
2752 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
2753 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
2754 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
2755 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
2756 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
2757 IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
/* The MFI wrapper frame sits at the start of the same buffer. */
2759 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
2760 init_frame->cmd = MFI_CMD_INIT;
/* 0xFF is the "still pending" sentinel polled below. */
2761 init_frame->cmd_status = 0xFF;
2762 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2764 /* driver support Extended MSIX */
2765 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
2766 init_frame->driver_operations.
2767 mfi_capabilities.support_additional_msix = 1;
2769 if (sc->verbuf_mem) {
2770 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
2772 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
2773 init_frame->driver_ver_hi = 0;
/* Advertise driver capabilities to FW. */
2775 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
2776 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
2777 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
2778 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
2779 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
/* Point the MFI frame at the embedded MPI2 IOC INIT message. */
2780 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
2781 init_frame->queue_info_new_phys_addr_lo = phys_addr;
2782 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
2784 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
2785 req_desc.MFAIo.RequestFlags =
2786 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2788 mrsas_disable_intr(sc);
2789 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
2790 mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);
2793 * Poll response timer to wait for Firmware response. While this
2794 * timer with the DELAY call could block CPU, the time interval for
2795 * this is only 1 millisecond.
2797 if (init_frame->cmd_status == 0xFF) {
2798 for (i = 0; i < (max_wait * 1000); i++) {
2799 if (init_frame->cmd_status == 0xFF)
2805 if (init_frame->cmd_status == 0)
2806 mrsas_dprint(sc, MRSAS_OCR,
2807 "IOC INIT response received from FW.\n");
2809 if (init_frame->cmd_status == 0xFF)
2810 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
2812 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
/* Post-init: check whether FW supports atomic request descriptors. */
2817 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2818 outbound_scratch_pad_2));
2819 sc->atomic_desc_support = (scratch_pad_2 &
2820 MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
2821 device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
2822 sc->atomic_desc_support ? "Yes" : "No");
2825 mrsas_free_ioc_cmd(sc);
2830 * mrsas_alloc_mpt_cmds: Allocates the command packets
2831 * input: Adapter instance soft state
2833 * This function allocates the internal commands for IOs. Each command that is
2834 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2835 * array is allocated with mrsas_mpt_cmd context. The free commands are
2836 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2840 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2843 u_int32_t max_fw_cmds, count;
2844 struct mrsas_mpt_cmd *cmd;
2845 pMpi2ReplyDescriptorsUnion_t reply_desc;
2846 u_int32_t offset, chain_offset, sense_offset;
2847 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2848 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2850 max_fw_cmds = sc->max_fw_cmds;
/* Request-descriptor array (one entry per FW command). */
2852 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2853 if (!sc->req_desc) {
2854 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2857 memset(sc->req_desc, 0, sc->request_alloc_sz);
2860 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2861 * Allocate the dynamic array first and then allocate individual
2864 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
2866 if (!sc->mpt_cmd_list) {
2867 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2870 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds)
2871 for (i = 0; i < max_fw_cmds; i++) {
2872 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2874 if (!sc->mpt_cmd_list[i]) {
/* Roll back everything allocated so far on failure. */
2875 for (j = 0; j < i; j++)
2876 free(sc->mpt_cmd_list[j], M_MRSAS);
2877 free(sc->mpt_cmd_list, M_MRSAS);
2878 sc->mpt_cmd_list = NULL;
/* Frame 0 is reserved (SMIDs start at 1), hence the +1 frame skip. */
2883 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2884 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2885 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2886 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2887 sense_base = (u_int8_t *)sc->sense_mem;
2888 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
/* Carve per-command slices out of the shared DMA regions. */
2889 for (i = 0; i < max_fw_cmds; i++) {
2890 cmd = sc->mpt_cmd_list[i];
2891 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2892 chain_offset = sc->max_chain_frame_sz * i;
2893 sense_offset = MRSAS_SENSE_LEN * i;
2894 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2896 cmd->ccb_ptr = NULL;
2897 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2898 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
/* MRSAS_ULONG_MAX marks "not a sync (MFI passthrough) command". */
2899 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2901 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2902 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2903 cmd->io_request_phys_addr = io_req_base_phys + offset;
2904 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2905 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2906 cmd->sense = sense_base + sense_offset;
2907 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2908 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
/* Command joins the free pool. */
2911 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2914 /* Initialize reply descriptor array to 0xFFFFFFFF */
2915 reply_desc = sc->reply_desc_mem;
2916 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* One reply queue per MSI-X vector; all entries start "unused". */
2917 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2918 reply_desc->Words = MRSAS_ULONG_MAX;
2924 * mrsas_write_64bit_req_desc: Writes 64 bit request descriptor to FW
2925 * input: Adapter softstate
2926 * request descriptor address low
2927 * request descriptor address high
/* The low/high queue-port writes must appear as one 64-bit post to FW;
 * sc->pci_lock serializes the two 32-bit register writes. */
2930 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2931 u_int32_t req_desc_hi)
2933 mtx_lock(&sc->pci_lock);
2934 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2936 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2938 mtx_unlock(&sc->pci_lock);
2942 * mrsas_fire_cmd: Sends command to FW
2943 * input: Adapter softstate
2944 * request descriptor address low
2945 * request descriptor address high
2947 * This functions fires the command to Firmware by writing to the
2948 * inbound_low_queue_port and inbound_high_queue_port.
/* When FW supports atomic descriptors, a single 32-bit write to the
 * single-queue port suffices; otherwise fall back to the locked 64-bit
 * two-register post. */
2951 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2952 u_int32_t req_desc_hi)
2954 if (sc->atomic_desc_support)
2955 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2958 mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2962 * mrsas_transition_to_ready: Move FW to Ready state input:
2963 * Adapter instance soft state
2965 * During the initialization, FW passes can potentially be in any one of several
2966 * possible states. If the FW in operational, waiting-for-handshake states,
2967 * driver must take steps to bring it to ready state. Otherwise, it has to
2968 * wait for the ready state.
/*
 * State machine: read fw_state from the scratch pad, take the per-state
 * action (clear handshake, ack boot message, reset, or just wait), then
 * poll up to max_wait seconds for the absolute state to change before
 * re-evaluating. Fails if FW is FAULTed or the state never advances.
 */
2971 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2975 u_int32_t val, fw_state;
2976 u_int32_t cur_state;
2977 u_int32_t abs_state, curr_abs_state;
2979 val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2980 fw_state = val & MFI_STATE_MASK;
2981 max_wait = MRSAS_RESET_WAIT_TIME;
2983 if (fw_state != MFI_STATE_READY)
2984 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2986 while (fw_state != MFI_STATE_READY) {
/* Snapshot the full (un-masked) state to detect any change later. */
2987 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2989 case MFI_STATE_FAULT:
2990 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2992 cur_state = MFI_STATE_FAULT;
2996 case MFI_STATE_WAIT_HANDSHAKE:
2997 /* Set the CLR bit in inbound doorbell */
2998 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2999 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
3000 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3002 case MFI_STATE_BOOT_MESSAGE_PENDING:
3003 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
3005 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3007 case MFI_STATE_OPERATIONAL:
3009 * Bring it to READY state; assuming max wait 10
3012 mrsas_disable_intr(sc);
3013 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
/* Wait for FW to acknowledge the reset request (doorbell bit 0). */
3014 for (i = 0; i < max_wait * 1000; i++) {
3015 if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
3020 cur_state = MFI_STATE_OPERATIONAL;
3022 case MFI_STATE_UNDEFINED:
3024 * This state should not last for more than 2
3027 cur_state = MFI_STATE_UNDEFINED;
3029 case MFI_STATE_BB_INIT:
3030 cur_state = MFI_STATE_BB_INIT;
3032 case MFI_STATE_FW_INIT:
3033 cur_state = MFI_STATE_FW_INIT;
3035 case MFI_STATE_FW_INIT_2:
3036 cur_state = MFI_STATE_FW_INIT_2;
3038 case MFI_STATE_DEVICE_SCAN:
3039 cur_state = MFI_STATE_DEVICE_SCAN;
3041 case MFI_STATE_FLUSH_CACHE:
3042 cur_state = MFI_STATE_FLUSH_CACHE;
3045 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
3050 * The cur_state should not last for more than max_wait secs
3052 for (i = 0; i < (max_wait * 1000); i++) {
3053 fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3054 outbound_scratch_pad)) & MFI_STATE_MASK);
3055 curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3056 outbound_scratch_pad));
3057 if (abs_state == curr_abs_state)
3064 * Return error if fw_state hasn't changed after max_wait
3066 if (curr_abs_state == abs_state) {
3067 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
3068 "in %d secs\n", fw_state, max_wait);
3072 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
3077 * mrsas_get_mfi_cmd: Get a cmd from free command pool
3078 * input: Adapter soft state
3080 * This function removes an MFI command from the command list.
/* Pops the head of the free MFI pool under mfi_cmd_pool_lock; returns
 * NULL when the pool is exhausted. */
3082 struct mrsas_mfi_cmd *
3083 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3085 struct mrsas_mfi_cmd *cmd = NULL;
3087 mtx_lock(&sc->mfi_cmd_pool_lock);
3088 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3089 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3090 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3092 mtx_unlock(&sc->mfi_cmd_pool_lock);
3098 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
3099 * input: Adapter Context.
3101 * This function will check FW status register and flag do_timeout_reset flag.
3102 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
/*
 * Kernel thread: wakes up every fault-check interval (or when signalled on
 * ocr_chan), inspects the FW state register, and runs target reset and/or
 * online controller reset (OCR) as needed, freezing the CAM SIM around the
 * recovery. Exits on driver detach or hardware-critical error.
 */
3106 mrsas_ocr_thread(void *arg)
3108 struct mrsas_softc *sc;
3109 u_int32_t fw_status, fw_state;
3110 u_int8_t tm_target_reset_failed = 0;
3112 sc = (struct mrsas_softc *)arg;
3114 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
3116 sc->ocr_thread_active = 1;
3117 mtx_lock(&sc->sim_lock);
3119 /* Sleep for 1 second and check the queue status */
3120 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3121 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
3122 if (sc->remove_in_progress ||
3123 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3124 mrsas_dprint(sc, MRSAS_OCR,
3125 "Exit due to %s from %s\n",
3126 sc->remove_in_progress ? "Shutdown" :
3127 "Hardware critical error", __func__);
3130 fw_status = mrsas_read_reg_with_retries(sc,
3131 offsetof(mrsas_reg_set, outbound_scratch_pad));
3132 fw_state = fw_status & MFI_STATE_MASK;
/* Recovery is needed if FW faulted, a DCMD timed out, or any
 * SCSI IO timeout queued a target reset. */
3133 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
3134 mrsas_atomic_read(&sc->target_reset_outstanding)) {
3136 /* First, freeze further IOs to come to the SIM */
3137 mrsas_xpt_freeze(sc);
3139 /* If this is an IO timeout then go for target reset */
3140 if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
3141 device_printf(sc->mrsas_dev, "Initiating Target RESET "
3142 "because of SCSI IO timeout!\n");
3144 /* Let the remaining IOs to complete */
3145 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3146 "mrsas_reset_targets", 5 * hz);
3148 /* Try to reset the target device */
3149 if (mrsas_reset_targets(sc) == FAIL)
3150 tm_target_reset_failed = 1;
3153 /* If this is a DCMD timeout or FW fault,
3154 * then go for controller reset
3156 if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
3157 (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
3158 if (tm_target_reset_failed)
/* Fixed typo in log messages: "Initiaiting" -> "Initiating". */
3159 device_printf(sc->mrsas_dev, "Initiating OCR because of "
3162 device_printf(sc->mrsas_dev, "Initiating OCR "
3163 "because of %s!\n", sc->do_timedout_reset ?
3164 "DCMD IO Timeout" : "FW fault");
3166 mtx_lock_spin(&sc->ioctl_lock);
3167 sc->reset_in_progress = 1;
3168 mtx_unlock_spin(&sc->ioctl_lock);
3172 * Wait for the AEN task to be completed if it is running.
/* sim_lock must be dropped: the AEN task takes it itself. */
3174 mtx_unlock(&sc->sim_lock);
3175 taskqueue_drain(sc->ev_tq, &sc->ev_task);
3176 mtx_lock(&sc->sim_lock);
3178 taskqueue_block(sc->ev_tq);
3179 /* Try to reset the controller */
3180 mrsas_reset_ctrl(sc, sc->do_timedout_reset);
3182 sc->do_timedout_reset = 0;
3183 sc->reset_in_progress = 0;
3184 tm_target_reset_failed = 0;
3185 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
3186 memset(sc->target_reset_pool, 0,
3187 sizeof(sc->target_reset_pool));
3188 taskqueue_unblock(sc->ev_tq);
3191 /* Now allow IOs to come to the SIM */
3192 mrsas_xpt_release(sc);
3195 mtx_unlock(&sc->sim_lock);
3196 sc->ocr_thread_active = 0;
3197 mrsas_kproc_exit(0);
3201 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
3202 * input: Adapter Context.
3204 * This function will clear reply descriptor so that post OCR driver and FW will
/* Zeroes per-vector last_reply_idx and re-marks reply descriptors as
 * unused (all-ones), mirroring the initialization in mrsas_alloc_mpt_cmds.
 * NOTE(review): the clearing loop here covers only reply_q_depth entries,
 * while mrsas_alloc_mpt_cmds initializes reply_q_depth * count entries
 * (one queue per MSI-X vector) — confirm whether the remaining queues
 * should also be cleared on OCR. */
3208 mrsas_reset_reply_desc(struct mrsas_softc *sc)
3211 pMpi2ReplyDescriptorsUnion_t reply_desc;
3213 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3214 for (i = 0; i < count; i++)
3215 sc->last_reply_idx[i] = 0;
3217 reply_desc = sc->reply_desc_mem;
3218 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3219 reply_desc->Words = MRSAS_ULONG_MAX;
3224 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
3225 * input: Adapter Context.
3227 * This function will run from thread context so that it can sleep. 1. Do not
3228 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3229 * to complete for 180 seconds. 3. If #2 does not find any outstanding
3230 * command Controller is in working state, so skip OCR. Otherwise, do
3231 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3232 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3233 * OCR, Re-fire Management command and move Controller to Operation state.
3236 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3238 int retval = SUCCESS, i, j, retry = 0;
3239 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3241 struct mrsas_mfi_cmd *mfi_cmd;
3242 struct mrsas_mpt_cmd *mpt_cmd;
3243 union mrsas_evt_class_locale class_locale;
3244 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3246 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3247 device_printf(sc->mrsas_dev,
3248 "mrsas: Hardware critical error, returning FAIL.\n");
3251 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3252 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3253 mrsas_disable_intr(sc);
3254 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3255 sc->mrsas_fw_fault_check_delay * hz);
3257 /* First try waiting for commands to complete */
3258 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3259 mrsas_dprint(sc, MRSAS_OCR,
3260 "resetting adapter from %s.\n",
3262 /* Now return commands back to the CAM layer */
3263 mtx_unlock(&sc->sim_lock);
3264 for (i = 0; i < sc->max_fw_cmds; i++) {
3265 mpt_cmd = sc->mpt_cmd_list[i];
3267 if (mpt_cmd->peer_cmd) {
3268 mrsas_dprint(sc, MRSAS_OCR,
3269 "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3270 i, mpt_cmd, mpt_cmd->peer_cmd);
3273 if (mpt_cmd->ccb_ptr) {
3274 if (mpt_cmd->callout_owner) {
3275 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3276 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3277 mrsas_cmd_done(sc, mpt_cmd);
3279 mpt_cmd->ccb_ptr = NULL;
3280 mrsas_release_mpt_cmd(mpt_cmd);
3285 mrsas_atomic_set(&sc->fw_outstanding, 0);
3287 mtx_lock(&sc->sim_lock);
3289 status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3290 outbound_scratch_pad));
3291 abs_state = status_reg & MFI_STATE_MASK;
3292 reset_adapter = status_reg & MFI_RESET_ADAPTER;
3293 if (sc->disableOnlineCtrlReset ||
3294 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3295 /* Reset not supported, kill adapter */
3296 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3301 /* Now try to reset the chip */
3302 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3303 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3304 MPI2_WRSEQ_FLUSH_KEY_VALUE);
3305 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3306 MPI2_WRSEQ_1ST_KEY_VALUE);
3307 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3308 MPI2_WRSEQ_2ND_KEY_VALUE);
3309 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3310 MPI2_WRSEQ_3RD_KEY_VALUE);
3311 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3312 MPI2_WRSEQ_4TH_KEY_VALUE);
3313 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3314 MPI2_WRSEQ_5TH_KEY_VALUE);
3315 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3316 MPI2_WRSEQ_6TH_KEY_VALUE);
3318 /* Check that the diag write enable (DRWE) bit is on */
3319 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3322 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3324 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3326 if (retry++ == 100) {
3327 mrsas_dprint(sc, MRSAS_OCR,
3328 "Host diag unlock failed!\n");
3332 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3335 /* Send chip reset command */
3336 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3337 host_diag | HOST_DIAG_RESET_ADAPTER);
3340 /* Make sure reset adapter bit is cleared */
3341 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3344 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3346 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3348 if (retry++ == 1000) {
3349 mrsas_dprint(sc, MRSAS_OCR,
3350 "Diag reset adapter never cleared!\n");
3354 if (host_diag & HOST_DIAG_RESET_ADAPTER)
3357 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3358 outbound_scratch_pad)) & MFI_STATE_MASK;
3361 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3363 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3364 outbound_scratch_pad)) & MFI_STATE_MASK;
3366 if (abs_state <= MFI_STATE_FW_INIT) {
3367 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3368 " state = 0x%x\n", abs_state);
3371 /* Wait for FW to become ready */
3372 if (mrsas_transition_to_ready(sc, 1)) {
3373 mrsas_dprint(sc, MRSAS_OCR,
3374 "mrsas: Failed to transition controller to ready.\n");
3377 mrsas_reset_reply_desc(sc);
3378 if (mrsas_ioc_init(sc)) {
3379 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3382 for (j = 0; j < sc->max_fw_cmds; j++) {
3383 mpt_cmd = sc->mpt_cmd_list[j];
3384 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3385 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3386 /* If not an IOCTL then release the command else re-fire */
3387 if (!mfi_cmd->sync_cmd) {
3388 mrsas_release_mfi_cmd(mfi_cmd);
3390 req_desc = mrsas_get_request_desc(sc,
3391 mfi_cmd->cmd_id.context.smid - 1);
3392 mrsas_dprint(sc, MRSAS_OCR,
3393 "Re-fire command DCMD opcode 0x%x index %d\n ",
3394 mfi_cmd->frame->dcmd.opcode, j);
3396 device_printf(sc->mrsas_dev,
3397 "Cannot build MPT cmd.\n");
3399 mrsas_fire_cmd(sc, req_desc->addr.u.low,
3400 req_desc->addr.u.high);
3405 /* Reset load balance info */
3406 memset(sc->load_balance_info, 0,
3407 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3409 if (mrsas_get_ctrl_info(sc)) {
3414 if (!mrsas_get_map_info(sc))
3415 mrsas_sync_map_info(sc);
3417 megasas_setup_jbod_map(sc);
3419 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3420 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3421 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3422 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3426 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3427 mrsas_enable_intr(sc);
3428 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3430 /* Register AEN with FW for last sequence number */
3431 class_locale.members.reserved = 0;
3432 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3433 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3435 mtx_unlock(&sc->sim_lock);
3436 if (mrsas_register_aen(sc, sc->last_seq_num,
3437 class_locale.word)) {
3438 device_printf(sc->mrsas_dev,
3439 "ERROR: AEN registration FAILED from OCR !!! "
3440 "Further events from the controller cannot be notified."
3441 "Either there is some problem in the controller"
3442 "or the controller does not support AEN.\n"
3443 "Please contact to the SUPPORT TEAM if the problem persists\n");
3445 mtx_lock(&sc->sim_lock);
3447 /* Adapter reset completed successfully */
3448 device_printf(sc->mrsas_dev, "Reset successful\n");
3452 /* Reset failed, kill the adapter */
3453 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3457 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3458 mrsas_enable_intr(sc);
3459 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3462 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3463 mrsas_dprint(sc, MRSAS_OCR,
3464 "Reset Exit with %d.\n", retval);
3469 * mrsas_kill_hba: Kill HBA when OCR is not supported
3470 * input: Adapter Context.
3472 * This function will kill HBA when OCR is not supported.
/*
 * mrsas_kill_hba: take the adapter permanently offline when OCR cannot
 * recover it. Marks the softc as HW_CRITICAL_ERROR, writes a stop value to
 * the doorbell register (the value argument is on a line elided in this
 * extraction — NOTE(review): confirm against full source), reads the
 * doorbell back to flush the posted write, and completes any outstanding
 * IOCTLs so blocked callers are released.
 */
3475 mrsas_kill_hba(struct mrsas_softc *sc)
3477 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
3479 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
3480 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
/* Read-back of the doorbell flushes the posted register write. */
3483 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
3484 mrsas_complete_outstanding_ioctls(sc);
3488 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3489 * input: Controller softc
/*
 * mrsas_complete_outstanding_ioctls: after kill_hba, walk the whole MPT
 * command list and force-complete every pending MPT<->MFI pass-through
 * IOCTL (sync_cmd set, and not an ABORT frame) so sleeping issuers wake.
 * Completion status is taken from the MPT command's RAID context.
 * NOTE(review): loop index `i` is declared on a line elided here.
 */
3494 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3497 struct mrsas_mpt_cmd *cmd_mpt;
3498 struct mrsas_mfi_cmd *cmd_mfi;
3499 u_int32_t count, MSIxIndex;
/* One completion pass per MSI-X vector (at least one). */
3501 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3502 for (i = 0; i < sc->max_fw_cmds; i++) {
3503 cmd_mpt = sc->mpt_cmd_list[i];
/* sync_cmd_idx != ULONG_MAX means this MPT cmd carries an MFI frame. */
3505 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3506 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3507 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3508 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3509 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3510 cmd_mpt->io_request->RaidContext.raid_context.status);
3517 * mrsas_wait_for_outstanding: Wait for outstanding commands
3518 * input: Adapter Context.
3520 * This function will wait for 180 seconds for outstanding commands to be
/*
 * mrsas_wait_for_outstanding: poll up to MRSAS_RESET_WAIT_TIME iterations
 * for fw_outstanding to drain before a reset. Bails out early if the driver
 * is being removed, if firmware reports FAULT state (reset needed), or if
 * the caller indicates a DCMD timeout OCR. Periodically drains completion
 * queues (per MSI-X vector) with sim_lock dropped. Leftover commands after
 * the wait also force a reset. NOTE(review): the return statements and the
 * final retval paths are on lines elided in this extraction.
 */
3524 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3526 int i, outstanding, retval = 0;
3527 u_int32_t fw_state, count, MSIxIndex;
3530 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
/* Driver detach/shutdown in progress: stop waiting. */
3531 if (sc->remove_in_progress) {
3532 mrsas_dprint(sc, MRSAS_OCR,
3533 "Driver remove or shutdown called.\n");
3537 /* Check if firmware is in fault state */
3538 fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3539 outbound_scratch_pad)) & MFI_STATE_MASK;
3540 if (fw_state == MFI_STATE_FAULT) {
3541 mrsas_dprint(sc, MRSAS_OCR,
3542 "Found FW in FAULT state, will reset adapter.\n");
3543 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* Drop sim_lock while draining completions to avoid recursion. */
3544 mtx_unlock(&sc->sim_lock);
3545 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3546 mrsas_complete_cmd(sc, MSIxIndex);
3547 mtx_lock(&sc->sim_lock);
/* Caller already detected a DCMD timeout: go straight to reset. */
3551 if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3552 mrsas_dprint(sc, MRSAS_OCR,
3553 "DCMD IO TIMEOUT detected, will reset adapter.\n");
3557 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
/* Progress notice + completion drain every NOTICE_INTERVAL iterations. */
3561 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3562 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3563 "commands to complete\n", i, outstanding);
3564 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3565 mtx_unlock(&sc->sim_lock);
3566 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3567 mrsas_complete_cmd(sc, MSIxIndex);
3568 mtx_lock(&sc->sim_lock);
/* Commands still pending after the full wait: adapter must be reset. */
3573 if (mrsas_atomic_read(&sc->fw_outstanding)) {
3574 mrsas_dprint(sc, MRSAS_OCR,
3575 " pending commands remain after waiting,"
3576 " will reset adapter.\n");
3584 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3585 * input: Command packet for return to free cmd pool
3587 * This function returns the MFI & MPT command to the command list.
/*
 * mrsas_release_mfi_cmd: return an MFI command (and its paired MPT command,
 * if one was attached via a non-zero smid) to the free pools. Both pool
 * locks are taken; the MPT pool lock is nested inside the MFI pool lock.
 */
3590 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
3592 struct mrsas_softc *sc = cmd_mfi->sc;
3593 struct mrsas_mpt_cmd *cmd_mpt;
3596 mtx_lock(&sc->mfi_cmd_pool_lock);
3598 * Release the mpt command (if at all it is allocated
3599 * associated with the mfi command
/* smid == 0 means no MPT command was ever bound to this MFI frame. */
3601 if (cmd_mfi->cmd_id.context.smid) {
3602 mtx_lock(&sc->mpt_cmd_pool_lock);
3603 /* Get the mpt cmd from mfi cmd frame's smid value */
3604 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
/* ULONG_MAX marks the MPT command as carrying no MFI frame. */
3606 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
3607 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
3608 mtx_unlock(&sc->mpt_cmd_pool_lock);
3610 /* Release the mfi command */
3611 cmd_mfi->ccb_ptr = NULL;
3612 cmd_mfi->cmd_id.frame_count = 0;
3613 TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
3614 mtx_unlock(&sc->mfi_cmd_pool_lock);
3620 * mrsas_get_controller_info: Returns FW's controller structure
3621 * input: Adapter soft state
3622 * Controller information structure
3624 * Issues an internal command (DCMD) to get the FW's controller structure. This
3625 * information is mainly used to find out the maximum IO transfer per command
3626 * supported by the FW.
/*
 * mrsas_get_ctrl_info: issue DCMD MR_DCMD_CTRL_GET_INFO to fetch the
 * firmware controller-info structure into a dedicated DMA buffer, copy it
 * into sc->ctrl_info, and cache the capability bits the driver consumes
 * (JBOD sequence-number fast path, >256 JBOD support, disableOnlineCtrlReset).
 * Blocked issue is used when interrupts are enabled, polled otherwise.
 * A timeout schedules a DCMD-timeout OCR via do_timedout_reset.
 * NOTE(review): retcode declaration, goto labels, and return statement are
 * on lines elided in this extraction.
 */
3629 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3632 u_int8_t do_ocr = 1;
3633 struct mrsas_mfi_cmd *cmd;
3634 struct mrsas_dcmd_frame *dcmd;
3636 cmd = mrsas_get_mfi_cmd(sc);
3639 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3642 dcmd = &cmd->frame->dcmd;
/* Allocate the DMA-able buffer that receives the controller info. */
3644 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3645 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3646 mrsas_release_mfi_cmd(cmd);
3649 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* 0xFF = "not yet completed"; firmware overwrites it on completion. */
3651 dcmd->cmd = MFI_CMD_DCMD;
3652 dcmd->cmd_status = 0xFF;
3653 dcmd->sge_count = 1;
3654 dcmd->flags = MFI_FRAME_DIR_READ;
3657 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
3658 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
3659 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
3660 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
/* Interrupt-driven (blocked) when possible, polled when masked. */
3662 if (!sc->mask_interrupts)
3663 retcode = mrsas_issue_blocked_cmd(sc, cmd);
3665 retcode = mrsas_issue_polled(sc, cmd);
3667 if (retcode == ETIMEDOUT)
3670 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3673 mrsas_update_ext_vd_details(sc);
/* Cache firmware capability bits consumed elsewhere in the driver. */
3675 sc->use_seqnum_jbod_fp =
3676 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3677 sc->support_morethan256jbod =
3678 sc->ctrl_info->adapterOperations4.supportPdMapTargetId;
3680 sc->disableOnlineCtrlReset =
3681 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
3684 mrsas_free_ctlr_info_cmd(sc);
/* On timeout, request a DCMD-timeout online controller reset. */
3687 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3689 if (!sc->mask_interrupts)
3690 mrsas_release_mfi_cmd(cmd);
3696 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3698 * sc - Controller's softc
/*
 * mrsas_update_ext_vd_details: derive VD/PD counts and RAID-map sizes from
 * the freshly fetched controller info. Extended-LD support (>64 LDs, or the
 * supportMaxExtLDs bit) selects MAX_LOGICAL_DRIVES_EXT and the extended map
 * layout. Ventura-class controllers report maxRaidMapSize directly; older
 * ones compute old/new map sizes from the span-map structure sizes.
 * NOTE(review): the multiplier on maxRaidMapSize (line after 3724) is
 * elided in this extraction — confirm against full source.
 */
3701 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3703 u_int32_t ventura_map_sz = 0;
3704 sc->max256vdSupport =
3705 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3707 /* Below is additional check to address future FW enhancement */
3708 if (sc->ctrl_info->max_lds > 64)
3709 sc->max256vdSupport = 1;
3711 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3712 * MRSAS_MAX_DEV_PER_CHANNEL;
3713 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3714 * MRSAS_MAX_DEV_PER_CHANNEL;
3715 if (sc->max256vdSupport) {
3716 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3717 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3719 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3720 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
/* Non-zero maxRaidMapSize => Ventura-style dynamic raid map sizing. */
3723 if (sc->maxRaidMapSize) {
3724 ventura_map_sz = sc->maxRaidMapSize *
3726 sc->current_map_sz = ventura_map_sz;
3727 sc->max_map_sz = ventura_map_sz;
/* Legacy sizing: old map grows with the fw-supported VD count. */
3729 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3730 (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3731 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3732 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3733 if (sc->max256vdSupport)
3734 sc->current_map_sz = sc->new_map_sz;
3736 sc->current_map_sz = sc->old_map_sz;
3739 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3741 device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3742 sc->maxRaidMapSize);
3743 device_printf(sc->mrsas_dev,
3744 "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3745 "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3746 "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3747 sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3748 sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3753 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3754 * input: Adapter soft state
3756 * Allocates DMAable memory for the controller info internal command.
/*
 * mrsas_alloc_ctlr_info_cmd: create the DMA tag, allocate, and bus_dmamap_load
 * the buffer that receives MR_DCMD_CTRL_GET_INFO data; the physical address
 * lands in sc->ctlr_info_phys_addr via mrsas_addr_cb. Buffer is constrained
 * to 32-bit bus addresses (sge32 SGL). Zeroed before use. Freed by
 * mrsas_free_ctlr_info_cmd(). NOTE(review): most bus_dma_tag_create
 * arguments and the FAIL/SUCCESS returns are on lines elided here.
 */
3759 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3763 /* Allocate get controller info command */
3764 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3765 if (bus_dma_tag_create(sc->mrsas_parent_tag,
3767 BUS_SPACE_MAXADDR_32BIT,
3775 &sc->ctlr_info_tag)) {
3776 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3779 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3780 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3781 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3784 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3785 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3786 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3787 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3790 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3795 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3796 * input: Adapter soft state
3798 * Deallocates memory of the get controller info cmd.
/*
 * mrsas_free_ctlr_info_cmd: tear down the controller-info DMA buffer in the
 * reverse order of allocation — unload the map, free the memory, destroy
 * the tag — guarding each step so partial allocations are handled.
 */
3801 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3803 if (sc->ctlr_info_phys_addr)
3804 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3805 if (sc->ctlr_info_mem != NULL)
3806 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3807 if (sc->ctlr_info_tag != NULL)
3808 bus_dma_tag_destroy(sc->ctlr_info_tag);
3812 * mrsas_issue_polled: Issues a polling command
3813 * inputs: Adapter soft state
3814 * Command packet to be issued
3816 * This function is for posting of internal commands to Firmware. MFI requires
3817 * the cmd_status to be set to 0xFF before posting. The maximun wait time of
3818 * the poll response timer is 180 seconds.
/*
 * mrsas_issue_polled: fire an MFI frame and busy-poll cmd_status for up to
 * MRSAS_INTERNAL_CMD_WAIT_TIME seconds (1 ms DELAY per iteration — the
 * DELAY call sits on an elided line). The frame is flagged so firmware does
 * not post it to the reply queue. Returns SUCCESS, or ETIMEDOUT if
 * cmd_status stays 0xFF.
 */
3821 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3823 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3824 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3825 int i, retcode = SUCCESS;
/* 0xFF marks "pending"; firmware overwrites it when the cmd completes. */
3827 frame_hdr->cmd_status = 0xFF;
3828 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3830 /* Issue the frame using inbound queue port */
3831 if (mrsas_issue_dcmd(sc, cmd)) {
3832 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3836 * Poll response timer to wait for Firmware response. While this
3837 * timer with the DELAY call could block CPU, the time interval for
3838 * this is only 1 millisecond.
3840 if (frame_hdr->cmd_status == 0xFF) {
3841 for (i = 0; i < (max_wait * 1000); i++) {
3842 if (frame_hdr->cmd_status == 0xFF)
/* Still 0xFF after the full poll window: report a DCMD timeout. */
3848 if (frame_hdr->cmd_status == 0xFF) {
3849 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3850 "seconds from %s\n", max_wait, __func__);
3851 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3852 cmd->frame->dcmd.opcode);
3853 retcode = ETIMEDOUT;
3859 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3860 * input: Adapter soft state mfi cmd pointer
3862 * This function is called by mrsas_issued_blocked_cmd() and
3863 * mrsas_issued_polled(), to build the MPT command and then fire the command
/*
 * mrsas_issue_dcmd: wrap an MFI frame in an MPT pass-through request
 * descriptor and fire it at the controller. Fails (non-zero, on an elided
 * return line) if the MPT command cannot be built.
 */
3867 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3869 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3871 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3873 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
/* Post both 32-bit halves of the 64-bit request descriptor. */
3876 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3882 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3883 * input: Adapter soft state mfi cmd to build
3885 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3886 * command and prepares the MPT command to send to Firmware.
/*
 * mrsas_build_mpt_cmd: build the MPT-MFI pass-through command for an MFI
 * frame, then fill in its request descriptor: SCSI_IO request-flags type
 * and the SMID assigned during mrsas_build_mptmfi_passthru(). Returns the
 * descriptor, or NULL (on elided lines) when the pass-through cannot be
 * built or no descriptor is available.
 */
3888 MRSAS_REQUEST_DESCRIPTOR_UNION *
3889 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3891 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3894 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3895 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
/* SMID was stored into the MFI cmd by mrsas_build_mptmfi_passthru(). */
3898 index = cmd->cmd_id.context.smid;
/* Descriptor table is 0-based; SMIDs are 1-based. */
3900 req_desc = mrsas_get_request_desc(sc, index - 1);
3904 req_desc->addr.Words = 0;
3905 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3907 req_desc->SCSIIO.SMID = index;
3913 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3914 * input: Adapter soft state mfi cmd pointer
3916 * The MPT command and the io_request are setup as a passthru command. The SGE
3917 * chain address is set to frame_phys_addr of the MFI command.
/*
 * mrsas_build_mptmfi_passthru: pair a fresh MPT command with the given MFI
 * frame. Cross-links the two (smid in the MFI cmd, MFI index in the MPT
 * cmd), propagates the dont-post-in-reply-queue flag, and sets up the MPT
 * io_request as a pass-through whose IEEE SGE chain element points at the
 * MFI frame's physical address.
 */
3920 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3922 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3923 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3924 struct mrsas_mpt_cmd *mpt_cmd;
3925 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3927 mpt_cmd = mrsas_get_mpt_cmd(sc);
3931 /* Save the smid. To be used for returning the cmd */
3932 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
/* Back-link so completion code can find the MFI cmd from the MPT cmd. */
3934 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3937 * For cmds where the flag is set, store the flag and check on
3938 * completion. For cmds with this flag, don't call
3939 * mrsas_complete_cmd.
3942 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3943 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3945 io_req = mpt_cmd->io_request;
/* Gen3/Ventura/Aero: clear flags on the last main-message SGE slot. */
3947 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
3948 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3950 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3951 sgl_ptr_end->Flags = 0;
3953 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3955 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
/* SGL offset is expressed in 32-bit dwords. */
3956 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3957 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
/* The chain element targets the MFI frame itself. */
3959 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3961 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3962 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3964 mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
3970 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3971 * input: Adapter soft state Command to be issued
3973 * This function waits on an event for the command to be returned from the ISR.
3974 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3975 * internal and ioctl commands.
/*
 * mrsas_issue_blocked_cmd: issue an MFI command and sleep (tsleep on
 * &sc->chan, 1-second ticks) until the ISR completes it via mrsas_wakeup(),
 * or until MRSAS_INTERNAL_CMD_WAIT_TIME seconds elapse for non-IOCTL
 * commands (IOCTLs — cmd->sync_cmd set — wait indefinitely). Returns
 * SUCCESS or ETIMEDOUT. NOTE(review): total_time increment and some loop
 * braces are on lines elided in this extraction.
 */
3978 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3980 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3981 unsigned long total_time = 0;
3982 int retcode = SUCCESS;
3984 /* Initialize cmd_status */
3985 cmd->cmd_status = 0xFF;
3987 /* Build MPT-MFI command for issue to FW */
3988 if (mrsas_issue_dcmd(sc, cmd)) {
3989 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
/* Sleep channel is &sc->chan; mrsas_wakeup() calls wakeup_one on it. */
3992 sc->chan = (void *)&cmd;
3995 if (cmd->cmd_status == 0xFF) {
3996 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4000 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL
/* Non-IOCTL internal commands enforce the max_wait timeout. */
4003 if (total_time >= max_wait) {
4004 device_printf(sc->mrsas_dev,
4005 "Internal command timed out after %d seconds.\n", max_wait);
4012 if (cmd->cmd_status == 0xFF) {
4013 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
4014 "seconds from %s\n", max_wait, __func__);
4015 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
4016 cmd->frame->dcmd.opcode);
4017 retcode = ETIMEDOUT;
4023 * mrsas_complete_mptmfi_passthru: Completes a command
4024 * input: @sc: Adapter soft state
4025 * @cmd: Command to be completed
4026 * @status: cmd completion status
4028 * This function is called from mrsas_complete_cmd() after an interrupt is
4029 * received from Firmware, and io_request->Function is
4030 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
/*
 * mrsas_complete_mptmfi_passthru: completion dispatcher for MPT-MFI
 * pass-through commands, switched on the MFI frame's cmd field (switch
 * statement itself is on an elided line). Handles: invalid frames,
 * PD/LD SCSI I/O issued via IOCTL, DCMDs — with special handling for LD map
 * sync (re-validate map + re-arm), AEN opcodes, and PD sequence-map sync
 * (re-register on success) — and ABORT completions.
 */
4033 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
4036 struct mrsas_header *hdr = &cmd->frame->hdr;
4037 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
4039 /* Reset the retry counter for future re-tries */
4040 cmd->retry_for_fw_reset = 0;
4043 cmd->ccb_ptr = NULL;
4046 case MFI_CMD_INVALID:
4047 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
4049 case MFI_CMD_PD_SCSI_IO:
4050 case MFI_CMD_LD_SCSI_IO:
4052 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
4053 * issued either through an IO path or an IOCTL path. If it
4054 * was via IOCTL, we will send it to internal completion.
4056 if (cmd->sync_cmd) {
4058 mrsas_wakeup(sc, cmd);
4064 /* Check for LD map update */
/* mbox.b[1] == 1 marks the pended "map update" flavor of the DCMD. */
4065 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
4066 (cmd->frame->dcmd.mbox.b[1] == 1)) {
4067 sc->fast_path_io = 0;
4068 mtx_lock(&sc->raidmap_lock);
4069 sc->map_update_cmd = NULL;
4070 if (cmd_status != 0) {
4071 if (cmd_status != MFI_STAT_NOT_FOUND)
4072 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
4074 mrsas_release_mfi_cmd(cmd);
4075 mtx_unlock(&sc->raidmap_lock);
/* Success path: re-validate the new map and re-arm the sync DCMD. */
4080 mrsas_release_mfi_cmd(cmd);
4081 if (MR_ValidateMapInfo(sc))
4082 sc->fast_path_io = 0;
4084 sc->fast_path_io = 1;
4085 mrsas_sync_map_info(sc);
4086 mtx_unlock(&sc->raidmap_lock);
4089 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
4090 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
4091 sc->mrsas_aen_triggered = 0;
4093 /* FW has an updated PD sequence */
4094 if ((cmd->frame->dcmd.opcode ==
4095 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
4096 (cmd->frame->dcmd.mbox.b[0] == 1)) {
4098 mtx_lock(&sc->raidmap_lock);
4099 sc->jbod_seq_cmd = NULL;
4100 mrsas_release_mfi_cmd(cmd);
4102 if (cmd_status == MFI_STAT_OK) {
4103 sc->pd_seq_map_id++;
4104 /* Re-register a pd sync seq num cmd */
4105 if (megasas_sync_pd_seq_num(sc, true))
4106 sc->use_seqnum_jbod_fp = 0;
/* Failure disables the JBOD sequence-number fast path. */
4108 sc->use_seqnum_jbod_fp = 0;
4109 device_printf(sc->mrsas_dev,
4110 "Jbod map sync failed, status=%x\n", cmd_status);
4112 mtx_unlock(&sc->raidmap_lock);
4115 /* See if got an event notification */
4116 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
4117 mrsas_complete_aen(sc, cmd);
4119 mrsas_wakeup(sc, cmd);
4122 /* Command issued to abort another cmd return */
4123 mrsas_complete_abort(sc, cmd);
4126 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
4132 * mrsas_wakeup: Completes an internal command
4133 * input: Adapter soft state
4134 * Command to be completed
4136 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4137 * timer is started. This function is called from
4138 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4139 * from the command wait.
/*
 * mrsas_wakeup: completion side of mrsas_issue_blocked_cmd(). Copies the
 * firmware status into cmd->cmd_status (normalizing the "pending" 0xFF
 * sentinel to 0) and wakes the single sleeper on &sc->chan.
 */
4142 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4144 cmd->cmd_status = cmd->frame->io.cmd_status;
4146 if (cmd->cmd_status == 0xFF)
4147 cmd->cmd_status = 0;
/* &sc->chan is the tsleep channel used by mrsas_issue_blocked_cmd(). */
4149 sc->chan = (void *)&cmd;
4150 wakeup_one((void *)&sc->chan);
4155 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
4156 * Adapter soft state Shutdown/Hibernate
4158 * This function issues a DCMD internal command to Firmware to initiate shutdown
4159 * of the controller.
/*
 * mrsas_shutdown_ctlr: issue the given shutdown/hibernate DCMD opcode to
 * firmware. Skips entirely if the HBA is already in critical error. First
 * aborts any outstanding AEN, map-update, and JBOD-sequence commands so
 * firmware quiesces cleanly, then sends a zero-length blocked DCMD.
 * NOTE(review): the aen_cmd guard for the first abort (before line 4176)
 * is on an elided line.
 */
4162 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
4164 struct mrsas_mfi_cmd *cmd;
4165 struct mrsas_dcmd_frame *dcmd;
4167 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4170 cmd = mrsas_get_mfi_cmd(sc);
4172 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
/* Abort long-lived pended commands before asking FW to shut down. */
4176 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
4177 if (sc->map_update_cmd)
4178 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
4179 if (sc->jbod_seq_cmd)
4180 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
4182 dcmd = &cmd->frame->dcmd;
4183 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Zero-length, direction-less DCMD: opcode alone carries the request. */
4185 dcmd->cmd = MFI_CMD_DCMD;
4186 dcmd->cmd_status = 0x0;
4187 dcmd->sge_count = 0;
4188 dcmd->flags = MFI_FRAME_DIR_NONE;
4191 dcmd->data_xfer_len = 0;
4192 dcmd->opcode = opcode;
4194 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
4196 mrsas_issue_blocked_cmd(sc, cmd);
4197 mrsas_release_mfi_cmd(cmd);
4203 * mrsas_flush_cache: Requests FW to flush all its caches input:
4204 * Adapter soft state
4206 * This function is issues a DCMD internal command to Firmware to initiate
4207 * flushing of all caches.
/*
 * mrsas_flush_cache: issue MR_DCMD_CTRL_CACHE_FLUSH, requesting flush of
 * both controller and disk caches (mbox.b[0]). Zero-length blocked DCMD;
 * skipped when the HBA is already in critical error.
 */
4210 mrsas_flush_cache(struct mrsas_softc *sc)
4212 struct mrsas_mfi_cmd *cmd;
4213 struct mrsas_dcmd_frame *dcmd;
4215 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4218 cmd = mrsas_get_mfi_cmd(sc);
4220 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
4223 dcmd = &cmd->frame->dcmd;
4224 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4226 dcmd->cmd = MFI_CMD_DCMD;
4227 dcmd->cmd_status = 0x0;
4228 dcmd->sge_count = 0;
4229 dcmd->flags = MFI_FRAME_DIR_NONE;
4232 dcmd->data_xfer_len = 0;
4233 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
/* Flush both the controller cache and the on-disk drive caches. */
4234 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4236 mrsas_issue_blocked_cmd(sc, cmd);
4237 mrsas_release_mfi_cmd(cmd);
/*
 * megasas_sync_pd_seq_num: fetch (or pend) the JBOD PD sequence-number map
 * via MR_DCMD_SYSTEM_PD_MAP_GET_INFO. Double-buffered on pd_seq_map_id&1.
 * When `pend` is true the DCMD is registered as a long-lived WRITE-direction
 * event (MRSAS_DCMD_MBOX_PEND_FLAG) completed later by the ISR; otherwise
 * it is issued polled (READ) and the returned count is validated against
 * MAX_PHYSICAL_DEVICES. Timeout schedules a DCMD-timeout OCR.
 * NOTE(review): NULL checks and return paths are on lines elided here.
 */
4243 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
4246 u_int8_t do_ocr = 1;
4247 struct mrsas_mfi_cmd *cmd;
4248 struct mrsas_dcmd_frame *dcmd;
4249 uint32_t pd_seq_map_sz;
4250 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
4251 bus_addr_t pd_seq_h;
/* Structure already contains one MR_PD_CFG_SEQ; add the remainder. */
4253 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
4254 (sizeof(struct MR_PD_CFG_SEQ) *
4255 (MAX_PHYSICAL_DEVICES - 1));
4257 cmd = mrsas_get_mfi_cmd(sc);
4259 device_printf(sc->mrsas_dev,
4260 "Cannot alloc for ld map info cmd.\n");
4263 dcmd = &cmd->frame->dcmd;
/* Double buffering: alternate buffers keyed on pd_seq_map_id parity. */
4265 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
4266 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
4268 device_printf(sc->mrsas_dev,
4269 "Failed to alloc mem for jbod map info.\n");
4270 mrsas_release_mfi_cmd(cmd);
4273 memset(pd_sync, 0, pd_seq_map_sz);
4274 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4275 dcmd->cmd = MFI_CMD_DCMD;
4276 dcmd->cmd_status = 0xFF;
4277 dcmd->sge_count = 1;
4280 dcmd->data_xfer_len = (pd_seq_map_sz);
4281 dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
4282 dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
4283 dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
/* Pended flavor: register with FW and return; ISR completes it later. */
4286 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
4287 dcmd->flags = (MFI_FRAME_DIR_WRITE);
4288 sc->jbod_seq_cmd = cmd;
4289 if (mrsas_issue_dcmd(sc, cmd)) {
4290 device_printf(sc->mrsas_dev,
4291 "Fail to send sync map info command.\n");
/* Non-pended flavor: synchronous polled read of the current map. */
4296 dcmd->flags = MFI_FRAME_DIR_READ;
4298 retcode = mrsas_issue_polled(sc, cmd);
4299 if (retcode == ETIMEDOUT)
/* Sanity-check the firmware-reported JBOD count. */
4302 if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
4303 device_printf(sc->mrsas_dev,
4304 "driver supports max %d JBOD, but FW reports %d\n",
4305 MAX_PHYSICAL_DEVICES, pd_sync->count);
4309 sc->pd_seq_map_id++;
4314 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4320 * mrsas_get_map_info: Load and validate RAID map input:
4321 * Adapter instance soft state
4323 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4324 * and validate RAID map. It returns 0 if successful, 1 other- wise.
/*
 * mrsas_get_map_info: load the RAID map from firmware and validate it.
 * fast_path_io is enabled only when both mrsas_get_ld_map_info() and
 * MR_ValidateMapInfo() succeed. Returns 0 on success (retcode from
 * validation), non-zero otherwise.
 */
4327 mrsas_get_map_info(struct mrsas_softc *sc)
4329 uint8_t retcode = 0;
4331 sc->fast_path_io = 0;
4332 if (!mrsas_get_ld_map_info(sc)) {
4333 retcode = MR_ValidateMapInfo(sc);
4335 sc->fast_path_io = 1;
4343 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
4344 * Adapter instance soft state
4346 * Issues an internal command (DCMD) to get the FW's controller PD list
/*
 * mrsas_get_ld_map_info: issue a polled MR_DCMD_LD_MAP_GET_INFO to read the
 * firmware LD raid map into the double-buffered raidmap_mem slot selected by
 * map_id parity. A timeout schedules a DCMD-timeout OCR via
 * do_timedout_reset. NOTE(review): retcode declaration and return paths are
 * on lines elided in this extraction.
 *
 * Fix: the map buffer was cleared with memset(map, 0, sizeof(sc->max_map_sz)),
 * which zeroes only sizeof(u_int32_t) bytes — the size of the field, not the
 * buffer. Cleared the full sc->max_map_sz bytes instead, matching the
 * pattern used by mrsas_sync_map_info() (memset(target_map, 0,
 * sc->max_map_sz)).
 */
4350 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4353 struct mrsas_mfi_cmd *cmd;
4354 struct mrsas_dcmd_frame *dcmd;
4356 bus_addr_t map_phys_addr = 0;
4358 cmd = mrsas_get_mfi_cmd(sc);
4360 device_printf(sc->mrsas_dev,
4361 "Cannot alloc for ld map info cmd.\n");
4364 dcmd = &cmd->frame->dcmd;
/* Double buffering: alternate raid-map buffers keyed on map_id parity. */
4366 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4367 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4369 device_printf(sc->mrsas_dev,
4370 "Failed to alloc mem for ld map info.\n");
4371 mrsas_release_mfi_cmd(cmd);
/* Zero the whole map buffer (was: sizeof(sc->max_map_sz) == 4 bytes). */
4374 memset(map, 0, sc->max_map_sz);
4375 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* 0xFF = pending; firmware overwrites cmd_status on completion. */
4377 dcmd->cmd = MFI_CMD_DCMD;
4378 dcmd->cmd_status = 0xFF;
4379 dcmd->sge_count = 1;
4380 dcmd->flags = MFI_FRAME_DIR_READ;
4383 dcmd->data_xfer_len = sc->current_map_sz;
4384 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4385 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4386 dcmd->sgl.sge32[0].length = sc->current_map_sz;
4388 retcode = mrsas_issue_polled(sc, cmd);
/* Polled DCMD timed out: request a DCMD-timeout online controller reset. */
4389 if (retcode == ETIMEDOUT)
4390 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4396 * mrsas_sync_map_info: Get FW's ld_map structure input:
4397 * Adapter instance soft state
4399 * Issues an internal command (DCMD) to get the FW's controller PD list
/*
 * mrsas_sync_map_info: register the long-lived "LD map update" DCMD with
 * firmware. Builds an MR_LD_TARGET_SYNC array (targetId/seqNum per LD) in
 * the *other* raid-map buffer ((map_id - 1) & 1), then issues
 * MR_DCMD_LD_MAP_GET_INFO WRITE-direction with the pend flag in mbox.b[1];
 * the ISR completes it on the next map change and re-arms it. The command
 * is recorded in sc->map_update_cmd so it can be aborted at shutdown.
 * NOTE(review): local declarations (i, raid) and return paths are on
 * lines elided in this extraction.
 */
4403 mrsas_sync_map_info(struct mrsas_softc *sc)
4406 struct mrsas_mfi_cmd *cmd;
4407 struct mrsas_dcmd_frame *dcmd;
4408 uint32_t size_sync_info, num_lds;
4409 MR_LD_TARGET_SYNC *target_map = NULL;
4410 MR_DRV_RAID_MAP_ALL *map;
4412 MR_LD_TARGET_SYNC *ld_sync;
4413 bus_addr_t map_phys_addr = 0;
4415 cmd = mrsas_get_mfi_cmd(sc);
4417 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
4420 map = sc->ld_drv_map[sc->map_id & 1];
4421 num_lds = map->raidMap.ldCount;
4423 dcmd = &cmd->frame->dcmd;
4424 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
4425 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Use the inactive buffer of the double-buffered raid map. */
4427 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
4428 memset(target_map, 0, sc->max_map_sz);
4430 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
4432 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
/* One targetId/seqNum entry per logical drive in the current map. */
4434 for (i = 0; i < num_lds; i++, ld_sync++) {
4435 raid = MR_LdRaidGet(i, map);
4436 ld_sync->targetId = MR_GetLDTgtId(i, map);
4437 ld_sync->seqNum = raid->seqNum;
4440 dcmd->cmd = MFI_CMD_DCMD;
4441 dcmd->cmd_status = 0xFF;
4442 dcmd->sge_count = 1;
4443 dcmd->flags = MFI_FRAME_DIR_WRITE;
4446 dcmd->data_xfer_len = sc->current_map_sz;
/* mbox.b[1] = pend flag: firmware completes this DCMD on map change. */
4447 dcmd->mbox.b[0] = num_lds;
4448 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4449 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4450 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4451 dcmd->sgl.sge32[0].length = sc->current_map_sz;
4453 sc->map_update_cmd = cmd;
4454 if (mrsas_issue_dcmd(sc, cmd)) {
4455 device_printf(sc->mrsas_dev,
4456 "Fail to send sync map info command.\n");
4462 /* Input: dcmd.opcode - MR_DCMD_PD_GET_INFO
4463 * dcmd.mbox.s[0] - deviceId for this physical drive
4464 * dcmd.sge IN - ptr to returned MR_PD_INFO structure
4465 * Desc: Firmware return the physical drive info structure
/*
 * mrsas_get_pd_info: issue MR_DCMD_PD_GET_INFO for one physical drive
 * (device_id in mbox.s[0]) into the shared pd_info DMA buffer, then cache
 * the drive's DDF interface type into sc->target_list. Blocked issue when
 * interrupts are enabled, polled otherwise; timeout schedules a
 * DCMD-timeout OCR. NOTE(review): retcode declaration and return paths
 * are on lines elided in this extraction.
 */
4469 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
4472 u_int8_t do_ocr = 1;
4473 struct mrsas_mfi_cmd *cmd;
4474 struct mrsas_dcmd_frame *dcmd;
4476 cmd = mrsas_get_mfi_cmd(sc);
4479 device_printf(sc->mrsas_dev,
4480 "Cannot alloc for get PD info cmd\n");
4483 dcmd = &cmd->frame->dcmd;
4485 memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
4486 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* mbox.s[0] selects which physical drive the firmware reports on. */
4488 dcmd->mbox.s[0] = device_id;
4489 dcmd->cmd = MFI_CMD_DCMD;
4490 dcmd->cmd_status = 0xFF;
4491 dcmd->sge_count = 1;
4492 dcmd->flags = MFI_FRAME_DIR_READ;
4495 dcmd->data_xfer_len = sizeof(struct mrsas_pd_info);
4496 dcmd->opcode = MR_DCMD_PD_GET_INFO;
4497 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->pd_info_phys_addr;
4498 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_pd_info);
4500 if (!sc->mask_interrupts)
4501 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4503 retcode = mrsas_issue_polled(sc, cmd);
4505 if (retcode == ETIMEDOUT)
/* Record the drive's interface type (e.g. SAS/SATA per DDF pdType). */
4508 sc->target_list[device_id].interface_type =
4509 sc->pd_info_mem->state.ddf.pdType.intf;
4516 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4518 if (!sc->mask_interrupts)
4519 mrsas_release_mfi_cmd(cmd);
4523 * mrsas_add_target: Add target ID of system PD/VD to driver's data structure.
4524 * sc: Adapter's soft state
4525 * target_id: Unique target id per controller(managed by driver)
4526 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4527 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4529 * Descripton: This function will be called whenever system PD or VD is created.
/*
 * mrsas_add_target: record a newly created system PD or VD in
 * sc->target_list. Target IDs below MRSAS_MAX_PD are system PDs; higher
 * IDs are VDs (logged relative to MRSAS_MAX_PD). For system PDs, also
 * fetch drive info via DCMD — but only when interrupts are enabled and
 * the pd_info buffer exists.
 */
static void mrsas_add_target(struct mrsas_softc *sc,
4532 u_int16_t target_id)
4534 sc->target_list[target_id].target_id = target_id;
4536 device_printf(sc->mrsas_dev,
4537 "%s created target ID: 0x%x\n",
4538 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4539 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4541 * If interrupts are enabled, then only fire DCMD to get pd_info
4544 if (!sc->mask_interrupts && sc->pd_info_mem &&
4545 (target_id < MRSAS_MAX_PD))
4546 mrsas_get_pd_info(sc, target_id);
4551 * mrsas_remove_target: Remove target ID of system PD/VD from driver's data structure.
4552 * sc: Adapter's soft state
4553 * target_id: Unique target id per controller(managed by driver)
4554 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4555 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4557 * Description: This function will be called whenever system PD or VD is deleted
4559 static void mrsas_remove_target(struct mrsas_softc *sc,
4560 u_int16_t target_id)
4562 sc->target_list[target_id].target_id = 0xffff;
4563 device_printf(sc->mrsas_dev,
4564 "%s deleted target ID: 0x%x\n",
4565 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4566 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4570 * mrsas_get_pd_list: Returns FW's PD list structure input:
4571 * Adapter soft state
4573 * Issues an internal command (DCMD) to get the FW's controller PD list
4574 * structure. This information is mainly used to find out about the system PDs
4575 * supported by Firmware.
4578 mrsas_get_pd_list(struct mrsas_softc *sc)
/*
 * Fetch the firmware's physical-drive list via DCMD MR_DCMD_PD_LIST_QUERY
 * and mirror it into sc->pd_list / sc->target_list.
 * NOTE(review): several lines of this function (error branches, returns,
 * closing braces) are missing from this view of the file.
 */
4580 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4581 u_int8_t do_ocr = 1;
4582 struct mrsas_mfi_cmd *cmd;
4583 struct mrsas_dcmd_frame *dcmd;
4584 struct MR_PD_LIST *pd_list_mem;
4585 struct MR_PD_ADDRESS *pd_addr;
4586 bus_addr_t pd_list_phys_addr = 0;
4587 struct mrsas_tmp_dcmd *tcmd;
/* Grab an internal MFI command frame for the DCMD. */
4589 cmd = mrsas_get_mfi_cmd(sc);
/* NOTE(review): the allocation-failure branch around this printf is truncated here. */
4591 device_printf(sc->mrsas_dev,
4592 "Cannot alloc for get PD list cmd\n");
4595 dcmd = &cmd->frame->dcmd;
/* Temporary DMA-able buffer to receive the PD list from firmware. */
4597 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4598 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4599 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4600 device_printf(sc->mrsas_dev,
4601 "Cannot alloc dmamap for get PD list cmd\n");
/* Release everything acquired so far before bailing out. */
4602 mrsas_release_mfi_cmd(cmd);
4603 mrsas_free_tmp_dcmd(tcmd);
4604 free(tcmd, M_MRSAS);
4607 pd_list_mem = tcmd->tmp_dcmd_mem;
4608 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
/* Build the DCMD frame: read-direction transfer of the whole PD list. */
4610 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4612 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4613 dcmd->mbox.b[1] = 0;
4614 dcmd->cmd = MFI_CMD_DCMD;
4615 dcmd->cmd_status = 0xFF;
4616 dcmd->sge_count = 1;
4617 dcmd->flags = MFI_FRAME_DIR_READ;
4620 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4621 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4622 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4623 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
/* Blocking issue when interrupts are live, polled otherwise (e.g. during OCR). */
4625 if (!sc->mask_interrupts)
4626 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4628 retcode = mrsas_issue_polled(sc, cmd);
/* NOTE(review): the ETIMEDOUT handling branch is truncated here. */
4630 if (retcode == ETIMEDOUT)
4633 /* Get the instance PD list */
4634 pd_count = MRSAS_MAX_PD;
4635 pd_addr = pd_list_mem->addr;
/* Only accept the reply if the reported count fits the local table. */
4636 if (pd_list_mem->count < pd_count) {
4637 memset(sc->local_pd_list, 0,
4638 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4639 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4640 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4641 sc->local_pd_list[pd_addr->deviceId].driveType =
4642 pd_addr->scsiDevType;
4643 sc->local_pd_list[pd_addr->deviceId].driveState =
/* Newly reported PDs (slot still 0xffff) are added to the target list. */
4645 if (sc->target_list[pd_addr->deviceId].target_id == 0xffff)
4646 mrsas_add_target(sc, pd_addr->deviceId);
/* Drop targets that are no longer system PDs per the fresh list. */
4649 for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4650 if ((sc->local_pd_list[pd_index].driveState !=
4651 MR_PD_STATE_SYSTEM) &&
4652 (sc->target_list[pd_index].target_id !=
4654 mrsas_remove_target(sc, pd_index);
4658 * Use mutex/spinlock if pd_list component size increase more than
/* Publish the freshly built local list into the driver-visible copy. */
4661 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4665 mrsas_free_tmp_dcmd(tcmd);
4666 free(tcmd, M_MRSAS);
/* A timed-out DCMD schedules an online controller reset (OCR). */
4669 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4671 if (!sc->mask_interrupts)
4672 mrsas_release_mfi_cmd(cmd);
4678 * mrsas_get_ld_list: Returns FW's LD list structure input:
4679 * Adapter soft state
4681 * Issues an internal command (DCMD) to get the FW's controller LD list
4682 * structure. This information is mainly used to find out about the LDs supported by
4686 mrsas_get_ld_list(struct mrsas_softc *sc)
/*
 * Fetch the firmware's logical-drive list via DCMD MR_DCMD_LD_GET_LIST
 * and sync it into sc->ld_ids / sc->target_list.
 * NOTE(review): several lines of this function (null checks, returns,
 * closing braces) are missing from this view of the file.
 */
4688 int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4689 u_int8_t do_ocr = 1;
4690 struct mrsas_mfi_cmd *cmd;
4691 struct mrsas_dcmd_frame *dcmd;
4692 struct MR_LD_LIST *ld_list_mem;
4693 bus_addr_t ld_list_phys_addr = 0;
4694 struct mrsas_tmp_dcmd *tcmd;
/* Grab an internal MFI command frame for the DCMD. */
4696 cmd = mrsas_get_mfi_cmd(sc);
/* NOTE(review): the allocation-failure branch around this printf is truncated here. */
4698 device_printf(sc->mrsas_dev,
4699 "Cannot alloc for get LD list cmd\n");
4702 dcmd = &cmd->frame->dcmd;
/* Temporary DMA-able buffer to receive the LD list from firmware. */
4704 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4705 ld_list_size = sizeof(struct MR_LD_LIST);
4706 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4707 device_printf(sc->mrsas_dev,
4708 "Cannot alloc dmamap for get LD list cmd\n");
/* Release everything acquired so far before bailing out. */
4709 mrsas_release_mfi_cmd(cmd);
4710 mrsas_free_tmp_dcmd(tcmd);
4711 free(tcmd, M_MRSAS);
4714 ld_list_mem = tcmd->tmp_dcmd_mem;
4715 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4717 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* mbox.b[0] = 1 asks firmware for the extended (256-VD) list format. */
4719 if (sc->max256vdSupport)
4720 dcmd->mbox.b[0] = 1;
/* Build the DCMD frame: read-direction transfer of the LD list. */
4722 dcmd->cmd = MFI_CMD_DCMD;
4723 dcmd->cmd_status = 0xFF;
4724 dcmd->sge_count = 1;
4725 dcmd->flags = MFI_FRAME_DIR_READ;
4727 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4728 dcmd->opcode = MR_DCMD_LD_GET_LIST;
4729 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4730 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
/* Blocking issue when interrupts are live, polled otherwise. */
4733 if (!sc->mask_interrupts)
4734 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4736 retcode = mrsas_issue_polled(sc, cmd);
/* NOTE(review): the ETIMEDOUT handling branch is truncated here. */
4738 if (retcode == ETIMEDOUT)
4742 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4745 /* Get the instance LD list */
/* Only accept the reply if the LD count fits what this FW/driver supports. */
4746 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4747 sc->CurLdCount = ld_list_mem->ldCount;
/* 0xff marks an unused slot in the LD-id map. */
4748 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4749 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4750 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
/* Driver-side VD target IDs are offset by MRSAS_MAX_PD. */
4751 drv_tgt_id = ids + MRSAS_MAX_PD;
4752 if (ld_list_mem->ldList[ld_index].state != 0) {
4753 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4754 if (sc->target_list[drv_tgt_id].target_id ==
4756 mrsas_add_target(sc, drv_tgt_id);
/* LDs reported with state 0 are removed from the target list. */
4758 if (sc->target_list[drv_tgt_id].target_id !=
4760 mrsas_remove_target(sc,
4768 mrsas_free_tmp_dcmd(tcmd);
4769 free(tcmd, M_MRSAS);
/* A timed-out DCMD schedules an online controller reset (OCR). */
4772 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4773 if (!sc->mask_interrupts)
4774 mrsas_release_mfi_cmd(cmd);
4780 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
4781 * Adapter soft state Temp command Size of allocation
4783 * Allocates DMAable memory for a temporary internal command. The allocated
4784 * memory is initialized to all zeros upon successful loading of the dma
4788 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4789 struct mrsas_tmp_dcmd *tcmd, int size)
/*
 * Allocate and zero a DMA-able buffer of 'size' bytes for an internal
 * DCMD: create a child DMA tag, allocate the memory, and load the map to
 * obtain tcmd->tmp_dcmd_phys_addr (via mrsas_addr_cb).
 * NOTE(review): most bus_dma_tag_create() arguments and the failure
 * returns are missing from this view of the file.
 */
4791 if (bus_dma_tag_create(sc->mrsas_parent_tag,
/* 32-bit addressable — DCMD SGEs use sge32 physical addresses. */
4793 BUS_SPACE_MAXADDR_32BIT,
4801 &tcmd->tmp_dcmd_tag)) {
4802 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4805 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4806 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4807 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
/* Loading the map resolves the buffer's bus address via the callback. */
4810 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4811 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4812 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4813 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
/* Firmware reads this buffer; start from a clean state. */
4816 memset(tcmd->tmp_dcmd_mem, 0, size);
4821 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
4822 * temporary dcmd pointer
4824 * Deallocates memory of the temporary command for use in the construction of
4825 * the internal DCMD.
4828 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4830 if (tmp->tmp_dcmd_phys_addr)
4831 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4832 if (tmp->tmp_dcmd_mem != NULL)
4833 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4834 if (tmp->tmp_dcmd_tag != NULL)
4835 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4839 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
4840 * Adapter soft state Previously issued cmd to be aborted
4842 * This function is used to abort previously issued commands, such as AEN and
4843 * RAID map sync map commands. The abort command is sent as a DCMD internal
4844 * command and subsequently the driver will wait for a return status. The
4845 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4848 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4849 struct mrsas_mfi_cmd *cmd_to_abort)
/*
 * Abort a previously issued MFI command (e.g. AEN or RAID-map sync) by
 * sending an MFI_CMD_ABORT frame and sleeping until it completes or
 * MRSAS_INTERNAL_CMD_WAIT_TIME seconds elapse.
 * NOTE(review): the null-check after mrsas_get_mfi_cmd(), the wait-loop
 * braces and the return paths are missing from this view of the file.
 */
4851 struct mrsas_mfi_cmd *cmd;
4852 struct mrsas_abort_frame *abort_fr;
4853 u_int8_t retcode = 0;
4854 unsigned long total_time = 0;
4855 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
/* A second internal command carries the abort frame. */
4857 cmd = mrsas_get_mfi_cmd(sc);
4859 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4862 abort_fr = &cmd->frame->abort;
4864 /* Prepare and issue the abort frame */
4865 abort_fr->cmd = MFI_CMD_ABORT;
4866 abort_fr->cmd_status = 0xFF;
4867 abort_fr->flags = 0;
/* Identify the victim by its command index and frame physical address. */
4868 abort_fr->abort_context = cmd_to_abort->index;
4869 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4870 abort_fr->abort_mfi_phys_addr_hi = 0;
/* 0xFF means "still pending"; completion handler overwrites it. */
4873 cmd->cmd_status = 0xFF;
4875 if (mrsas_issue_dcmd(sc, cmd)) {
4876 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4879 /* Wait for this cmd to complete */
4880 sc->chan = (void *)&cmd;
/* Sleep in 1-second (hz) slices until the status changes or we time out. */
4882 if (cmd->cmd_status == 0xFF) {
4883 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4887 if (total_time >= max_wait) {
4888 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4895 mrsas_release_mfi_cmd(cmd);
4900 * mrsas_complete_abort: Completes aborting a command input:
4901 * Adapter soft state Cmd that was issued to abort another cmd
4903 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4904 * change after sending the command. This function is called from
4905 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4908 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/*
 * Completion handler for an abort command: wake the thread sleeping in
 * mrsas_issue_blocked_abort_cmd() once the abort finishes.
 * NOTE(review): at least one statement inside the if-body is missing
 * from this view of the file.
 */
4910 if (cmd->sync_cmd) {
/* Status 0 signals success to the sleeping waiter. */
4912 cmd->cmd_status = 0;
/* Wake on the same channel the waiter slept on. */
4913 sc->chan = (void *)&cmd;
4914 wakeup_one((void *)&sc->chan);
4920 * mrsas_aen_handler: AEN processing callback function from thread context
4921 * input: Adapter soft state
4923 * Asynchronous event handler
4926 mrsas_aen_handler(struct mrsas_softc *sc)
/*
 * Thread-context AEN (asynchronous event) processor: refresh PD/LD/ctrl
 * state according to the event code, rescan the CAM SIMs, then re-arm
 * the AEN for the next sequence number.
 * NOTE(review): many lines (braces, returns, if-conditions, default
 * paths) are missing from this view of the file.
 */
4928 union mrsas_evt_class_locale class_locale;
4931 int error, fail_aen = 0;
/* NOTE(review): the null-instance guard around this printf is truncated. */
4934 printf("invalid instance!\n");
/* Skip event processing while a detach or controller reset is underway. */
4937 if (sc->remove_in_progress || sc->reset_in_progress) {
4938 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
4939 __func__, __LINE__);
4942 if (sc->evt_detail_mem) {
/* Dispatch on the firmware event code recorded in the AEN detail buffer. */
4943 switch (sc->evt_detail_mem->code) {
4944 case MR_EVT_PD_INSERTED:
4945 fail_aen = mrsas_get_pd_list(sc);
/* sim_1 is the SIM that exposes physical drives. */
4947 mrsas_bus_scan_sim(sc, sc->sim_1);
4949 goto skip_register_aen;
4951 case MR_EVT_PD_REMOVED:
4952 fail_aen = mrsas_get_pd_list(sc);
4954 mrsas_bus_scan_sim(sc, sc->sim_1);
4956 goto skip_register_aen;
4958 case MR_EVT_LD_OFFLINE:
4959 case MR_EVT_CFG_CLEARED:
4960 case MR_EVT_LD_DELETED:
/* sim_0 is the SIM that exposes logical drives. */
4961 mrsas_bus_scan_sim(sc, sc->sim_0);
4963 case MR_EVT_LD_CREATED:
4964 fail_aen = mrsas_get_ld_list(sc);
4966 mrsas_bus_scan_sim(sc, sc->sim_0);
4968 goto skip_register_aen;
4970 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4971 case MR_EVT_FOREIGN_CFG_IMPORTED:
4972 case MR_EVT_LD_STATE_CHANGE:
4975 case MR_EVT_CTRL_PROP_CHANGED:
/* Controller property change: re-read the controller info page. */
4976 fail_aen = mrsas_get_ctrl_info(sc);
4978 goto skip_register_aen;
4984 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
/* Fallback path: refresh both PD and LD views and rescan both SIMs. */
4988 fail_aen = mrsas_get_pd_list(sc);
4990 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4991 mrsas_bus_scan_sim(sc, sc->sim_1);
4993 goto skip_register_aen;
4995 fail_aen = mrsas_get_ld_list(sc);
4997 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4998 mrsas_bus_scan_sim(sc, sc->sim_0);
5000 goto skip_register_aen;
/* Re-arm: ask firmware for events after the one just handled. */
5002 seq_num = sc->evt_detail_mem->seq_num + 1;
5004 /* Register AEN with FW for latest sequence number plus 1 */
5005 class_locale.members.reserved = 0;
5006 class_locale.members.locale = MR_EVT_LOCALE_ALL;
5007 class_locale.members.class = MR_EVT_CLASS_DEBUG;
5009 if (sc->aen_cmd != NULL)
5012 mtx_lock(&sc->aen_lock);
5013 error = mrsas_register_aen(sc, seq_num,
5015 mtx_unlock(&sc->aen_lock);
5018 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
5027 * mrsas_complete_aen: Completes AEN command
5028 * input: Adapter soft state
5029 * Cmd that was issued to abort another cmd
5031 * This function will be called from ISR and will continue event processing from
5032 * thread context by enqueuing task in ev_tq (callback function
5033 * "mrsas_aen_handler").
5036 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/*
 * ISR-side AEN completion: note that an event fired, wake any poll()
 * waiter, release the command, and hand processing to the ev_tq
 * taskqueue (mrsas_aen_handler runs in thread context).
 * NOTE(review): the else-branch and some trailing statements are missing
 * from this view of the file.
 */
5039 * Don't signal app if it is just an aborted previously registered
5042 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
5043 sc->mrsas_aen_triggered = 1;
/* aen_lock guards the poll-waiting flag and the selinfo. */
5044 mtx_lock(&sc->aen_lock);
5045 if (sc->mrsas_poll_waiting) {
5046 sc->mrsas_poll_waiting = 0;
5047 selwakeup(&sc->mrsas_select);
5049 mtx_unlock(&sc->aen_lock);
5054 mrsas_release_mfi_cmd(cmd);
/* Defer the heavy lifting to thread context via the event taskqueue. */
5056 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
/*
 * Newbus method table: maps generic device/bus operations to the
 * mrsas driver entry points.
 * NOTE(review): the DEVMETHOD_END terminator / closing brace is missing
 * from this view of the file.
 */
5061 static device_method_t mrsas_methods[] = {
5062 DEVMETHOD(device_probe, mrsas_probe),
5063 DEVMETHOD(device_attach, mrsas_attach),
5064 DEVMETHOD(device_detach, mrsas_detach),
5065 DEVMETHOD(device_shutdown, mrsas_shutdown),
5066 DEVMETHOD(device_suspend, mrsas_suspend),
5067 DEVMETHOD(device_resume, mrsas_resume),
5068 DEVMETHOD(bus_print_child, bus_generic_print_child),
5069 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/*
 * Driver glue: register the mrsas driver on the PCI bus and declare the
 * dependency on the CAM subsystem.
 * NOTE(review): the driver name / methods fields of mrsas_driver are
 * missing from this view of the file.
 */
5073 static driver_t mrsas_driver = {
5076 sizeof(struct mrsas_softc)
5079 static devclass_t mrsas_devclass;
5081 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
5082 MODULE_DEPEND(mrsas, cam, 1, 1, 1);