2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies, either expressed or implied, of the FreeBSD Project.
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
47 #include <cam/cam_ccb.h>
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
55 #include <sys/endian.h>
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92 struct mrsas_mfi_cmd *cmd_to_abort);
94 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
95 static struct mrsas_softc *
96 mrsas_get_softc_instance(struct cdev *dev,
97 u_long cmd, caddr_t arg);
99 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
100 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
102 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
103 struct mrsas_mfi_cmd *mfi_cmd);
104 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
105 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
106 int mrsas_init_adapter(struct mrsas_softc *sc);
107 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
108 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
109 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
110 int mrsas_ioc_init(struct mrsas_softc *sc);
111 int mrsas_bus_scan(struct mrsas_softc *sc);
112 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
113 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
114 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
115 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
116 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
117 int mrsas_reset_targets(struct mrsas_softc *sc);
119 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
120 struct mrsas_mfi_cmd *cmd);
122 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
124 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
125 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
126 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
127 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
128 void mrsas_disable_intr(struct mrsas_softc *sc);
129 void mrsas_enable_intr(struct mrsas_softc *sc);
130 void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
131 void mrsas_free_mem(struct mrsas_softc *sc);
132 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
133 void mrsas_isr(void *arg);
134 void mrsas_teardown_intr(struct mrsas_softc *sc);
135 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
136 void mrsas_kill_hba(struct mrsas_softc *sc);
137 void mrsas_aen_handler(struct mrsas_softc *sc);
139 mrsas_write_reg(struct mrsas_softc *sc, int offset,
142 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
143 u_int32_t req_desc_hi);
144 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
146 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
147 struct mrsas_mfi_cmd *cmd, u_int8_t status);
148 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
150 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
151 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
153 extern int mrsas_cam_attach(struct mrsas_softc *sc);
154 extern void mrsas_cam_detach(struct mrsas_softc *sc);
155 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
156 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
157 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
158 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
159 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
160 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
161 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
163 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
164 extern void mrsas_xpt_release(struct mrsas_softc *sc);
165 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
166 mrsas_get_request_desc(struct mrsas_softc *sc,
168 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
169 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
170 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
171 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
173 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
174 union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
175 u_int32_t data_length, u_int8_t *sense);
177 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
178 u_int32_t req_desc_hi);
/* Root sysctl node for this driver: hw.mrsas.* (read-only node, MPSAFE). */
180 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
181 "MRSAS Driver Parameters");
184 * PCI device struct and table
/*
 * PCI identification entry.
 * NOTE(review): the struct's field list is elided in this extraction; from
 * mrsas_find_ident() usage it carries vendor, device, subvendor, subdevice
 * and a description string -- confirm against mrsas.h / the full source.
 */
187 typedef struct mrsas_ident {
/*
 * Supported controller table.  Vendor 0x1000 is LSI/Avago/Broadcom; a
 * subvendor/subdevice of 0xffff acts as a wildcard (see mrsas_find_ident()).
 */
195 MRSAS_CTLR_ID device_table[] = {
196 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
197 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
198 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
199 {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
200 {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
201 {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
202 {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
203 {0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
204 {0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
205 {0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
206 {0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
207 {0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
208 {0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
209 {0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
210 {0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
211 {0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
212 {0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
213 {0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
214 {0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
215 {0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
216 {0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
221 * Character device entry points
/*
 * Character device switch for /dev/mrsas<unit>: routes open/close/read/
 * write/ioctl/poll on the control node to the handlers declared above.
 */
224 static struct cdevsw mrsas_cdevsw = {
225 .d_version = D_VERSION,
226 .d_open = mrsas_open,
227 .d_close = mrsas_close,
228 .d_read = mrsas_read,
229 .d_write = mrsas_write,
230 .d_ioctl = mrsas_ioctl,
231 .d_poll = mrsas_poll,
/* Kernel malloc(9) type used for all of this driver's heap allocations. */
235 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
238 * In the cdevsw routines, we find our softc by using the si_drv1 member of
239 * struct cdev. We set this variable to point to our softc in our attach
240 * routine when we create the /dev entry.
/*
 * cdev open entry point.  Per the comment above, the softc is found via
 * dev->si_drv1.  NOTE(review): body partially elided in this extraction.
 */
243 mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
245 struct mrsas_softc *sc;
/*
 * cdev close entry point.  NOTE(review): body partially elided in this
 * extraction.
 */
252 mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
254 struct mrsas_softc *sc;
/*
 * cdev read entry point.  NOTE(review): body partially elided in this
 * extraction.
 */
261 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
263 struct mrsas_softc *sc;
/*
 * cdev write entry point.  NOTE(review): body partially elided in this
 * extraction.
 */
269 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
271 struct mrsas_softc *sc;
278 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
280 u_int32_t i = 0, ret_val;
284 ret_val = mrsas_read_reg(sc, offset);
286 } while(ret_val == 0 && i < 3);
288 ret_val = mrsas_read_reg(sc, offset);
294 * Register Read/Write Functions
298 mrsas_write_reg(struct mrsas_softc *sc, int offset,
301 bus_space_tag_t bus_tag = sc->bus_tag;
302 bus_space_handle_t bus_handle = sc->bus_handle;
304 bus_space_write_4(bus_tag, bus_handle, offset, value);
308 mrsas_read_reg(struct mrsas_softc *sc, int offset)
310 bus_space_tag_t bus_tag = sc->bus_tag;
311 bus_space_handle_t bus_handle = sc->bus_handle;
313 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
317 * Interrupt Disable/Enable/Clear Functions
321 mrsas_disable_intr(struct mrsas_softc *sc)
323 u_int32_t mask = 0xFFFFFFFF;
326 sc->mask_interrupts = 1;
327 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
328 /* Dummy read to force pci flush */
329 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
333 mrsas_enable_intr(struct mrsas_softc *sc)
335 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
338 sc->mask_interrupts = 0;
339 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
340 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
342 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
343 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
347 mrsas_clear_intr(struct mrsas_softc *sc)
351 /* Read received interrupt */
352 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
354 /* Not our interrupt, so just return */
355 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
358 /* We got a reply interrupt */
363 * PCI Support Functions
366 static struct mrsas_ident *
367 mrsas_find_ident(device_t dev)
369 struct mrsas_ident *pci_device;
371 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
372 if ((pci_device->vendor == pci_get_vendor(dev)) &&
373 (pci_device->device == pci_get_device(dev)) &&
374 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
375 (pci_device->subvendor == 0xffff)) &&
376 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
377 (pci_device->subdevice == 0xffff)))
384 mrsas_probe(device_t dev)
386 static u_int8_t first_ctrl = 1;
387 struct mrsas_ident *id;
389 if ((id = mrsas_find_ident(dev)) != NULL) {
391 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
395 device_set_desc(dev, id->desc);
396 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
403 * mrsas_setup_sysctl: setup sysctl values for mrsas
404 * input: Adapter instance soft state
406 * Setup sysctl entries for mrsas driver.
409 mrsas_setup_sysctl(struct mrsas_softc *sc)
411 struct sysctl_ctx_list *sysctl_ctx = NULL;
412 struct sysctl_oid *sysctl_tree = NULL;
413 char tmpstr[80], tmpstr2[80];
416 * Setup the sysctl variable so the user can change the debug level
419 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
420 device_get_unit(sc->mrsas_dev));
421 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
/* Prefer the per-device sysctl context/tree provided by newbus ... */
423 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
424 if (sysctl_ctx != NULL)
425 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
/* ... and fall back to a private context under hw.mrsas.<unit> if absent. */
427 if (sysctl_tree == NULL) {
428 sysctl_ctx_init(&sc->sysctl_ctx);
429 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
430 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
431 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
432 if (sc->sysctl_tree == NULL)
434 sysctl_ctx = &sc->sysctl_ctx;
435 sysctl_tree = sc->sysctl_tree;
/* Per-controller tunables and read-only statistics exported to userland. */
437 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
438 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
439 "Disable the use of OCR");
441 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
442 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
443 strlen(MRSAS_VERSION), "driver version");
445 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
446 OID_AUTO, "reset_count", CTLFLAG_RD,
447 &sc->reset_count, 0, "number of ocr from start of the day");
449 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
450 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
451 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
453 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
454 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
455 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
457 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
458 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
459 "Driver debug level");
461 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
462 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
463 0, "Driver IO timeout value in mili-second.");
465 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
466 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
467 &sc->mrsas_fw_fault_check_delay,
468 0, "FW fault check thread delay in seconds. <default is 1 sec>");
470 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
471 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
472 &sc->reset_in_progress, 0, "ocr in progress status");
474 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
475 OID_AUTO, "block_sync_cache", CTLFLAG_RW,
476 &sc->block_sync_cache, 0,
477 "Block SYNC CACHE at driver. <default: 0, send it to FW>");
478 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
479 OID_AUTO, "stream detection", CTLFLAG_RW,
480 &sc->drv_stream_detection, 0,
481 "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
482 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
483 OID_AUTO, "prp_count", CTLFLAG_RD,
484 &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
485 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
486 OID_AUTO, "SGE holes", CTLFLAG_RD,
487 &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
491 * mrsas_get_tunables: get tunable parameters.
492 * input: Adapter instance soft state
494 * Get tunable parameters. This will help to debug driver at boot time.
497 mrsas_get_tunables(struct mrsas_softc *sc)
/* Establish compiled-in defaults before consulting loader tunables. */
501 /* XXX default to some debugging for now */
503 (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
504 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
505 sc->mrsas_fw_fault_check_delay = 1;
507 sc->reset_in_progress = 0;
508 sc->block_sync_cache = 0;
509 sc->drv_stream_detection = 1;
512 * Grab the global variables.
514 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
/* Global load-balancing tunable. */
519 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
521 /* Grab the unit-instance variables */
522 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
523 device_get_unit(sc->mrsas_dev));
524 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
528 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
529 * Used to get sequence number at driver load time.
530 * input: Adapter soft state
532 * Allocates DMAable memory for the event log info internal command.
535 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
539 /* Allocate get event log info command */
540 el_info_size = sizeof(struct mrsas_evt_log_info);
/* DMA tag constrained below 4 GiB so the 32-bit SGE can address it. */
541 if (bus_dma_tag_create(sc->mrsas_parent_tag,
543 BUS_SPACE_MAXADDR_32BIT,
552 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
555 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
556 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
557 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
/* Load the map; mrsas_addr_cb stores the bus address in el_info_phys_addr. */
560 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
561 sc->el_info_mem, el_info_size, mrsas_addr_cb,
562 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
563 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
566 memset(sc->el_info_mem, 0, el_info_size);
571 * mrsas_free_evt_info_cmd: Free memory for Event log info command
572 * input: Adapter soft state
574 * Deallocates memory for the event log info internal command.
577 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
579 if (sc->el_info_phys_addr)
580 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
581 if (sc->el_info_mem != NULL)
582 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
583 if (sc->el_info_tag != NULL)
584 bus_dma_tag_destroy(sc->el_info_tag);
588 * mrsas_get_seq_num: Get latest event sequence number
589 * @sc: Adapter soft state
590 * @eli: Firmware event log sequence number information.
592 * Firmware maintains a log of all events in a non-volatile area.
593 * Driver get the sequence number using DCMD
594 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
598 mrsas_get_seq_num(struct mrsas_softc *sc,
599 struct mrsas_evt_log_info *eli)
601 struct mrsas_mfi_cmd *cmd;
602 struct mrsas_dcmd_frame *dcmd;
603 u_int8_t do_ocr = 1, retcode = 0;
605 cmd = mrsas_get_mfi_cmd(sc);
608 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
611 dcmd = &cmd->frame->dcmd;
/* DMAable buffer for the firmware to write the event log info into. */
613 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
614 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
615 mrsas_release_mfi_cmd(cmd);
/* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame (little-endian on the wire). */
618 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
620 dcmd->cmd = MFI_CMD_DCMD;
621 dcmd->cmd_status = 0x0;
623 dcmd->flags = htole16(MFI_FRAME_DIR_READ);
626 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
627 dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
628 dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
629 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));
631 retcode = mrsas_issue_blocked_cmd(sc, cmd);
632 if (retcode == ETIMEDOUT)
637 * Copy the data back into callers buffer
639 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
640 mrsas_free_evt_log_info_cmd(sc);
/* A timed-out DCMD schedules an online controller reset (OCR). */
644 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
646 mrsas_release_mfi_cmd(cmd);
652 * mrsas_register_aen: Register for asynchronous event notification
653 * @sc: Adapter soft state
654 * @seq_num: Starting sequence number
655 * @class_locale: Class of the event
657 * This function subscribes for events beyond the @seq_num
658 * and type @class_locale.
662 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
663 u_int32_t class_locale_word)
666 struct mrsas_mfi_cmd *cmd;
667 struct mrsas_dcmd_frame *dcmd;
668 union mrsas_evt_class_locale curr_aen;
669 union mrsas_evt_class_locale prev_aen;
672 * If there an AEN pending already (aen_cmd), check if the
673 * class_locale of that pending AEN is inclusive of the new AEN
674 * request we currently have. If it is, then we don't have to do
675 * anything. In other words, whichever events the current AEN request
676 * is subscribing to, have already been subscribed to. If the old_cmd
677 * is _not_ inclusive, then we have to abort that command, form a
678 * class_locale that is superset of both old and current and re-issue
682 curr_aen.word = class_locale_word;
/* mbox.w[1] of the pending AEN frame holds its class/locale word. */
685 prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);
688 * A class whose enum value is smaller is inclusive of all
689 * higher values. If a PROGRESS (= -1) was previously
690 * registered, then a new registration requests for higher
691 * classes need not be sent to FW. They are automatically
692 * included. Locale numbers don't have such hierarchy. They
695 if ((prev_aen.members.class <= curr_aen.members.class) &&
696 !((prev_aen.members.locale & curr_aen.members.locale) ^
697 curr_aen.members.locale)) {
699 * Previously issued event registration includes
700 * current request. Nothing to do.
/* Form the superset of old and new subscriptions, then abort the old cmd. */
704 curr_aen.members.locale |= prev_aen.members.locale;
706 if (prev_aen.members.class < curr_aen.members.class)
707 curr_aen.members.class = prev_aen.members.class;
709 sc->aen_cmd->abort_aen = 1;
710 ret_val = mrsas_issue_blocked_abort_cmd(sc,
714 printf("mrsas: Failed to abort previous AEN command\n");
720 cmd = mrsas_get_mfi_cmd(sc);
724 dcmd = &cmd->frame->dcmd;
726 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
729 * Prepare DCMD for aen registration
731 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
733 dcmd->cmd = MFI_CMD_DCMD;
734 dcmd->cmd_status = 0x0;
736 dcmd->flags = htole16(MFI_FRAME_DIR_READ);
739 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
740 dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
741 dcmd->mbox.w[0] = htole32(seq_num);
742 sc->last_seq_num = seq_num;
743 dcmd->mbox.w[1] = htole32(curr_aen.word);
744 dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
745 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));
/* Only one AEN registration may be outstanding at a time. */
747 if (sc->aen_cmd != NULL) {
748 mrsas_release_mfi_cmd(cmd);
752 * Store reference to the cmd used to register for AEN. When an
753 * application wants us to register for AEN, we have to abort this
754 * cmd and re-register with a new EVENT LOCALE supplied by that app
759 * Issue the aen registration frame
761 if (mrsas_issue_dcmd(sc, cmd)) {
762 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
769 * mrsas_start_aen: Subscribes to AEN during driver load time
770 * @instance: Adapter soft state
773 mrsas_start_aen(struct mrsas_softc *sc)
775 struct mrsas_evt_log_info eli;
776 union mrsas_evt_class_locale class_locale;
778 /* Get the latest sequence number from FW */
780 memset(&eli, 0, sizeof(eli));
782 if (mrsas_get_seq_num(sc, &eli))
785 /* Register AEN with FW for latest sequence number plus 1 */
786 class_locale.members.reserved = 0;
787 class_locale.members.locale = MR_EVT_LOCALE_ALL;
788 class_locale.members.class = MR_EVT_CLASS_DEBUG;
790 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
796 * mrsas_setup_msix: Allocate MSI-x vectors
797 * @sc: adapter soft state
800 mrsas_setup_msix(struct mrsas_softc *sc)
/* One IRQ resource + handler per allocated MSI-X vector (rids start at 1). */
804 for (i = 0; i < sc->msix_vectors; i++) {
805 sc->irq_context[i].sc = sc;
806 sc->irq_context[i].MSIxIndex = i;
807 sc->irq_id[i] = i + 1;
808 sc->mrsas_irq[i] = bus_alloc_resource_any
809 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
811 if (sc->mrsas_irq[i] == NULL) {
812 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
813 goto irq_alloc_failed;
815 if (bus_setup_intr(sc->mrsas_dev,
817 INTR_MPSAFE | INTR_TYPE_CAM,
818 NULL, mrsas_isr, &sc->irq_context[i],
819 &sc->intr_handle[i])) {
820 device_printf(sc->mrsas_dev,
821 "Cannot set up MSI-x interrupt handler\n");
822 goto irq_alloc_failed;
/* Failure path: release whatever was set up so far. */
828 mrsas_teardown_intr(sc);
833 * mrsas_allocate_msix: Setup MSI-x vectors
834 * @sc: adapter soft state
837 mrsas_allocate_msix(struct mrsas_softc *sc)
/* Ask the PCI layer for sc->msix_vectors MSI-X messages. */
839 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
840 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
841 " of vectors\n", sc->msix_vectors);
843 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
844 goto irq_alloc_failed;
/* Failure path: tear down any interrupt state. */
849 mrsas_teardown_intr(sc);
854 * mrsas_attach: PCI entry point
855 * input: pointer to device struct
857 * Performs setup of PCI and registers, initializes mutexes and linked lists,
858 * registers interrupts and CAM, and initializes the adapter/controller to
862 mrsas_attach(device_t dev)
864 struct mrsas_softc *sc = device_get_softc(dev);
867 memset(sc, 0, sizeof(struct mrsas_softc));
869 /* Look up our softc and initialize its fields. */
871 sc->device_id = pci_get_device(dev);
/* Classify the controller generation by PCI device ID. */
873 switch (sc->device_id) {
877 case MRSAS_INTRUDER_24:
878 case MRSAS_CUTLASS_52:
879 case MRSAS_CUTLASS_53:
880 sc->mrsas_gen3_ctrl = 1;
886 case MRSAS_VENTURA_4PORT:
887 case MRSAS_CRUSADER_4PORT:
888 sc->is_ventura = true;
890 case MRSAS_AERO_10E1:
891 case MRSAS_AERO_10E5:
892 device_printf(dev, "Adapter is in configurable secure mode\n");
893 case MRSAS_AERO_10E2:
894 case MRSAS_AERO_10E6:
897 case MRSAS_AERO_10E0:
898 case MRSAS_AERO_10E3:
899 case MRSAS_AERO_10E4:
900 case MRSAS_AERO_10E7:
901 device_printf(dev, "Adapter is in non-secure mode\n");
905 mrsas_get_tunables(sc);
908 * Set up PCI and registers
910 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
911 /* Force the busmaster enable bit on. */
912 cmd |= PCIM_CMD_BUSMASTEREN;
913 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
915 /* For Ventura/Aero system registers are mapped to BAR0 */
916 if (sc->is_ventura || sc->is_aero)
917 sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
919 sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */
921 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
922 &(sc->reg_res_id), RF_ACTIVE))
924 device_printf(dev, "Cannot allocate PCI registers\n");
/* Cache the bus space tag/handle used by mrsas_read_reg/mrsas_write_reg. */
927 sc->bus_tag = rman_get_bustag(sc->reg_res);
928 sc->bus_handle = rman_get_bushandle(sc->reg_res);
/* Initialize mutexes */
931 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
932 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
933 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
934 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
935 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
936 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
937 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
938 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
939 mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);
/* Initialize linked lists */
942 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
943 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
945 mrsas_atomic_set(&sc->fw_outstanding, 0);
946 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
947 mrsas_atomic_set(&sc->prp_count, 0);
948 mrsas_atomic_set(&sc->sge_holes, 0);
950 sc->io_cmds_highwater = 0;
952 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
953 sc->UnevenSpanSupport = 0;
957 /* Initialize Firmware */
958 if (mrsas_init_fw(sc) != SUCCESS) {
961 /* Register mrsas to CAM layer */
962 if ((mrsas_cam_attach(sc) != SUCCESS)) {
963 goto attach_fail_cam;
966 if (mrsas_setup_irq(sc) != SUCCESS) {
967 goto attach_fail_irq;
/* Kernel thread that performs online controller reset (OCR) when needed. */
969 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
970 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
971 device_get_unit(sc->mrsas_dev));
973 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
974 goto attach_fail_ocr_thread;
977 * After FW initialization and OCR thread creation
978 * we will defer the cdev creation, AEN setup on ICH callback
980 sc->mrsas_ich.ich_func = mrsas_ich_startup;
981 sc->mrsas_ich.ich_arg = sc;
982 if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
983 device_printf(sc->mrsas_dev, "Config hook is already established\n");
985 mrsas_setup_sysctl(sc);
/* Error unwind: undo each successfully completed step in reverse order. */
988 attach_fail_ocr_thread:
989 if (sc->ocr_thread_active)
990 wakeup(&sc->ocr_chan);
992 mrsas_teardown_intr(sc);
994 mrsas_cam_detach(sc);
996 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */
997 if (sc->msix_enable == 1)
998 pci_release_msi(sc->mrsas_dev);
1000 mtx_destroy(&sc->sim_lock);
1001 mtx_destroy(&sc->aen_lock);
1002 mtx_destroy(&sc->pci_lock);
1003 mtx_destroy(&sc->io_lock);
1004 mtx_destroy(&sc->ioctl_lock);
1005 mtx_destroy(&sc->mpt_cmd_pool_lock);
1006 mtx_destroy(&sc->mfi_cmd_pool_lock);
1007 mtx_destroy(&sc->raidmap_lock);
1008 mtx_destroy(&sc->stream_lock);
1011 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
1012 sc->reg_res_id, sc->reg_res);
1018 * Interrupt config hook
1021 mrsas_ich_startup(void *arg)
1024 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
/* Initialize a counting semaphore bounding the number of concurrent IOCTLs. */
1029 sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
1030 IOCTL_SEMA_DESCRIPTION);
1032 /* Create a /dev entry for mrsas controller. */
1033 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
1034 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
1035 device_get_unit(sc->mrsas_dev));
/* Unit 0 also gets the Linux megaraid_sas-compatible ioctl alias node. */
1037 if (device_get_unit(sc->mrsas_dev) == 0) {
1038 make_dev_alias_p(MAKEDEV_CHECKNAME,
1039 &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
1040 "megaraid_sas_ioctl_node");
/* Stash the softc so the cdevsw handlers can find it (see comment above). */
1043 sc->mrsas_cdev->si_drv1 = sc;
1046 * Add this controller to mrsas_mgmt_info structure so that it can be
1047 * exported to management applications
1049 if (device_get_unit(sc->mrsas_dev) == 0)
1050 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
1052 mrsas_mgmt_info.count++;
1053 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
1054 mrsas_mgmt_info.max_index++;
1056 /* Enable Interrupts */
1057 mrsas_enable_intr(sc);
1059 /* Call DCMD get_pd_info for all system PDs */
1060 for (i = 0; i < MRSAS_MAX_PD; i++) {
1061 if ((sc->target_list[i].target_id != 0xffff) &&
1063 mrsas_get_pd_info(sc, sc->target_list[i].target_id);
1066 /* Initiate AEN (Asynchronous Event Notification) */
1067 if (mrsas_start_aen(sc)) {
1068 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
1069 "Further events from the controller will not be communicated.\n"
1070 "Either there is some problem in the controller"
1071 "or the controller does not support AEN.\n"
1072 "Please contact to the SUPPORT TEAM if the problem persists\n");
/* Startup is done; tear down the config intrhook so boot can proceed. */
1074 if (sc->mrsas_ich.ich_arg != NULL) {
1075 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
1076 config_intrhook_disestablish(&sc->mrsas_ich);
1077 sc->mrsas_ich.ich_arg = NULL;
1082 * mrsas_detach: De-allocates and teardown resources
1083 * input: pointer to device struct
1085 * This function is the entry point for device disconnect and detach.
1086 * It performs memory de-allocations, shutdown of the controller and various
1087 * teardown and destroy resource functions.
/*
 * mrsas_detach: device detach entry point.
 * input: dev - newbus device handle for this controller.
 *
 * Tears the driver down in dependency order: character devices first
 * (so no new IOCTLs arrive), then the management-table slot, then OCR
 * thread shutdown, firmware flush/shutdown, interrupt/CAM teardown,
 * mutexes, the IOCTL semaphore, register BAR and sysctl context.
 */
1090 mrsas_detach(device_t dev)
1092 struct mrsas_softc *sc;
1095 sc = device_get_softc(dev);
/* Flag remove so in-flight paths (IOCTL, OCR) stop taking new work. */
1096 sc->remove_in_progress = 1;
1098 /* Destroy the character device so no other IOCTL will be handled */
1099 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1100 destroy_dev(sc->mrsas_linux_emulator_cdev);
1101 destroy_dev(sc->mrsas_cdev);
1104 * Take the instance off the instance array. Note that we will not
1105 * decrement the max_index. We let this array be sparse array
1107 for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1108 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1109 mrsas_mgmt_info.count--;
1110 mrsas_mgmt_info.sc_ptr[i] = NULL;
/* Nudge the OCR kthread awake so it can notice remove_in_progress. */
1115 if (sc->ocr_thread_active)
1116 wakeup(&sc->ocr_chan);
/* Wait (1s ticks) for any in-progress online controller reset. */
1117 while (sc->reset_in_progress) {
1119 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1120 mrsas_dprint(sc, MRSAS_INFO,
1121 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1123 pause("mr_shutdown", hz);
/* Then wait for the OCR thread itself to exit. */
1126 while (sc->ocr_thread_active) {
1128 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1129 mrsas_dprint(sc, MRSAS_INFO,
1131 "mrsas_ocr thread to quit ocr %d\n", i,
1132 sc->ocr_thread_active);
1134 pause("mr_shutdown", hz);
/* Flush controller cache and shut the firmware down cleanly. */
1136 mrsas_flush_cache(sc);
1137 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1138 mrsas_disable_intr(sc);
/* Ventura/Aero only: release per-LD stream-detection state. */
1140 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
1141 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
1142 free(sc->streamDetectByLD[i], M_MRSAS);
1143 free(sc->streamDetectByLD, M_MRSAS);
1144 sc->streamDetectByLD = NULL;
1147 mrsas_cam_detach(sc);
1148 mrsas_teardown_intr(sc);
1150 mtx_destroy(&sc->sim_lock);
1151 mtx_destroy(&sc->aen_lock);
1152 mtx_destroy(&sc->pci_lock);
1153 mtx_destroy(&sc->io_lock);
1154 mtx_destroy(&sc->ioctl_lock);
1155 mtx_destroy(&sc->mpt_cmd_pool_lock);
1156 mtx_destroy(&sc->mfi_cmd_pool_lock);
1157 mtx_destroy(&sc->raidmap_lock);
1158 mtx_destroy(&sc->stream_lock);
1160 /* Wait for all the semaphores to be released */
1161 while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
1162 pause("mr_shutdown", hz);
1164 /* Destroy the counting semaphore created for Ioctl */
1165 sema_destroy(&sc->ioctl_count_sema);
/* Release the register BAR mapped at attach time. */
1168 bus_release_resource(sc->mrsas_dev,
1169 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1171 if (sc->sysctl_tree != NULL)
1172 sysctl_ctx_free(&sc->sysctl_ctx);
/*
 * mrsas_shutdown: system shutdown entry point.
 * input: dev - newbus device handle.
 *
 * Unlike detach, this waits for an in-progress OCR only for a bounded
 * time (15 iterations of 1s) and then proceeds with cache flush,
 * firmware shutdown and interrupt disable.  All of it is skipped when
 * the kernel has panicked, where sleeping is not allowed.
 */
1178 mrsas_shutdown(device_t dev)
1180 struct mrsas_softc *sc;
1183 sc = device_get_softc(dev);
1184 sc->remove_in_progress = 1;
1185 if (!KERNEL_PANICKED()) {
1186 if (sc->ocr_thread_active)
1187 wakeup(&sc->ocr_chan);
/* Bounded wait: up to 15 seconds for OCR to finish. */
1189 while (sc->reset_in_progress && i < 15) {
1191 if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
1192 mrsas_dprint(sc, MRSAS_INFO,
1193 "[%2d]waiting for OCR to be finished "
1194 "from %s\n", i, __func__);
1196 pause("mr_shutdown", hz);
/* Timed out: shut down anyway, just log that we gave up. */
1198 if (sc->reset_in_progress) {
1199 mrsas_dprint(sc, MRSAS_INFO,
1200 "gave up waiting for OCR to be finished\n");
1204 mrsas_flush_cache(sc);
1205 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1206 mrsas_disable_intr(sc);
1211 * mrsas_free_mem: Frees allocated memory
1212 * input: Adapter instance soft state
1214 * This function is called from mrsas_detach() to free previously allocated
/*
 * mrsas_free_mem: free all DMA memory and command lists.
 * input: sc - adapter instance soft state.
 *
 * Releases, in order: both RAID map copies and local driver maps, both
 * JBOD sequence maps, the version/sense/chain-frame/IO-request/
 * reply-descriptor/event-detail/PD-info DMA allocations, the MFI and
 * MPT command lists, the request descriptor array, the parent DMA tag
 * and the cached controller info.  Each DMA triple follows the same
 * unload -> free -> tag-destroy pattern, each step guarded so partial
 * allocations from a failed attach are handled safely.
 */
1218 mrsas_free_mem(struct mrsas_softc *sc)
1221 u_int32_t max_fw_cmds;
1222 struct mrsas_mfi_cmd *mfi_cmd;
1223 struct mrsas_mpt_cmd *mpt_cmd;
1226 * Free RAID map memory
/* Two copies (ping/pong) of the firmware RAID map. */
1228 for (i = 0; i < 2; i++) {
1229 if (sc->raidmap_phys_addr[i])
1230 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1231 if (sc->raidmap_mem[i] != NULL)
1232 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1233 if (sc->raidmap_tag[i] != NULL)
1234 bus_dma_tag_destroy(sc->raidmap_tag[i]);
/* Driver-local (non-DMA) copy of the map. */
1236 if (sc->ld_drv_map[i] != NULL)
1237 free(sc->ld_drv_map[i], M_MRSAS);
/* Two copies of the JBOD sequence-number map. */
1239 for (i = 0; i < 2; i++) {
1240 if (sc->jbodmap_phys_addr[i])
1241 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1242 if (sc->jbodmap_mem[i] != NULL)
1243 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1244 if (sc->jbodmap_tag[i] != NULL)
1245 bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1248 * Free version buffer memory
1250 if (sc->verbuf_phys_addr)
1251 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1252 if (sc->verbuf_mem != NULL)
1253 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1254 if (sc->verbuf_tag != NULL)
1255 bus_dma_tag_destroy(sc->verbuf_tag);
1258 * Free sense buffer memory
1260 if (sc->sense_phys_addr)
1261 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1262 if (sc->sense_mem != NULL)
1263 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1264 if (sc->sense_tag != NULL)
1265 bus_dma_tag_destroy(sc->sense_tag);
1268 * Free chain frame memory
1270 if (sc->chain_frame_phys_addr)
1271 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1272 if (sc->chain_frame_mem != NULL)
1273 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1274 if (sc->chain_frame_tag != NULL)
1275 bus_dma_tag_destroy(sc->chain_frame_tag);
1278 * Free IO Request memory
1280 if (sc->io_request_phys_addr)
1281 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1282 if (sc->io_request_mem != NULL)
1283 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1284 if (sc->io_request_tag != NULL)
1285 bus_dma_tag_destroy(sc->io_request_tag);
1288 * Free Reply Descriptor memory
1290 if (sc->reply_desc_phys_addr)
1291 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1292 if (sc->reply_desc_mem != NULL)
1293 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1294 if (sc->reply_desc_tag != NULL)
1295 bus_dma_tag_destroy(sc->reply_desc_tag);
1298 * Free event detail memory
1300 if (sc->evt_detail_phys_addr)
1301 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1302 if (sc->evt_detail_mem != NULL)
1303 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1304 if (sc->evt_detail_tag != NULL)
1305 bus_dma_tag_destroy(sc->evt_detail_tag);
1308 * Free PD info memory
1310 if (sc->pd_info_phys_addr)
1311 bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
1312 if (sc->pd_info_mem != NULL)
1313 bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
1314 if (sc->pd_info_tag != NULL)
1315 bus_dma_tag_destroy(sc->pd_info_tag);
/* Release per-MFI-command frames, then the shared frame tag. */
1320 if (sc->mfi_cmd_list) {
1321 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1322 mfi_cmd = sc->mfi_cmd_list[i];
1323 mrsas_free_frame(sc, mfi_cmd);
1326 if (sc->mficmd_frame_tag != NULL)
1327 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1330 * Free MPT internal command list
1332 max_fw_cmds = sc->max_fw_cmds;
1333 if (sc->mpt_cmd_list) {
1334 for (i = 0; i < max_fw_cmds; i++) {
1335 mpt_cmd = sc->mpt_cmd_list[i];
1336 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1337 free(sc->mpt_cmd_list[i], M_MRSAS);
1339 free(sc->mpt_cmd_list, M_MRSAS);
1340 sc->mpt_cmd_list = NULL;
1343 * Free MFI internal command list
1346 if (sc->mfi_cmd_list) {
1347 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1348 free(sc->mfi_cmd_list[i], M_MRSAS);
1350 free(sc->mfi_cmd_list, M_MRSAS);
1351 sc->mfi_cmd_list = NULL;
1354 * Free request descriptor memory
1356 free(sc->req_desc, M_MRSAS);
1357 sc->req_desc = NULL;
1360 * Destroy parent tag
/* Parent tag last: all child tags above derive from it. */
1362 if (sc->mrsas_parent_tag != NULL)
1363 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1366 * Free ctrl_info memory
1368 if (sc->ctrl_info != NULL)
1369 free(sc->ctrl_info, M_MRSAS);
1373 * mrsas_teardown_intr: Teardown interrupt
1374 * input: Adapter instance soft state
1376 * This function is called from mrsas_detach() to teardown and release bus
1377 * interrupt resource.
/*
 * mrsas_teardown_intr: tear down and release interrupt resources.
 * input: sc - adapter instance soft state.
 *
 * Legacy (INTx) path releases the single vector; the MSI-X path walks
 * every allocated vector and finally releases the MSI resources from
 * the PCI layer.
 */
1380 mrsas_teardown_intr(struct mrsas_softc *sc)
1384 if (!sc->msix_enable) {
/* Legacy interrupt: one handler, one IRQ resource. */
1385 if (sc->intr_handle[0])
1386 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1387 if (sc->mrsas_irq[0] != NULL)
1388 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1389 sc->irq_id[0], sc->mrsas_irq[0]);
1390 sc->intr_handle[0] = NULL;
/* MSI-X: tear down each vector that was set up. */
1392 for (i = 0; i < sc->msix_vectors; i++) {
1393 if (sc->intr_handle[i])
1394 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1395 sc->intr_handle[i]);
1397 if (sc->mrsas_irq[i] != NULL)
1398 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1399 sc->irq_id[i], sc->mrsas_irq[i]);
1401 sc->intr_handle[i] = NULL;
1403 pci_release_msi(sc->mrsas_dev);
1409 * mrsas_suspend: Suspend entry point
1410 * input: Device struct pointer
1412 * This function is the entry point for system suspend from the OS.
/*
 * mrsas_suspend: system suspend entry point (currently a stub).
 * input: dev - newbus device handle.
 */
1415 mrsas_suspend(device_t dev)
1417 /* This will be filled when the driver will have hibernation support */
1422 * mrsas_resume: Resume entry point
1423 * input: Device struct pointer
1425 * This function is the entry point for system resume from the OS.
/*
 * mrsas_resume: system resume entry point (currently a stub).
 * input: dev - newbus device handle.
 */
1428 mrsas_resume(device_t dev)
1430 /* This will be filled when the driver will have hibernation support */
1435 * mrsas_get_softc_instance: Find softc instance based on cmd type
1437 * This function will return softc instance based on cmd type.
1438 * In some cases, the application fires an ioctl on the required management instance and
1439 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1440 * case, else get the softc instance from host_no provided by application in
/*
 * mrsas_get_softc_instance: resolve the softc an ioctl targets.
 * input: dev - character device; cmd - ioctl code; arg - ioctl payload.
 * return: matching softc, or NULL when the requested host does not exist.
 *
 * MRSAS_IOC_GET_PCI_INFO uses the cdev's si_drv1 directly; other
 * commands carry a host_no in the iocpacket that indexes the global
 * mrsas_mgmt_info table.
 */
1444 static struct mrsas_softc *
1445 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1447 struct mrsas_softc *sc = NULL;
1448 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1450 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1454 * get the Host number & the softc from data sent by the
/* Look up by the application-supplied host number. */
1457 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1459 printf("There is no Controller number %d\n",
1461 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1462 mrsas_dprint(sc, MRSAS_FAULT,
1463 "Invalid Controller number %d\n", user_ioc->host_no);
1470 * mrsas_ioctl: IOCtl commands entry point.
1472 * This function is the entry point for IOCtls from the OS. It calls the
1473 * appropriate function for processing depending on the command received.
/*
 * mrsas_ioctl: IOCTL entry point.
 * input: dev/cmd/arg/flag - standard cdevsw ioctl arguments.
 *
 * Resolves the target softc, refuses requests while the driver is being
 * removed or the HW is in critical error, waits out any in-progress OCR,
 * then dispatches: firmware pass-through (bounded by the counting
 * semaphore), bus rescan, or PCI info query.
 */
1476 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
1479 struct mrsas_softc *sc;
1481 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1483 sc = mrsas_get_softc_instance(dev, cmd, arg);
/* Reject while detaching or after an unrecoverable HW fault. */
1487 if (sc->remove_in_progress ||
1488 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
1489 mrsas_dprint(sc, MRSAS_INFO,
1490 "Either driver remove or shutdown called or "
1491 "HW is in unrecoverable critical error state.\n");
/* Fast path: take the spin lock only to sample reset_in_progress. */
1494 mtx_lock_spin(&sc->ioctl_lock);
1495 if (!sc->reset_in_progress) {
1496 mtx_unlock_spin(&sc->ioctl_lock);
1499 mtx_unlock_spin(&sc->ioctl_lock);
/* Slow path: sleep in 1s ticks until the OCR completes. */
1500 while (sc->reset_in_progress) {
1502 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1503 mrsas_dprint(sc, MRSAS_INFO,
1504 "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1506 pause("mr_ioctl", hz);
1511 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1512 #ifdef COMPAT_FREEBSD32
1513 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1516 * Decrement the Ioctl counting Semaphore before getting an
/* sema_wait() blocks once MRSAS_MAX_IOCTL_CMDS are in flight. */
1519 sema_wait(&sc->ioctl_count_sema);
1521 ret = mrsas_passthru(sc, (void *)arg, cmd);
1523 /* Increment the Ioctl counting semaphore value */
1524 sema_post(&sc->ioctl_count_sema);
1527 case MRSAS_IOC_SCAN_BUS:
1528 ret = mrsas_bus_scan(sc);
1531 case MRSAS_IOC_GET_PCI_INFO:
/* Fill caller's buffer with this device's PCI location. */
1532 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1533 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1534 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1535 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1536 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1537 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1538 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1539 "pci device no: %d, pci function no: %d,"
1540 "pci domain ID: %d\n",
1541 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1542 pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1547 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1555 * mrsas_poll: poll entry point for mrsas driver fd
1557 * This function is the entry point for poll from the OS. It waits for some AEN
1558 * events to be triggered from the controller and notifies back.
/*
 * mrsas_poll: poll(2) entry point for the mrsas cdev.
 * input: dev - character device; poll_events - requested events;
 *        td - polling thread.
 *
 * Reports readability when an AEN has already fired; otherwise records
 * the thread via selrecord() (under aen_lock) so it is woken when the
 * next AEN arrives.
 */
1561 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1563 struct mrsas_softc *sc;
1568 if (poll_events & (POLLIN | POLLRDNORM)) {
/* An AEN is pending: report ready immediately. */
1569 if (sc->mrsas_aen_triggered) {
1570 revents |= poll_events & (POLLIN | POLLRDNORM);
/* Nothing pending: register for wakeup on the next AEN. */
1574 if (poll_events & (POLLIN | POLLRDNORM)) {
1575 mtx_lock(&sc->aen_lock);
1576 sc->mrsas_poll_waiting = 1;
1577 selrecord(td, &sc->mrsas_select);
1578 mtx_unlock(&sc->aen_lock);
1585 * mrsas_setup_irq: Set up interrupt
1586 * input: Adapter instance soft state
1588 * This function sets up interrupts as a bus resource, with flags indicating
1589 * resource permitting contemporaneous sharing and for resource to activate
/*
 * mrsas_setup_irq: allocate and wire up the controller interrupt.
 * input: sc - adapter instance soft state.
 *
 * Tries MSI-X first when enabled; on failure falls back to a single
 * shared legacy (INTx) interrupt routed to mrsas_isr().
 */
1593 mrsas_setup_irq(struct mrsas_softc *sc)
1595 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1596 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
/* MSI-X unavailable or failed: use one shared legacy vector. */
1599 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1600 sc->irq_context[0].sc = sc;
1601 sc->irq_context[0].MSIxIndex = 0;
1603 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1604 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1605 if (sc->mrsas_irq[0] == NULL) {
1606 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
/* Register the ISR; INTR_MPSAFE means no Giant lock is taken. */
1610 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1611 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1612 &sc->irq_context[0], &sc->intr_handle[0])) {
1613 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1622 * mrsas_isr: ISR entry point
1623 * input: argument pointer
1625 * This function is the interrupt service routine entry point. There are two
1626 * types of interrupts, state change interrupt and response interrupt. If an
1627 * interrupt is not ours, we just return.
/*
 * mrsas_isr: interrupt service routine.
 * input: arg - per-vector mrsas_irq_context (softc + MSI-X index).
 *
 * Bails if interrupts are masked or an OCR is active; for legacy
 * interrupts first checks/clears the status register to see whether the
 * interrupt is ours; then processes completed replies for this vector.
 */
1630 mrsas_isr(void *arg)
1632 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1633 struct mrsas_softc *sc = irq_context->sc;
/* Interrupts masked (e.g. during reset): ignore. */
1636 if (sc->mask_interrupts)
/* Legacy INTx is shared: verify the interrupt is really ours. */
1639 if (!sc->msix_vectors) {
1640 status = mrsas_clear_intr(sc);
1644 /* If we are resetting, bail */
1645 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1646 printf(" Entered into ISR when OCR is going active. \n");
1647 mrsas_clear_intr(sc);
1650 /* Process for reply request and clear response interrupt */
1651 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1652 mrsas_clear_intr(sc);
1658 * mrsas_complete_cmd: Process reply request
1659 * input: Adapter instance soft state
1661 * This function is called from mrsas_isr() to process reply request and clear
1662 * response interrupt. Processing of the reply request entails walking
1663 * through the reply descriptor array for the command request pended from
1664 * Firmware. We look at the Function field to determine the command type and
1665 * perform the appropriate action. Before we return, we clear the response
/*
 * mrsas_complete_cmd: drain one MSI-X vector's reply queue.
 * input: sc - adapter soft state; MSIxIndex - reply queue to service.
 *
 * Walks the reply descriptor ring starting at last_reply_idx for this
 * vector.  For each valid descriptor it looks up the owning MPT command
 * by SMID and completes it according to the request Function: task
 * management (wake the waiter), fast-path/LD I/O (including RAID-1
 * peer-command handling), or MFI pass-through.  The consumed descriptor
 * is reset to all-ones, and the reply post host index register is
 * updated every THRESHOLD_REPLY_COUNT completions and once at the end.
 */
1669 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1671 Mpi2ReplyDescriptorsUnion_t *desc;
1672 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1673 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1674 struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
1675 struct mrsas_mfi_cmd *cmd_mfi;
1676 u_int8_t reply_descript_type, *sense;
1677 u_int16_t smid, num_completed;
1678 u_int8_t status, extStatus;
1679 union desc_value desc_val;
1680 PLD_LOAD_BALANCE_INFO lbinfo;
1681 u_int32_t device_id, data_length;
1682 int threshold_reply_count = 0;
1684 MR_TASK_MANAGE_REQUEST *mr_tm_req;
1685 MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
1688 /* If we have a hardware error, not need to continue */
1689 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
/* Position at this vector's next unconsumed reply descriptor. */
1692 desc = sc->reply_desc_mem;
1693 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1694 + sc->last_reply_idx[MSIxIndex];
1696 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1698 desc_val.word = desc->Words;
1701 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1703 /* Find our reply descriptor for the command and process */
/* All-ones Words marks an empty/consumed slot — loop until we hit one. */
1704 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
/* SMID is 1-based; command list is 0-based. */
1705 smid = le16toh(reply_desc->SMID);
1706 cmd_mpt = sc->mpt_cmd_list[smid - 1];
1707 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1709 status = scsi_io_req->RaidContext.raid_context.status;
1710 extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
1711 sense = cmd_mpt->sense;
1712 data_length = scsi_io_req->DataLength;
1714 switch (scsi_io_req->Function) {
1715 case MPI2_FUNCTION_SCSI_TASK_MGMT:
/* TM completion: log it and wake the thread waiting on ocr_chan. */
1717 mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
1718 mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
1719 &mr_tm_req->TmRequest;
1720 device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
1721 "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
1723 wakeup_one((void *)&sc->ocr_chan);
1725 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
1726 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1727 lbinfo = &sc->load_balance_info[device_id];
1728 /* R1 load balancing for READ */
1729 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1730 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1731 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1733 /* Fall thru and complete IO */
1734 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
/* Plain (non-RAID1-peered) command: complete it directly. */
1735 if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
1736 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
1737 extStatus, le32toh(data_length), sense);
1738 mrsas_cmd_done(sc, cmd_mpt);
1739 mrsas_atomic_dec(&sc->fw_outstanding);
1742 * If the peer Raid 1/10 fast path failed,
1743 * mark IO as failed to the scsi layer.
1744 * Overwrite the current status by the failed status
1745 * and make sure that if any command fails,
1746 * driver returns fail status to CAM.
/* RAID 1/10 pair: finish only once both halves have completed. */
1748 cmd_mpt->cmd_completed = 1;
1749 r1_cmd = cmd_mpt->peer_cmd;
1750 if (r1_cmd->cmd_completed) {
1751 if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
1752 status = r1_cmd->io_request->RaidContext.raid_context.status;
1753 extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
1754 data_length = r1_cmd->io_request->DataLength;
1755 sense = r1_cmd->sense;
1757 r1_cmd->ccb_ptr = NULL;
1758 if (r1_cmd->callout_owner) {
1759 callout_stop(&r1_cmd->cm_callout);
1760 r1_cmd->callout_owner = false;
1762 mrsas_release_mpt_cmd(r1_cmd);
1763 mrsas_atomic_dec(&sc->fw_outstanding);
1764 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
1765 extStatus, le32toh(data_length), sense);
1766 mrsas_cmd_done(sc, cmd_mpt);
1767 mrsas_atomic_dec(&sc->fw_outstanding);
1771 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
1772 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1774 * Make sure NOT TO release the mfi command from the called
1775 * function's context if it is fired with issue_polled call.
1776 * And also make sure that the issue_polled call should only be
1777 * used if INTERRUPT IS DISABLED.
1779 if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
1780 mrsas_release_mfi_cmd(cmd_mfi);
1782 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
/* Advance (and wrap) this vector's reply index. */
1786 sc->last_reply_idx[MSIxIndex]++;
1787 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1788 sc->last_reply_idx[MSIxIndex] = 0;
/* Mark the descriptor consumed for the next pass. */
1790 desc->Words = ~((uint64_t)0x00); /* set it back to all
1793 threshold_reply_count++;
1795 /* Get the next reply descriptor */
1796 if (!sc->last_reply_idx[MSIxIndex]) {
1797 desc = sc->reply_desc_mem;
1798 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1802 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1803 desc_val.word = desc->Words;
1805 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1807 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1811 * Write to reply post index after completing threshold reply
1812 * count and still there are more replies in reply queue
1813 * pending to be completed.
1815 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1816 if (sc->msix_enable) {
1817 if (sc->msix_combined)
1818 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1819 ((MSIxIndex & 0x7) << 24) |
1820 sc->last_reply_idx[MSIxIndex]);
1822 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1823 sc->last_reply_idx[MSIxIndex]);
1825 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1826 reply_post_host_index), sc->last_reply_idx[0]);
1828 threshold_reply_count = 0;
1832 /* No match, just return */
1833 if (num_completed == 0)
1836 /* Clear response interrupt */
/* Final host-index update tells firmware how far we consumed. */
1837 if (sc->msix_enable) {
1838 if (sc->msix_combined) {
1839 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1840 ((MSIxIndex & 0x7) << 24) |
1841 sc->last_reply_idx[MSIxIndex]);
1843 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1844 sc->last_reply_idx[MSIxIndex]);
1846 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1847 reply_post_host_index), sc->last_reply_idx[0]);
1853 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1854 * input: Adapter instance soft state
1856 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1857 * It checks the command status and maps the appropriate CAM status for the
/*
 * mrsas_map_mpt_cmd_status: translate firmware status to CAM status.
 * input: cmd - completed MPT command; ccb_ptr - CAM CCB to annotate;
 *        status/extStatus - firmware completion codes;
 *        data_length - transferred length; sense - sense buffer.
 *
 * Maps MFI status codes to the corresponding CAM ccb_h.status values,
 * copying sense data back for SCSI errors.
 */
1861 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
1862 u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
1864 struct mrsas_softc *sc = cmd->sc;
1865 u_int8_t *sense_data;
1869 ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1871 case MFI_STAT_SCSI_IO_FAILED:
1872 case MFI_STAT_SCSI_DONE_WITH_ERROR:
/* SCSI-level error: hand sense data back to CAM. */
1873 ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1874 sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
1876 /* For now just copy 18 bytes back */
1877 memcpy(sense_data, sense, 18);
1878 ccb_ptr->csio.sense_len = 18;
1879 ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1882 case MFI_STAT_LD_OFFLINE:
1883 case MFI_STAT_DEVICE_NOT_FOUND:
/* LUN != 0 means a bad LUN, otherwise the device is gone. */
1884 if (ccb_ptr->ccb_h.target_lun)
1885 ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1887 ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1889 case MFI_STAT_CONFIG_SEQ_MISMATCH:
/* Stale config sequence: ask CAM to requeue the request. */
1890 ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1893 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1894 ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1895 ccb_ptr->csio.scsi_status = status;
1901 * mrsas_alloc_mem: Allocate DMAable memory
1902 * input: Adapter instance soft state
1904 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1905 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1906 * Kernel virtual address. Callback argument is physical memory address.
/*
 * mrsas_alloc_mem: allocate all DMA-able memory for the adapter.
 * input: sc - adapter instance soft state.
 *
 * Creates the parent DMA tag, then for each structure (version buffer,
 * IO request frames, chain frames, reply descriptors, sense buffers,
 * event detail, PD info) performs the tag-create / dmamem-alloc /
 * dmamap-load triple, capturing the bus address via mrsas_addr_cb().
 * All of these tags restrict addresses to the low 4GB
 * (BUS_SPACE_MAXADDR_32BIT); only the final data-buffer tag spans the
 * full address space.  mrsas_free_mem() is the matching teardown.
 */
1909 mrsas_alloc_mem(struct mrsas_softc *sc)
1911 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
1912 evt_detail_size, count, pd_info_size;
1915 * Allocate parent DMA tag
1917 if (bus_dma_tag_create(NULL, /* parent */
1920 BUS_SPACE_MAXADDR, /* lowaddr */
1921 BUS_SPACE_MAXADDR, /* highaddr */
1922 NULL, NULL, /* filter, filterarg */
1923 maxphys, /* maxsize */
1924 sc->max_num_sge, /* nsegments */
1925 maxphys, /* maxsegsize */
1927 NULL, NULL, /* lockfunc, lockarg */
1928 &sc->mrsas_parent_tag /* tag */
1930 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1934 * Allocate for version buffer
1936 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1937 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1939 BUS_SPACE_MAXADDR_32BIT,
1948 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1951 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1952 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1953 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1956 bzero(sc->verbuf_mem, verbuf_size);
/* Load the map; callback stores the bus address in verbuf_phys_addr. */
1957 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1958 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1960 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1964 * Allocate IO Request Frames
1966 io_req_size = sc->io_frames_alloc_sz;
1967 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1969 BUS_SPACE_MAXADDR_32BIT,
1977 &sc->io_request_tag)) {
1978 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1981 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1982 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1983 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1986 bzero(sc->io_request_mem, io_req_size);
1987 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1988 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1989 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1990 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1994 * Allocate Chain Frames
1996 chain_frame_size = sc->chain_frames_alloc_sz;
1997 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1999 BUS_SPACE_MAXADDR_32BIT,
2007 &sc->chain_frame_tag)) {
2008 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
2011 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
2012 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
2013 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
2016 bzero(sc->chain_frame_mem, chain_frame_size);
2017 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
2018 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
2019 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
2020 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
/* One reply ring per MSI-X vector (at least one). */
2023 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2025 * Allocate Reply Descriptor Array
2027 reply_desc_size = sc->reply_alloc_sz * count;
2028 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2030 BUS_SPACE_MAXADDR_32BIT,
2038 &sc->reply_desc_tag)) {
2039 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
2042 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
2043 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
2044 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
2047 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
2048 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
2049 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
2050 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
2054 * Allocate Sense Buffer Array. Keep in lower 4GB
2056 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
2057 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2059 BUS_SPACE_MAXADDR_32BIT,
2068 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
2071 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
2072 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
2073 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
2076 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
2077 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
2079 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
2084 * Allocate for Event detail structure
2086 evt_detail_size = sizeof(struct mrsas_evt_detail);
2087 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2089 BUS_SPACE_MAXADDR_32BIT,
2097 &sc->evt_detail_tag)) {
2098 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
2101 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
2102 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
2103 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
2106 bzero(sc->evt_detail_mem, evt_detail_size);
2107 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
2108 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
2109 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
2110 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
2115 * Allocate for PD INFO structure
2117 pd_info_size = sizeof(struct mrsas_pd_info);
2118 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2120 BUS_SPACE_MAXADDR_32BIT,
2128 &sc->pd_info_tag)) {
2129 device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
2132 if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
2133 BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
2134 device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
2137 bzero(sc->pd_info_mem, pd_info_size);
2138 if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
2139 sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
2140 &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
2141 device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
2146 * Create a dma tag for data buffers; size will be the maximum
2147 * possible I/O size (280kB).
2149 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2156 sc->max_num_sge, /* nsegments */
2162 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
2169 * mrsas_addr_cb: Callback function of bus_dmamap_load()
2170 * input: callback argument, machine dependent type
2171 * that describes DMA segments, number of segments, error code
2173 * This function is for the driver to receive mapping information resultant of
2174 * the bus_dmamap_load(). The information is actually not being used, but the
2175 * address is saved anyway.
/*
 * mrsas_addr_cb: bus_dmamap_load() callback.
 * input: arg - pointer to a bus_addr_t to fill in; segs/nsegs/error -
 *        standard busdma callback arguments.
 *
 * Records the bus address of the first (and only expected) segment into
 * the caller-provided location.
 */
2178 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2183 *addr = segs[0].ds_addr;
2187 * mrsas_setup_raidmap: Set up RAID map.
2188 * input: Adapter instance soft state
2190 * Allocate DMA memory for the RAID maps and perform setup.
/*
 * mrsas_setup_raidmap: allocate and load the RAID maps.
 * input: sc - adapter instance soft state.
 *
 * Allocates two driver-local map copies, then two DMA-backed firmware
 * map buffers (tag/alloc/load triple each), and finally pulls the
 * current map from firmware via mrsas_get_map_info() /
 * mrsas_sync_map_info().
 */
2193 mrsas_setup_raidmap(struct mrsas_softc *sc)
/* Local (non-DMA) map copies, one per ping/pong slot. */
2197 for (i = 0; i < 2; i++) {
2199 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2200 /* Do Error handling */
2201 if (!sc->ld_drv_map[i]) {
2202 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
/* Second allocation failed: release the first before bailing. */
2205 free(sc->ld_drv_map[0], M_MRSAS);
2206 /* ABORT driver initialization */
/* DMA-backed firmware map buffers, restricted to the low 4GB. */
2211 for (int i = 0; i < 2; i++) {
2212 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2214 BUS_SPACE_MAXADDR_32BIT,
2222 &sc->raidmap_tag[i])) {
2223 device_printf(sc->mrsas_dev,
2224 "Cannot allocate raid map tag.\n");
2227 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2228 (void **)&sc->raidmap_mem[i],
2229 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2230 device_printf(sc->mrsas_dev,
2231 "Cannot allocate raidmap memory.\n");
2234 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2236 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2237 sc->raidmap_mem[i], sc->max_map_sz,
2238 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2240 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2243 if (!sc->raidmap_mem[i]) {
2244 device_printf(sc->mrsas_dev,
2245 "Cannot allocate memory for raid map.\n");
/* Fetch the map from firmware, then push our sync of it. */
2250 if (!mrsas_get_map_info(sc))
2251 mrsas_sync_map_info(sc);
2260 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2261 * @sc: Adapter soft state
2263 * Return 0 on success.
/*
 * megasas_setup_jbod_map: set up the JBOD map used for fast-path
 * sequence numbers.
 * input: sc - adapter instance soft state.
 * Return 0 on success.
 *
 * Skips entirely when firmware does not advertise useSeqNumJbodFP or
 * when the map is already allocated; otherwise allocates two DMA
 * buffers and syncs the PD sequence numbers in both directions,
 * enabling use_seqnum_jbod_fp only if both syncs succeed.
 */
2266 megasas_setup_jbod_map(struct mrsas_softc *sc)
2269 uint32_t pd_seq_map_sz;
/* Map size: header plus one MR_PD_CFG_SEQ per possible device. */
2271 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2272 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
/* Firmware does not support seq-number JBOD fast path: disable. */
2274 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2275 sc->use_seqnum_jbod_fp = 0;
/* Already allocated (e.g. after OCR): nothing to do. */
2278 if (sc->jbodmap_mem[0])
2281 for (i = 0; i < 2; i++) {
2282 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2284 BUS_SPACE_MAXADDR_32BIT,
2292 &sc->jbodmap_tag[i])) {
2293 device_printf(sc->mrsas_dev,
2294 "Cannot allocate jbod map tag.\n");
2297 if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2298 (void **)&sc->jbodmap_mem[i],
2299 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2300 device_printf(sc->mrsas_dev,
2301 "Cannot allocate jbod map memory.\n");
2304 bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2306 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2307 sc->jbodmap_mem[i], pd_seq_map_sz,
2308 mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2310 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2313 if (!sc->jbodmap_mem[i]) {
2314 device_printf(sc->mrsas_dev,
2315 "Cannot allocate memory for jbod map.\n");
2316 sc->use_seqnum_jbod_fp = 0;
/* Enable fast path only if both directions of the sync succeed. */
2322 if (!megasas_sync_pd_seq_num(sc, false) &&
2323 !megasas_sync_pd_seq_num(sc, true))
2324 sc->use_seqnum_jbod_fp = 1;
2326 sc->use_seqnum_jbod_fp = 0;
2328 device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2332 * mrsas_init_fw: Initialize Firmware
2333 * input: Adapter soft state
2335 * Calls transition_to_ready() to make sure Firmware is in operational state and
2336 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2337 * issues internal commands to get the controller info after the IOC_INIT
2338 * command response is received by Firmware. Note: code relating to
2339 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2340 * is left here as placeholder.
2343 mrsas_init_fw(struct mrsas_softc *sc)
2346 int ret, loop, ocr = 0;
2347 u_int32_t max_sectors_1;
2348 u_int32_t max_sectors_2;
2349 u_int32_t tmp_sectors;
2350 u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
2351 int msix_enable = 0;
2352 int fw_msix_count = 0;
2355 /* Make sure Firmware is ready */
2356 ret = mrsas_transition_to_ready(sc, ocr);
2357 if (ret != SUCCESS) {
/* Ventura/Aero parts report an extended RAID map size in scratch pad 3. */
2360 if (sc->is_ventura || sc->is_aero) {
2361 scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
2363 device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
2365 sc->maxRaidMapSize = ((scratch_pad_3 >>
2366 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
2367 MR_MAX_RAID_MAP_SIZE_MASK);
2369 /* MSI-x index 0- reply post host index register */
2370 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2371 /* Check if MSI-X is supported while in ready state */
/* Bit 26 (0x4000000) of the scratch pad advertises MSI-X capability. */
2372 msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2375 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2376 outbound_scratch_pad_2));
2378 /* Check max MSI-X vectors */
2379 if (sc->device_id == MRSAS_TBOLT) {
2380 sc->msix_vectors = (scratch_pad_2
2381 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2382 fw_msix_count = sc->msix_vectors;
2384 /* Invader/Fury supports 96 MSI-X vectors */
2385 sc->msix_vectors = ((scratch_pad_2
2386 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2387 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2388 fw_msix_count = sc->msix_vectors;
/* Combined reply-queue mode: gen3 with >8 vectors, or Ventura/Aero
 * with >16 vectors. */
2390 if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
2391 ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
2392 sc->msix_combined = true;
2394 * Save 1-15 reply post index
2395 * address to local memory Index 0
2396 * is already saved from reg offset
2397 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
2399 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2401 sc->msix_reg_offset[loop] =
2402 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2407 /* Don't bother allocating more MSI-X vectors than cpus */
2408 sc->msix_vectors = min(sc->msix_vectors,
2411 /* Allocate MSI-x vectors */
2412 if (mrsas_allocate_msix(sc) == SUCCESS)
2413 sc->msix_enable = 1;
2415 sc->msix_enable = 0;
2417 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2418 "Online CPU %d Current MSIX <%d>\n",
2419 fw_msix_count, mp_ncpus, sc->msix_vectors);
2422 * MSI-X host index 0 is common for all adapter.
2423 * It is used for all MPT based Adapters.
2425 if (sc->msix_combined) {
2426 sc->msix_reg_offset[0] =
2427 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
2429 if (mrsas_init_adapter(sc) != SUCCESS) {
2430 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
/* NVMe page-size negotiation (Ventura/Aero only): scratch pad 4 carries
 * a page shift; honor it only when at least the default shift. */
2434 if (sc->is_ventura || sc->is_aero) {
2435 scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2436 outbound_scratch_pad_4));
2437 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
2438 sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
2440 device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
2443 /* Allocate internal commands for pass-thru */
2444 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2445 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2448 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2449 if (!sc->ctrl_info) {
2450 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2454 * Get the controller info from FW, so that the MAX VD support
2455 * availability can be decided.
2457 if (mrsas_get_ctrl_info(sc)) {
2458 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2461 sc->secure_jbod_support =
2462 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2464 if (sc->secure_jbod_support)
2465 device_printf(sc->mrsas_dev, "FW supports SED \n");
2467 if (sc->use_seqnum_jbod_fp)
2468 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2470 if (sc->support_morethan256jbod)
2471 device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
2473 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2474 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2475 "There seems to be some problem in the controller\n"
2476 "Please contact to the SUPPORT TEAM if the problem persists\n");
2478 megasas_setup_jbod_map(sc);
/* Initialize the task-management target list; 0xffff marks an unused
 * slot. */
2480 memset(sc->target_list, 0,
2481 MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
2482 for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
2483 sc->target_list[i].target_id = 0xffff;
2485 /* For pass-thru, get PD/LD list and controller info */
2486 memset(sc->pd_list, 0,
2487 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2488 if (mrsas_get_pd_list(sc) != SUCCESS) {
2489 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2492 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2493 if (mrsas_get_ld_list(sc) != SUCCESS) {
2494 device_printf(sc->mrsas_dev, "Get LD list failed.\n");
/* Per-LD stream-detection state, Ventura/Aero only and only when the
 * drv_stream_detection knob is set. */
2498 if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
2499 sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
2500 MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
2501 if (!sc->streamDetectByLD) {
2502 device_printf(sc->mrsas_dev,
2503 "unable to allocate stream detection for pool of LDs\n");
2506 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
2507 sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
2508 if (!sc->streamDetectByLD[i]) {
2509 device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
/* Unwind the partial per-LD allocation on failure. */
2510 for (j = 0; j < i; ++j)
2511 free(sc->streamDetectByLD[j], M_MRSAS);
2512 free(sc->streamDetectByLD, M_MRSAS);
2513 sc->streamDetectByLD = NULL;
2516 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
2517 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
2522 * Compute the max allowed sectors per IO: The controller info has
2523 * two limits on max sectors. Driver should use the minimum of these
2526 * 1 << stripe_sz_ops.min = max sectors per strip
2528 * Note that older firmwares ( < FW ver 30) didn't report information to
2529 * calculate max_sectors_1. So the number ended up as zero always.
2532 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2533 sc->ctrl_info->max_strips_per_io;
2534 max_sectors_2 = sc->ctrl_info->max_request_size;
2535 tmp_sectors = min(max_sectors_1, max_sectors_2);
/* Cap by what the SGE count can map (512-byte sectors). */
2536 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2538 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2539 sc->max_sectors_per_req = tmp_sectors;
2541 sc->disableOnlineCtrlReset =
2542 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2543 sc->UnevenSpanSupport =
2544 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2545 if (sc->UnevenSpanSupport) {
2546 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2547 sc->UnevenSpanSupport);
/* Fast-path IO is usable only when the RAID map validates. */
2549 if (MR_ValidateMapInfo(sc))
2550 sc->fast_path_io = 1;
2552 sc->fast_path_io = 0;
2555 device_printf(sc->mrsas_dev, "max_fw_cmds: %u max_scsi_cmds: %u\n",
2556 sc->max_fw_cmds, sc->max_scsi_cmds);
2561 * mrsas_init_adapter: Initializes the adapter/controller
2562 * input: Adapter soft state
2564 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2565 * ROC/controller. The FW register is read to determined the number of
2566 * commands that is supported. All memory allocations for IO is based on
2567 * max_cmd. Appropriate calculations are performed in this function.
2570 mrsas_init_adapter(struct mrsas_softc *sc)
2573 u_int32_t scratch_pad_2;
2577 /* Read FW status register */
2578 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2580 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2582 /* Decrement the max supported by 1, to correlate with FW */
2583 sc->max_fw_cmds = sc->max_fw_cmds - 1;
/* Reserve MRSAS_MAX_MFI_CMDS slots for internal MFI commands; the rest
 * serve SCSI IO. */
2584 sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;
2586 /* Determine allocation size of command frames */
/* Reply queue depth: (max_fw_cmds + 1) rounded up to a multiple of 16,
 * then doubled. */
2587 sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
2588 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
2589 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2590 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
2591 (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
2592 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2593 outbound_scratch_pad_2));
2595 mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x,"
2596 "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x,"
2597 "sc->io_frames_alloc_sz 0x%x\n", __func__,
2598 sc->reply_q_depth, sc->request_alloc_sz,
2599 sc->reply_alloc_sz, sc->io_frames_alloc_sz);
2602 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2603 * Firmware support extended IO chain frame which is 4 time more
2604 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2605 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
2607 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2608 sc->max_chain_frame_sz =
2609 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2612 sc->max_chain_frame_sz =
2613 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2616 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
/* SGEs that fit in the main message frame after the fixed header
 * (each IEEE SGE is 16 bytes). */
2617 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2618 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2620 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
/* NOTE(review): the -2 presumably accounts for the chain element and a
 * reserved entry — confirm against the MPI2 IEEE SGL layout. */
2621 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2623 mrsas_dprint(sc, MRSAS_INFO,
2624 "max sge: 0x%x, max chain frame size: 0x%x, "
2625 "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
2627 sc->max_chain_frame_sz, sc->max_fw_cmds,
2628 sc->chain_frames_alloc_sz);
2630 /* Used for pass thru MFI frame (DCMD) */
2631 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2633 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2634 sizeof(MPI2_SGE_IO_UNION)) / 16;
/* One reply-index tracker per MSI-X vector (at least one). */
2636 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2638 for (i = 0; i < count; i++)
2639 sc->last_reply_idx[i] = 0;
2641 ret = mrsas_alloc_mem(sc);
2645 ret = mrsas_alloc_mpt_cmds(sc);
2649 ret = mrsas_ioc_init(sc);
2657 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2658 * input: Adapter soft state
2660 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2663 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2667 /* Allocate IOC INIT command */
/* 1024 bytes of headroom precede the MPI2 IOC INIT request: the MFI
 * init frame lives at offset 0 and the IOC INIT message at offset 1024
 * (see mrsas_ioc_init). */
2668 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2669 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2671 BUS_SPACE_MAXADDR_32BIT,
2679 &sc->ioc_init_tag)) {
2680 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2683 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2684 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2685 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2688 bzero(sc->ioc_init_mem, ioc_init_size);
/* Map for DMA; the bus address is written to ioc_init_phys_mem by the
 * mrsas_addr_cb load callback. */
2689 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2690 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2691 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2692 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2699 * mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
2700 * input: Adapter soft state
2702 * Deallocates memory of the IOC Init cmd.
2705 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
/* Tear down in reverse order of allocation: unload the DMA map, free
 * the DMA memory, then destroy the tag. Each step is guarded, so this
 * is safe after a partial mrsas_alloc_ioc_cmd() failure. */
2707 if (sc->ioc_init_phys_mem)
2708 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2709 if (sc->ioc_init_mem != NULL)
2710 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2711 if (sc->ioc_init_tag != NULL)
2712 bus_dma_tag_destroy(sc->ioc_init_tag);
2716 * mrsas_ioc_init: Sends IOC Init command to FW
2717 * input: Adapter soft state
2719 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2722 mrsas_ioc_init(struct mrsas_softc *sc)
2724 struct mrsas_init_frame *init_frame;
2725 pMpi2IOCInitRequest_t IOCInitMsg;
2726 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
2727 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2728 bus_addr_t phys_addr;
2730 u_int32_t scratch_pad_2;
2732 /* Allocate memory for the IOC INIT command */
2733 if (mrsas_alloc_ioc_cmd(sc)) {
2734 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
/* Probe FW for SYNC_CACHE handling unless the tunable disables it. */
2738 if (!sc->block_sync_cache) {
2739 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2740 outbound_scratch_pad_2));
2741 sc->fw_sync_cache_support = (scratch_pad_2 &
2742 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
/* The MPI2 IOC INIT message sits 1024 bytes into the buffer allocated
 * by mrsas_alloc_ioc_cmd(); the MFI init frame is at offset 0. */
2745 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
2746 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
2747 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2748 IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
2749 IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
2750 IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
2751 IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
2752 IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
2753 IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
2754 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
2755 IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
/* Wrap the IOC INIT in an MFI init frame; 0xFF in cmd_status marks it
 * pending so completion can be polled below. */
2757 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
2758 init_frame->cmd = MFI_CMD_INIT;
2759 init_frame->cmd_status = 0xFF;
2760 init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
2762 /* driver support Extended MSIX */
2763 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
2764 init_frame->driver_operations.
2765 mfi_capabilities.support_additional_msix = 1;
/* Advertise the driver version string to FW if the buffer exists. */
2767 if (sc->verbuf_mem) {
2768 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
2770 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
2771 init_frame->driver_ver_hi = 0;
2773 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
2774 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
2775 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
2776 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
2777 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
2779 init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg);
/* Hand FW the bus address of the IOC INIT message (offset 1024). */
2781 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
2782 init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr);
2783 init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t));
2785 req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem);
2786 req_desc.MFAIo.RequestFlags =
2787 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2789 mrsas_disable_intr(sc);
2790 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
2791 mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);
2794 * Poll response timer to wait for Firmware response. While this
2795 * timer with the DELAY call could block CPU, the time interval for
2796 * this is only 1 millisecond.
2798 if (init_frame->cmd_status == 0xFF) {
2799 for (i = 0; i < (max_wait * 1000); i++) {
2800 if (init_frame->cmd_status == 0xFF)
2806 if (init_frame->cmd_status == 0)
2807 mrsas_dprint(sc, MRSAS_OCR,
2808 "IOC INIT response received from FW.\n");
2810 if (init_frame->cmd_status == 0xFF)
2811 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
2813 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
/* Check whether FW supports atomic (single 32-bit write) descriptors;
 * mrsas_fire_cmd uses this to pick the submission path. */
2818 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2819 outbound_scratch_pad_2));
2820 sc->atomic_desc_support = (scratch_pad_2 &
2821 MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
2822 device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
2823 sc->atomic_desc_support ? "Yes" : "No");
/* The IOC INIT buffer is no longer needed once FW has consumed it. */
2826 mrsas_free_ioc_cmd(sc);
2831 * mrsas_alloc_mpt_cmds: Allocates the command packets
2832 * input: Adapter instance soft state
2834 * This function allocates the internal commands for IOs. Each command that is
2835 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2836 * array is allocated with mrsas_mpt_cmd context. The free commands are
2837 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2841 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2844 u_int32_t max_fw_cmds, count;
2845 struct mrsas_mpt_cmd *cmd;
2846 pMpi2ReplyDescriptorsUnion_t reply_desc;
2847 u_int32_t offset, chain_offset, sense_offset;
2848 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2849 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2851 max_fw_cmds = sc->max_fw_cmds;
2853 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2854 if (!sc->req_desc) {
2855 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2858 memset(sc->req_desc, 0, sc->request_alloc_sz);
2861 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2862 * Allocate the dynamic array first and then allocate individual
2865 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
2867 if (!sc->mpt_cmd_list) {
2868 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2871 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
2872 for (i = 0; i < max_fw_cmds; i++) {
2873 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2875 if (!sc->mpt_cmd_list[i]) {
/* Unwind previously allocated entries on failure. */
2876 for (j = 0; j < i; j++)
2877 free(sc->mpt_cmd_list[j], M_MRSAS);
2878 free(sc->mpt_cmd_list, M_MRSAS);
2879 sc->mpt_cmd_list = NULL;
/* Base virtual/bus addresses for per-command IO request frames, chain
 * frames and sense buffers; IO frame 0 is skipped, hence the initial
 * MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE offset. */
2884 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2885 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2886 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2887 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2888 sense_base = (u_int8_t *)sc->sense_mem;
2889 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2890 for (i = 0; i < max_fw_cmds; i++) {
2891 cmd = sc->mpt_cmd_list[i];
2892 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2893 chain_offset = sc->max_chain_frame_sz * i;
2894 sense_offset = MRSAS_SENSE_LEN * i;
2895 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2897 cmd->ccb_ptr = NULL;
2898 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2899 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
/* MRSAS_ULONG_MAX marks "no paired MFI command". */
2900 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2902 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2903 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2904 cmd->io_request_phys_addr = io_req_base_phys + offset;
2905 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2906 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2907 cmd->sense = sense_base + sense_offset;
2908 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2909 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
/* Add the initialized command to the free pool. */
2912 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2915 /* Initialize reply descriptor array to 0xFFFFFFFF */
2916 reply_desc = sc->reply_desc_mem;
2917 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2918 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2919 reply_desc->Words = MRSAS_ULONG_MAX;
2925 * mrsas_write_64bit_req_desc: Writes 64 bit request descriptor to FW
2926 * input: Adapter softstate
2927 * request descriptor address low
2928 * request descriptor address high
2931 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2932 u_int32_t req_desc_hi)
/* The low/high queue-port writes form one logical 64-bit descriptor
 * post and must not interleave with another CPU's post, hence the
 * pci_lock. Callers pass little-endian halves; le32toh() restores host
 * order before the register write. */
2934 mtx_lock(&sc->pci_lock);
2935 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2936 le32toh(req_desc_lo));
2937 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2938 le32toh(req_desc_hi));
2939 mtx_unlock(&sc->pci_lock);
2943 * mrsas_fire_cmd: Sends command to FW
2944 * input: Adapter softstate
2945 * request descriptor address low
2946 * request descriptor address high
2948 * This functions fires the command to Firmware by writing to the
2949 * inbound_low_queue_port and inbound_high_queue_port.
2952 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2953 u_int32_t req_desc_hi)
/* When FW supports atomic descriptors (see mrsas_ioc_init), a single
 * 32-bit write to the single queue port suffices; otherwise fall back
 * to the locked 64-bit low/high write. */
2955 if (sc->atomic_desc_support)
2956 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2957 le32toh(req_desc_lo));
2959 mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2963 * mrsas_transition_to_ready: Move FW to Ready state input:
2964 * Adapter instance soft state
2966 * During the initialization, FW passes can potentially be in any one of several
2967 * possible states. If the FW in operational, waiting-for-handshake states,
2968 * driver must take steps to bring it to ready state. Otherwise, it has to
2969 * wait for the ready state.
2972 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2976 u_int32_t val, fw_state;
2977 u_int32_t cur_state;
2978 u_int32_t abs_state, curr_abs_state;
2980 val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2981 fw_state = val & MFI_STATE_MASK;
2982 max_wait = MRSAS_RESET_WAIT_TIME;
2984 if (fw_state != MFI_STATE_READY)
2985 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
/* Drive the FW state machine toward READY, issuing at most one
 * transition action per loop iteration, then waiting for the state to
 * change. */
2987 while (fw_state != MFI_STATE_READY) {
2988 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2990 case MFI_STATE_FAULT:
2991 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2993 cur_state = MFI_STATE_FAULT;
2997 case MFI_STATE_WAIT_HANDSHAKE:
2998 /* Set the CLR bit in inbound doorbell */
2999 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
3000 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
3001 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3003 case MFI_STATE_BOOT_MESSAGE_PENDING:
3004 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
3006 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3008 case MFI_STATE_OPERATIONAL:
3010 * Bring it to READY state; assuming max wait 10
3013 mrsas_disable_intr(sc);
3014 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
/* Poll the doorbell ack bit (bit 0) at 1 ms granularity. */
3015 for (i = 0; i < max_wait * 1000; i++) {
3016 if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
3021 cur_state = MFI_STATE_OPERATIONAL;
/* The remaining states are transient boot/init states: no action,
 * just wait below for FW to move on. */
3023 case MFI_STATE_UNDEFINED:
3025 * This state should not last for more than 2
3028 cur_state = MFI_STATE_UNDEFINED;
3030 case MFI_STATE_BB_INIT:
3031 cur_state = MFI_STATE_BB_INIT;
3033 case MFI_STATE_FW_INIT:
3034 cur_state = MFI_STATE_FW_INIT;
3036 case MFI_STATE_FW_INIT_2:
3037 cur_state = MFI_STATE_FW_INIT_2;
3039 case MFI_STATE_DEVICE_SCAN:
3040 cur_state = MFI_STATE_DEVICE_SCAN;
3042 case MFI_STATE_FLUSH_CACHE:
3043 cur_state = MFI_STATE_FLUSH_CACHE;
3046 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
3051 * The cur_state should not last for more than max_wait secs
3053 for (i = 0; i < (max_wait * 1000); i++) {
3054 fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3055 outbound_scratch_pad)) & MFI_STATE_MASK);
3056 curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3057 outbound_scratch_pad));
3058 if (abs_state == curr_abs_state)
3065 * Return error if fw_state hasn't changed after max_wait
3067 if (curr_abs_state == abs_state) {
3068 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
3069 "in %d secs\n", fw_state, max_wait);
3073 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
3078 * mrsas_get_mfi_cmd: Get a cmd from free command pool
3079 * input: Adapter soft state
3081 * This function removes an MFI command from the command list.
3083 struct mrsas_mfi_cmd *
3084 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3086 struct mrsas_mfi_cmd *cmd = NULL;
/* Pop the head of the free list under the pool lock; cmd stays NULL
 * when the pool is exhausted, so callers must check the result. */
3088 mtx_lock(&sc->mfi_cmd_pool_lock);
3089 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3090 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3091 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3093 mtx_unlock(&sc->mfi_cmd_pool_lock);
3099 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
3100 * input: Adapter Context.
3102 * This function will check FW status register and flag do_timeout_reset flag.
3103 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
3107 mrsas_ocr_thread(void *arg)
3109 struct mrsas_softc *sc;
3110 u_int32_t fw_status, fw_state;
3111 u_int8_t tm_target_reset_failed = 0;
3113 sc = (struct mrsas_softc *)arg;
3115 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
3116 sc->ocr_thread_active = 1;
3117 mtx_lock(&sc->sim_lock);
3119 /* Sleep for 1 second and check the queue status */
3120 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3121 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
/* Exit the thread on driver detach or unrecoverable HW error. */
3122 if (sc->remove_in_progress ||
3123 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3124 mrsas_dprint(sc, MRSAS_OCR,
3125 "Exit due to %s from %s\n",
3126 sc->remove_in_progress ? "Shutdown" :
3127 "Hardware critical error", __func__);
/* Sample the FW state and decide whether recovery is needed. */
3130 fw_status = mrsas_read_reg_with_retries(sc,
3131 offsetof(mrsas_reg_set, outbound_scratch_pad));
3132 fw_state = fw_status & MFI_STATE_MASK;
3133 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
3134 mrsas_atomic_read(&sc->target_reset_outstanding)) {
3135 /* First, freeze further IOs to come to the SIM */
3136 mrsas_xpt_freeze(sc);
3138 /* If this is an IO timeout then go for target reset */
3139 if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
3140 device_printf(sc->mrsas_dev, "Initiating Target RESET "
3141 "because of SCSI IO timeout!\n");
3143 /* Let the remaining IOs to complete */
3144 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3145 "mrsas_reset_targets", 5 * hz);
3147 /* Try to reset the target device */
3148 if (mrsas_reset_targets(sc) == FAIL)
3149 tm_target_reset_failed = 1;
3152 /* If this is a DCMD timeout or FW fault,
3153 * then go for controller reset
3155 if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
3156 (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
3157 if (tm_target_reset_failed)
3158 device_printf(sc->mrsas_dev, "Initiating OCR because of "
3161 device_printf(sc->mrsas_dev, "Initiating OCR "
3162 "because of %s!\n", sc->do_timedout_reset ?
3163 "DCMD IO Timeout" : "FW fault");
/* ioctl_lock is a spin mutex; flag the reset so the ioctl path
 * backs off while OCR runs. */
3165 mtx_lock_spin(&sc->ioctl_lock);
3166 sc->reset_in_progress = 1;
3167 mtx_unlock_spin(&sc->ioctl_lock);
3171 * Wait for the AEN task to be completed if it is running.
/* sim_lock must be dropped across taskqueue_drain to avoid
 * deadlocking with the event task. */
3173 mtx_unlock(&sc->sim_lock);
3174 taskqueue_drain(sc->ev_tq, &sc->ev_task);
3175 mtx_lock(&sc->sim_lock);
3177 taskqueue_block(sc->ev_tq);
3178 /* Try to reset the controller */
3179 mrsas_reset_ctrl(sc, sc->do_timedout_reset);
/* Clear all recovery bookkeeping for the next cycle. */
3181 sc->do_timedout_reset = 0;
3182 sc->reset_in_progress = 0;
3183 tm_target_reset_failed = 0;
3184 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
3185 memset(sc->target_reset_pool, 0,
3186 sizeof(sc->target_reset_pool));
3187 taskqueue_unblock(sc->ev_tq);
3190 /* Now allow IOs to come to the SIM */
3191 mrsas_xpt_release(sc);
3194 mtx_unlock(&sc->sim_lock);
3195 sc->ocr_thread_active = 0;
3196 mrsas_kproc_exit(0);
3200 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
3201 * input: Adapter Context.
3203 * This function will clear reply descriptor so that post OCR driver and FW will
3207 mrsas_reset_reply_desc(struct mrsas_softc *sc)
3210 pMpi2ReplyDescriptorsUnion_t reply_desc;
/* Reset the per-vector last-seen reply indices. */
3212 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3213 for (i = 0; i < count; i++)
3214 sc->last_reply_idx[i] = 0;
/* Mark every reply descriptor unused (all ones).
 * NOTE(review): only reply_q_depth entries are cleared here, while
 * mrsas_alloc_mpt_cmds initializes reply_q_depth * count entries —
 * confirm whether the per-vector queues beyond the first also need
 * clearing after OCR. */
3216 reply_desc = sc->reply_desc_mem;
3217 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3218 reply_desc->Words = MRSAS_ULONG_MAX;
3223 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
3224 * input: Adapter Context.
3226 * This function will run from thread context so that it can sleep. 1. Do not
3227 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3228 * to complete for 180 seconds. 3. If #2 does not find any outstanding
3229 * command Controller is in working state, so skip OCR. Otherwise, do
3230 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3231 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3232 * OCR, Re-fire Management command and move Controller to Operation state.
3235 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3237 int retval = SUCCESS, i, j, retry = 0;
3238 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3240 struct mrsas_mfi_cmd *mfi_cmd;
3241 struct mrsas_mpt_cmd *mpt_cmd;
3242 union mrsas_evt_class_locale class_locale;
3243 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3245 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3246 device_printf(sc->mrsas_dev,
3247 "mrsas: Hardware critical error, returning FAIL.\n");
3250 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3251 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3252 mrsas_disable_intr(sc);
3253 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3254 sc->mrsas_fw_fault_check_delay * hz);
3256 /* First try waiting for commands to complete */
3257 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3258 mrsas_dprint(sc, MRSAS_OCR,
3259 "resetting adapter from %s.\n",
3261 /* Now return commands back to the CAM layer */
3262 mtx_unlock(&sc->sim_lock);
3263 for (i = 0; i < sc->max_fw_cmds; i++) {
3264 mpt_cmd = sc->mpt_cmd_list[i];
3266 if (mpt_cmd->peer_cmd) {
3267 mrsas_dprint(sc, MRSAS_OCR,
3268 "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3269 i, mpt_cmd, mpt_cmd->peer_cmd);
3272 if (mpt_cmd->ccb_ptr) {
3273 if (mpt_cmd->callout_owner) {
3274 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3275 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3276 mrsas_cmd_done(sc, mpt_cmd);
3278 mpt_cmd->ccb_ptr = NULL;
3279 mrsas_release_mpt_cmd(mpt_cmd);
3284 mrsas_atomic_set(&sc->fw_outstanding, 0);
3286 mtx_lock(&sc->sim_lock);
3288 status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3289 outbound_scratch_pad));
3290 abs_state = status_reg & MFI_STATE_MASK;
3291 reset_adapter = status_reg & MFI_RESET_ADAPTER;
3292 if (sc->disableOnlineCtrlReset ||
3293 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3294 /* Reset not supported, kill adapter */
3295 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3300 /* Now try to reset the chip */
3301 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3302 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3303 MPI2_WRSEQ_FLUSH_KEY_VALUE);
3304 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3305 MPI2_WRSEQ_1ST_KEY_VALUE);
3306 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3307 MPI2_WRSEQ_2ND_KEY_VALUE);
3308 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3309 MPI2_WRSEQ_3RD_KEY_VALUE);
3310 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3311 MPI2_WRSEQ_4TH_KEY_VALUE);
3312 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3313 MPI2_WRSEQ_5TH_KEY_VALUE);
3314 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3315 MPI2_WRSEQ_6TH_KEY_VALUE);
3317 /* Check that the diag write enable (DRWE) bit is on */
3318 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3321 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3323 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3325 if (retry++ == 100) {
3326 mrsas_dprint(sc, MRSAS_OCR,
3327 "Host diag unlock failed!\n");
3331 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3334 /* Send chip reset command */
3335 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3336 host_diag | HOST_DIAG_RESET_ADAPTER);
3339 /* Make sure reset adapter bit is cleared */
3340 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3343 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3345 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3347 if (retry++ == 1000) {
3348 mrsas_dprint(sc, MRSAS_OCR,
3349 "Diag reset adapter never cleared!\n");
3353 if (host_diag & HOST_DIAG_RESET_ADAPTER)
3356 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3357 outbound_scratch_pad)) & MFI_STATE_MASK;
3360 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3362 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3363 outbound_scratch_pad)) & MFI_STATE_MASK;
3365 if (abs_state <= MFI_STATE_FW_INIT) {
3366 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3367 " state = 0x%x\n", abs_state);
3370 /* Wait for FW to become ready */
3371 if (mrsas_transition_to_ready(sc, 1)) {
3372 mrsas_dprint(sc, MRSAS_OCR,
3373 "mrsas: Failed to transition controller to ready.\n");
3376 mrsas_reset_reply_desc(sc);
3377 if (mrsas_ioc_init(sc)) {
3378 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3381 for (j = 0; j < sc->max_fw_cmds; j++) {
3382 mpt_cmd = sc->mpt_cmd_list[j];
3383 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3384 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3385 /* If not an IOCTL then release the command else re-fire */
3386 if (!mfi_cmd->sync_cmd) {
3387 mrsas_release_mfi_cmd(mfi_cmd);
3389 req_desc = mrsas_get_request_desc(sc,
3390 mfi_cmd->cmd_id.context.smid - 1);
3391 mrsas_dprint(sc, MRSAS_OCR,
3392 "Re-fire command DCMD opcode 0x%x index %d\n ",
3393 mfi_cmd->frame->dcmd.opcode, j);
3395 device_printf(sc->mrsas_dev,
3396 "Cannot build MPT cmd.\n");
3398 mrsas_fire_cmd(sc, req_desc->addr.u.low,
3399 req_desc->addr.u.high);
3404 /* Reset load balance info */
3405 memset(sc->load_balance_info, 0,
3406 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3408 if (mrsas_get_ctrl_info(sc)) {
3413 if (!mrsas_get_map_info(sc))
3414 mrsas_sync_map_info(sc);
3416 megasas_setup_jbod_map(sc);
3418 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3419 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3420 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3421 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3425 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3426 mrsas_enable_intr(sc);
3427 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3429 /* Register AEN with FW for last sequence number */
3430 class_locale.members.reserved = 0;
3431 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3432 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3434 mtx_unlock(&sc->sim_lock);
3435 if (mrsas_register_aen(sc, sc->last_seq_num,
3436 class_locale.word)) {
3437 device_printf(sc->mrsas_dev,
3438 "ERROR: AEN registration FAILED from OCR !!! "
3439 "Further events from the controller cannot be notified."
3440 "Either there is some problem in the controller"
3441 "or the controller does not support AEN.\n"
3442 "Please contact to the SUPPORT TEAM if the problem persists\n");
3444 mtx_lock(&sc->sim_lock);
3446 /* Adapter reset completed successfully */
3447 device_printf(sc->mrsas_dev, "Reset successful\n");
3451 /* Reset failed, kill the adapter */
3452 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3456 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3457 mrsas_enable_intr(sc);
3458 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3461 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3462 mrsas_dprint(sc, MRSAS_OCR,
3463 "Reset Exit with %d.\n", retval);
3468 * mrsas_kill_hba: Kill HBA when OCR is not supported
3469 * input: Adapter Context.
3471 * This function will kill HBA when OCR is not supported.
3474 mrsas_kill_hba(struct mrsas_softc *sc)
/* Mark the HBA dead so the rest of the driver refuses further I/O. */
3476 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
3478 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
/*
 * Signal FW to stop via the doorbell; the read-back flushes the posted
 * write.  NOTE(review): the doorbell payload argument is on a line not
 * visible in this view -- confirm against the full source.
 */
3479 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
3482 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
/* Fail any IOCTLs still outstanding against the now-dead controller. */
3483 mrsas_complete_outstanding_ioctls(sc);
3487 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3488 * input: Controller softc
3493 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3496 struct mrsas_mpt_cmd *cmd_mpt;
3497 struct mrsas_mfi_cmd *cmd_mfi;
3498 u_int32_t count, MSIxIndex;
/* One completion pass per MSI-x vector; at least one when MSI-x is off. */
3500 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* Scan every MPT command slot for a linked synchronous MFI command. */
3501 for (i = 0; i < sc->max_fw_cmds; i++) {
3502 cmd_mpt = sc->mpt_cmd_list[i];
/* MRSAS_ULONG_MAX in sync_cmd_idx means no MFI command is attached. */
3504 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3505 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
/*
 * Complete pending sync IOCTLs with the status recorded in the MPT
 * frame's RAID context; ABORT frames are skipped here.
 */
3506 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3507 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3508 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3509 cmd_mpt->io_request->RaidContext.raid_context.status);
3516 * mrsas_wait_for_outstanding: Wait for outstanding commands
3517 * input: Adapter Context.
3519 * This function will wait for 180 seconds for outstanding commands to be
3523 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3525 int i, outstanding, retval = 0;
3526 u_int32_t fw_state, count, MSIxIndex;
/* Poll up to MRSAS_RESET_WAIT_TIME iterations for FW I/O to drain. */
3528 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
/* Bail out early if the driver is being detached or shut down. */
3529 if (sc->remove_in_progress) {
3530 mrsas_dprint(sc, MRSAS_OCR,
3531 "Driver remove or shutdown called.\n")
3535 /* Check if firmware is in fault state */
3536 fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3537 outbound_scratch_pad)) & MFI_STATE_MASK;
3538 if (fw_state == MFI_STATE_FAULT) {
3539 mrsas_dprint(sc, MRSAS_OCR,
3540 "Found FW in FAULT state, will reset adapter.\n");
3541 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
/* Drop sim_lock while draining completion queues on every vector. */
3542 mtx_unlock(&sc->sim_lock);
3543 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3544 mrsas_complete_cmd(sc, MSIxIndex);
3545 mtx_lock(&sc->sim_lock);
/* A DCMD timeout is itself grounds for an immediate reset. */
3549 if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3550 mrsas_dprint(sc, MRSAS_OCR,
3551 "DCMD IO TIMEOUT detected, will reset adapter.\n");
3555 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
/* Periodic progress notice; also kick completion processing along. */
3559 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3560 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3561 "commands to complete\n", i, outstanding);
3562 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3563 mtx_unlock(&sc->sim_lock);
3564 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3565 mrsas_complete_cmd(sc, MSIxIndex);
3566 mtx_lock(&sc->sim_lock);
/* Commands still pending after the full wait: caller must reset. */
3571 if (mrsas_atomic_read(&sc->fw_outstanding)) {
3572 mrsas_dprint(sc, MRSAS_OCR,
3573 " pending commands remain after waiting,"
3574 " will reset adapter.\n");
3582 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3583 * input: Command packet for return to free cmd pool
3585 * This function returns the MFI & MPT command to the command list.
3588 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
3590 struct mrsas_softc *sc = cmd_mfi->sc;
3591 struct mrsas_mpt_cmd *cmd_mpt;
3593 mtx_lock(&sc->mfi_cmd_pool_lock);
3595 * Release the mpt command (if at all it is allocated
3596 * associated with the mfi command
/* SMID 0 means no MPT command was ever attached to this MFI command. */
3598 if (cmd_mfi->cmd_id.context.smid) {
3599 mtx_lock(&sc->mpt_cmd_pool_lock);
3600 /* Get the mpt cmd from mfi cmd frame's smid value */
/* SMIDs are 1-based; the command list is 0-based, hence the -1. */
3601 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
/* Detach the MFI linkage and return the MPT command to its free list. */
3603 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
3604 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
3605 mtx_unlock(&sc->mpt_cmd_pool_lock);
3607 /* Release the mfi command */
3608 cmd_mfi->ccb_ptr = NULL;
3609 cmd_mfi->cmd_id.frame_count = 0;
3610 TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
3611 mtx_unlock(&sc->mfi_cmd_pool_lock);
3617 * mrsas_get_controller_info: Returns FW's controller structure
3618 * input: Adapter soft state
3619 * Controller information structure
3621 * Issues an internal command (DCMD) to get the FW's controller structure. This
3622 * information is mainly used to find out the maximum IO transfer per command
3623 * supported by the FW.
3626 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3629 u_int8_t do_ocr = 1;
3630 struct mrsas_mfi_cmd *cmd;
3631 struct mrsas_dcmd_frame *dcmd;
3633 cmd = mrsas_get_mfi_cmd(sc);
3636 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3639 dcmd = &cmd->frame->dcmd;
/* DMA buffer for the controller-info response; freed below on exit. */
3641 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3642 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3643 mrsas_release_mfi_cmd(cmd);
/* Build the MR_DCMD_CTRL_GET_INFO frame (single read SGE). */
3646 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3648 dcmd->cmd = MFI_CMD_DCMD;
3649 dcmd->cmd_status = 0xFF;
3650 dcmd->sge_count = 1;
3651 dcmd->flags = MFI_FRAME_DIR_READ;
3654 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info));
3655 dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO);
3656 dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF);
3657 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info));
/* Interrupt-driven wait when interrupts are live, else poll. */
3659 if (!sc->mask_interrupts)
3660 retcode = mrsas_issue_blocked_cmd(sc, cmd);
3662 retcode = mrsas_issue_polled(sc, cmd);
3664 if (retcode == ETIMEDOUT)
/* Copy the DMA response out and fix endianness of the fields we use. */
3667 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3668 le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties);
3669 le32_to_cpus(&sc->ctrl_info->adapterOperations2);
3670 le32_to_cpus(&sc->ctrl_info->adapterOperations3);
3671 le16_to_cpus(&sc->ctrl_info->adapterOperations4);
3675 mrsas_update_ext_vd_details(sc);
/* Cache feature bits the I/O and OCR paths consult later. */
3677 sc->use_seqnum_jbod_fp =
3678 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3679 sc->support_morethan256jbod =
3680 sc->ctrl_info->adapterOperations4.supportPdMapTargetId;
3682 sc->disableOnlineCtrlReset =
3683 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
3686 mrsas_free_ctlr_info_cmd(sc);
/* On timeout, schedule a DCMD-timeout OCR instead of failing silently. */
3689 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3691 if (!sc->mask_interrupts)
3692 mrsas_release_mfi_cmd(cmd);
3698 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3700 * sc - Controller's softc
3703 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3705 u_int32_t ventura_map_sz = 0;
/* FW capability bit: does the controller support >64 (extended) LDs? */
3706 sc->max256vdSupport =
3707 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3709 /* Below is additional check to address future FW enhancement */
3710 if (sc->ctrl_info->max_lds > 64)
3711 sc->max256vdSupport = 1;
/* Driver-side limits are fixed by channel topology. */
3713 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3714 * MRSAS_MAX_DEV_PER_CHANNEL;
3715 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3716 * MRSAS_MAX_DEV_PER_CHANNEL;
/* FW-side limits depend on whether extended VDs are supported. */
3717 if (sc->max256vdSupport) {
3718 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3719 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3721 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3722 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
/*
 * Ventura-class FW reports its RAID-map size directly; older FW uses
 * the legacy / extended fixed-layout map sizes computed below.
 */
3725 if (sc->maxRaidMapSize) {
3726 ventura_map_sz = sc->maxRaidMapSize *
3728 sc->current_map_sz = ventura_map_sz;
3729 sc->max_map_sz = ventura_map_sz;
3731 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3732 (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3733 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3734 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3735 if (sc->max256vdSupport)
3736 sc->current_map_sz = sc->new_map_sz;
3738 sc->current_map_sz = sc->old_map_sz;
3741 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3743 device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3744 sc->maxRaidMapSize);
3745 device_printf(sc->mrsas_dev,
3746 "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3747 "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3748 "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3749 sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3750 sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3755 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3756 * input: Adapter soft state
3758 * Allocates DMAable memory for the controller info internal command.
3761 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3765 /* Allocate get controller info command */
3766 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
/* DMA tag limited to 32-bit addresses: the DCMD SGE is a 32-bit SGE. */
3767 if (bus_dma_tag_create(sc->mrsas_parent_tag,
3769 BUS_SPACE_MAXADDR_32BIT,
3777 &sc->ctlr_info_tag)) {
3778 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3781 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3782 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3783 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
/* mrsas_addr_cb stores the mapped bus address in ctlr_info_phys_addr. */
3786 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3787 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3788 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3789 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3792 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3797 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3798 * input: Adapter soft state
3800 * Deallocates memory of the get controller info cmd.
3803 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
/* Tear down in reverse order of allocation: unload, free, destroy tag. */
3805 if (sc->ctlr_info_phys_addr)
3806 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3807 if (sc->ctlr_info_mem != NULL)
3808 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3809 if (sc->ctlr_info_tag != NULL)
3810 bus_dma_tag_destroy(sc->ctlr_info_tag);
3814 * mrsas_issue_polled: Issues a polling command
3815 * inputs: Adapter soft state
3816 * Command packet to be issued
3818 * This function is for posting of internal commands to Firmware. MFI requires
3819 * the cmd_status to be set to 0xFF before posting. The maximum wait time of
3820 * the poll response timer is 180 seconds.
3823 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3825 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3826 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3827 int i, retcode = SUCCESS;
/* 0xFF marks "no completion yet"; FW overwrites it with real status. */
3829 frame_hdr->cmd_status = 0xFF;
/* Tell the ISR not to post this frame to the reply queue -- we poll. */
3830 frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
3832 /* Issue the frame using inbound queue port */
3833 if (mrsas_issue_dcmd(sc, cmd)) {
3834 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3838 * Poll response timer to wait for Firmware response. While this
3839 * timer with the DELAY call could block CPU, the time interval for
3840 * this is only 1 millisecond.
/* Busy-poll cmd_status, up to max_wait seconds in 1 ms steps. */
3842 if (frame_hdr->cmd_status == 0xFF) {
3843 for (i = 0; i < (max_wait * 1000); i++) {
3844 if (frame_hdr->cmd_status == 0xFF)
/* Still 0xFF after the full window: report a DCMD timeout. */
3850 if (frame_hdr->cmd_status == 0xFF) {
3851 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3852 "seconds from %s\n", max_wait, __func__);
3853 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3854 cmd->frame->dcmd.opcode);
3855 retcode = ETIMEDOUT;
3861 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3862 * input: Adapter soft state mfi cmd pointer
3864 * This function is called by mrsas_issue_blocked_cmd() and
3865 * mrsas_issue_polled(), to build the MPT command and then fire the command
3869 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3871 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
/* Wrap the MFI frame in an MPT passthru and get its request descriptor. */
3873 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3875 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
/* Post the descriptor to FW (low/high halves of the 64-bit descriptor). */
3878 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3884 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3885 * input: Adapter soft state mfi cmd to build
3887 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3888 * command and prepares the MPT command to send to Firmware.
3890 MRSAS_REQUEST_DESCRIPTOR_UNION *
3891 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3893 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3896 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3897 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
/* SMID was assigned by mrsas_build_mptmfi_passthru(); it is 1-based. */
3900 index = cmd->cmd_id.context.smid;
3902 req_desc = mrsas_get_request_desc(sc, index - 1);
/* Fill the descriptor: SCSI-IO request type plus the command's SMID. */
3906 req_desc->addr.Words = 0;
3907 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3909 req_desc->SCSIIO.SMID = htole16(index);
3915 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3916 * input: Adapter soft state mfi cmd pointer
3918 * The MPT command and the io_request are setup as a passthru command. The SGE
3919 * chain address is set to frame_phys_addr of the MFI command.
3922 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3924 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3925 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3926 struct mrsas_mpt_cmd *mpt_cmd;
3927 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3929 mpt_cmd = mrsas_get_mpt_cmd(sc);
3933 /* Save the smid. To be used for returning the cmd */
3934 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
/* Cross-link the pair so completion can find the MFI side again. */
3936 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3939 * For cmds where the flag is set, store the flag and check on
3940 * completion. For cmds with this flag, don't call
3941 * mrsas_complete_cmd.
3944 if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
3945 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3947 io_req = mpt_cmd->io_request;
/* Gen3/Ventura/Aero: clear the flags of the last SGE in the main frame. */
3949 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
3950 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3952 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3953 sgl_ptr_end->Flags = 0;
3955 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
/* Passthru request whose chain SGE points at the raw MFI frame. */
3957 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3958 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3959 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
3961 mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr);
3963 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3964 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3966 mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz);
3972 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3973 * input: Adapter soft state Command to be issued
3975 * This function waits on an event for the command to be returned from the ISR.
3976 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3977 * internal and ioctl commands.
3980 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3982 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3983 unsigned long total_time = 0;
3984 int retcode = SUCCESS;
3986 /* Initialize cmd_status */
/* 0xFF = "not completed yet"; the ISR path overwrites it on completion. */
3987 cmd->cmd_status = 0xFF;
3989 /* Build MPT-MFI command for issue to FW */
3990 if (mrsas_issue_dcmd(sc, cmd)) {
3991 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
/*
 * NOTE(review): this stores the address of the local `cmd` variable,
 * but the actual sleep/wakeup channel used below is &sc->chan itself
 * (see tsleep/wakeup_one) -- the assignment looks vestigial; confirm.
 */
3994 sc->chan = (void *)&cmd;
/* Sleep in 1 s slices until the ISR wakes us or the window expires. */
3997 if (cmd->cmd_status == 0xFF) {
3998 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4002 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL
4005 if (total_time >= max_wait) {
4006 device_printf(sc->mrsas_dev,
4007 "Internal command timed out after %d seconds.\n", max_wait);
/* Still unfinished after the full wait: report a DCMD timeout. */
4014 if (cmd->cmd_status == 0xFF) {
4015 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
4016 "seconds from %s\n", max_wait, __func__);
4017 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
4018 cmd->frame->dcmd.opcode);
4019 retcode = ETIMEDOUT;
4025 * mrsas_complete_mptmfi_passthru: Completes a command
4026 * input: @sc: Adapter soft state
4027 * @cmd: Command to be completed
4028 * @status: cmd completion status
4030 * This function is called from mrsas_complete_cmd() after an interrupt is
4031 * received from Firmware, and io_request->Function is
4032 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
4035 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
4038 struct mrsas_header *hdr = &cmd->frame->hdr;
4039 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
4041 /* Reset the retry counter for future re-tries */
4042 cmd->retry_for_fw_reset = 0;
4045 cmd->ccb_ptr = NULL;
/* Dispatch on the MFI command type recorded in the frame header. */
4048 case MFI_CMD_INVALID:
4049 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
4051 case MFI_CMD_PD_SCSI_IO:
4052 case MFI_CMD_LD_SCSI_IO:
4054 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
4055 * issued either through an IO path or an IOCTL path. If it
4056 * was via IOCTL, we will send it to internal completion.
4058 if (cmd->sync_cmd) {
4060 mrsas_wakeup(sc, cmd);
4066 /* Check for LD map update */
/* mbox.b[1] == 1 flags a pending map-update (sync-map) DCMD. */
4067 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
4068 (cmd->frame->dcmd.mbox.b[1] == 1)) {
4069 sc->fast_path_io = 0;
4070 mtx_lock(&sc->raidmap_lock);
4071 sc->map_update_cmd = NULL;
4072 if (cmd_status != 0) {
4073 if (cmd_status != MFI_STAT_NOT_FOUND)
4074 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
4076 mrsas_release_mfi_cmd(cmd);
4077 mtx_unlock(&sc->raidmap_lock);
/* Success: validate the new map and re-arm the sync-map command. */
4082 mrsas_release_mfi_cmd(cmd);
4083 if (MR_ValidateMapInfo(sc))
4084 sc->fast_path_io = 0;
4086 sc->fast_path_io = 1;
4087 mrsas_sync_map_info(sc);
4088 mtx_unlock(&sc->raidmap_lock);
4091 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
4092 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
4093 sc->mrsas_aen_triggered = 0;
4095 /* FW has an updated PD sequence */
4096 if ((cmd->frame->dcmd.opcode ==
4097 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
4098 (cmd->frame->dcmd.mbox.b[0] == 1)) {
4099 mtx_lock(&sc->raidmap_lock);
4100 sc->jbod_seq_cmd = NULL;
4101 mrsas_release_mfi_cmd(cmd);
/* On success bump the map id and re-register; otherwise disable JBOD FP. */
4103 if (cmd_status == MFI_STAT_OK) {
4104 sc->pd_seq_map_id++;
4105 /* Re-register a pd sync seq num cmd */
4106 if (megasas_sync_pd_seq_num(sc, true))
4107 sc->use_seqnum_jbod_fp = 0;
4109 sc->use_seqnum_jbod_fp = 0;
4110 device_printf(sc->mrsas_dev,
4111 "Jbod map sync failed, status=%x\n", cmd_status);
4113 mtx_unlock(&sc->raidmap_lock);
4116 /* See if got an event notification */
4117 if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
4118 mrsas_complete_aen(sc, cmd);
4120 mrsas_wakeup(sc, cmd);
4123 /* Command issued to abort another cmd return */
4124 mrsas_complete_abort(sc, cmd);
4127 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
4133 * mrsas_wakeup: Completes an internal command
4134 * input: Adapter soft state
4135 * Command to be completed
4137 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4138 * timer is started. This function is called from
4139 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4140 * from the command wait.
4143 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4145 cmd->cmd_status = cmd->frame->io.cmd_status;
/* Normalize "never completed" sentinel to success before waking waiter. */
4147 if (cmd->cmd_status == 0xFF)
4148 cmd->cmd_status = 0;
/* Wake the mrsas_issue_blocked_cmd() sleeper on channel &sc->chan. */
4150 sc->chan = (void *)&cmd;
4151 wakeup_one((void *)&sc->chan);
4156 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
4157 * Adapter soft state Shutdown/Hibernate
4159 * This function issues a DCMD internal command to Firmware to initiate shutdown
4160 * of the controller.
4163 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
4165 struct mrsas_mfi_cmd *cmd;
4166 struct mrsas_dcmd_frame *dcmd;
/* Nothing to shut down if the HBA has already been declared dead. */
4168 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4171 cmd = mrsas_get_mfi_cmd(sc);
4173 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
/* Abort the long-lived FW commands (AEN, map update, JBOD seq) first. */
4177 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
4178 if (sc->map_update_cmd)
4179 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
4180 if (sc->jbod_seq_cmd)
4181 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
/* Build a data-less DCMD; opcode selects shutdown vs. hibernate. */
4183 dcmd = &cmd->frame->dcmd;
4184 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4186 dcmd->cmd = MFI_CMD_DCMD;
4187 dcmd->cmd_status = 0x0;
4188 dcmd->sge_count = 0;
4189 dcmd->flags = MFI_FRAME_DIR_NONE;
4192 dcmd->data_xfer_len = 0;
4193 dcmd->opcode = opcode;
4195 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
4197 mrsas_issue_blocked_cmd(sc, cmd);
4198 mrsas_release_mfi_cmd(cmd);
4204 * mrsas_flush_cache: Requests FW to flush all its caches input:
4205 * Adapter soft state
4207 * This function is issues a DCMD internal command to Firmware to initiate
4208 * flushing of all caches.
4211 mrsas_flush_cache(struct mrsas_softc *sc)
4213 struct mrsas_mfi_cmd *cmd;
4214 struct mrsas_dcmd_frame *dcmd;
/* Skip the flush entirely if the HBA is already dead. */
4216 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4219 cmd = mrsas_get_mfi_cmd(sc);
4221 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
/* Data-less DCMD; mbox byte 0 selects both controller and disk caches. */
4224 dcmd = &cmd->frame->dcmd;
4225 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4227 dcmd->cmd = MFI_CMD_DCMD;
4228 dcmd->cmd_status = 0x0;
4229 dcmd->sge_count = 0;
4230 dcmd->flags = MFI_FRAME_DIR_NONE;
4233 dcmd->data_xfer_len = 0;
4234 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
4235 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4237 mrsas_issue_blocked_cmd(sc, cmd);
4238 mrsas_release_mfi_cmd(cmd);
4244 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
4247 u_int8_t do_ocr = 1;
4248 struct mrsas_mfi_cmd *cmd;
4249 struct mrsas_dcmd_frame *dcmd;
4250 uint32_t pd_seq_map_sz;
4251 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
4252 bus_addr_t pd_seq_h;
/* Size of the sync struct with its flexible trailing MR_PD_CFG_SEQ array. */
4254 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
4255 (sizeof(struct MR_PD_CFG_SEQ) *
4256 (MAX_PHYSICAL_DEVICES - 1));
4258 cmd = mrsas_get_mfi_cmd(sc);
4260 device_printf(sc->mrsas_dev,
4261 "Cannot alloc for ld map info cmd.\n");
4264 dcmd = &cmd->frame->dcmd;
/* Double-buffered JBOD map: pd_seq_map_id's low bit picks the buffer. */
4266 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
4267 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
4269 device_printf(sc->mrsas_dev,
4270 "Failed to alloc mem for jbod map info.\n");
4271 mrsas_release_mfi_cmd(cmd);
4274 memset(pd_sync, 0, pd_seq_map_sz);
4275 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4276 dcmd->cmd = MFI_CMD_DCMD;
4277 dcmd->cmd_status = 0xFF;
4278 dcmd->sge_count = 1;
4281 dcmd->data_xfer_len = htole32(pd_seq_map_sz);
4282 dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
4283 dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
4284 dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);
/*
 * pend mode: register a long-lived "notify me on change" command with
 * FW (fire-and-forget); otherwise poll once for the current map.
 */
4287 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
4288 dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
4289 sc->jbod_seq_cmd = cmd;
4290 if (mrsas_issue_dcmd(sc, cmd)) {
4291 device_printf(sc->mrsas_dev,
4292 "Fail to send sync map info command.\n");
4297 dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4299 retcode = mrsas_issue_polled(sc, cmd);
4300 if (retcode == ETIMEDOUT)
/* Sanity-check FW's device count against the driver's limit. */
4303 if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
4304 device_printf(sc->mrsas_dev,
4305 "driver supports max %d JBOD, but FW reports %d\n",
4306 MAX_PHYSICAL_DEVICES, pd_sync->count);
4310 sc->pd_seq_map_id++;
/* Timeout path: request a DCMD-timeout OCR. */
4315 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4315 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4321 * mrsas_get_map_info: Load and validate RAID map input:
4322 * Adapter instance soft state
4324 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4325 * and validate RAID map. It returns 0 if successful, 1 other- wise.
4328 mrsas_get_map_info(struct mrsas_softc *sc)
4330 uint8_t retcode = 0;
/* Pessimistically disable fast-path until the new map validates. */
4332 sc->fast_path_io = 0;
4333 if (!mrsas_get_ld_map_info(sc)) {
4334 retcode = MR_ValidateMapInfo(sc);
4336 sc->fast_path_io = 1;
4344 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
4345 * Adapter instance soft state
4347 * Issues an internal command (DCMD) to get the FW's controller PD list
4351 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4354 struct mrsas_mfi_cmd *cmd;
4355 struct mrsas_dcmd_frame *dcmd;
4357 bus_addr_t map_phys_addr = 0;
4359 cmd = mrsas_get_mfi_cmd(sc);
4361 device_printf(sc->mrsas_dev,
4362 "Cannot alloc for ld map info cmd.\n");
4365 dcmd = &cmd->frame->dcmd;
/* Double-buffered RAID map: map_id's low bit picks the buffer. */
4367 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4368 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4370 device_printf(sc->mrsas_dev,
4371 "Failed to alloc mem for ld map info.\n");
4372 mrsas_release_mfi_cmd(cmd);
/*
 * NOTE(review): sizeof(sc->max_map_sz) is the size of the *field*
 * (4 bytes), not its value -- this memset zeroes only 4 bytes of the
 * map buffer.  Likely intended: memset(map, 0, sc->max_map_sz).
 */
4375 memset(map, 0, sizeof(sc->max_map_sz));
4376 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Single-SGE read DCMD fetching the current FW RAID map. */
4378 dcmd->cmd = MFI_CMD_DCMD;
4379 dcmd->cmd_status = 0xFF;
4380 dcmd->sge_count = 1;
4381 dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4384 dcmd->data_xfer_len = htole32(sc->current_map_sz);
4385 dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
4386 dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
4387 dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
4389 retcode = mrsas_issue_polled(sc, cmd);
4390 if (retcode == ETIMEDOUT)
4391 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4397 * mrsas_sync_map_info: Get FW's ld_map structure input:
4398 * Adapter instance soft state
4400 * Issues an internal command (DCMD) to get the FW's controller PD list
4404 mrsas_sync_map_info(struct mrsas_softc *sc)
4407 struct mrsas_mfi_cmd *cmd;
4408 struct mrsas_dcmd_frame *dcmd;
4409 uint32_t size_sync_info, num_lds;
4410 MR_LD_TARGET_SYNC *target_map = NULL;
4411 MR_DRV_RAID_MAP_ALL *map;
4413 MR_LD_TARGET_SYNC *ld_sync;
4414 bus_addr_t map_phys_addr = 0;
4416 cmd = mrsas_get_mfi_cmd(sc);
4418 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
/* Current driver map supplies the LD count and per-LD sequence numbers. */
4421 map = sc->ld_drv_map[sc->map_id & 1];
4422 num_lds = map->raidMap.ldCount;
4424 dcmd = &cmd->frame->dcmd;
4425 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
4426 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Write into the *other* (inactive) raidmap buffer of the double buffer. */
4428 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
4429 memset(target_map, 0, sc->max_map_sz);
4431 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
4433 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
/* One targetId/seqNum record per LD, taken from the validated map. */
4435 for (i = 0; i < num_lds; i++, ld_sync++) {
4436 raid = MR_LdRaidGet(i, map);
4437 ld_sync->targetId = MR_GetLDTgtId(i, map);
4438 ld_sync->seqNum = raid->seqNum;
/*
 * Fire-and-forget write DCMD: mbox.b[1] = PEND flag makes FW hold the
 * command and complete it when the LD map changes (see the completion
 * handling of MR_DCMD_LD_MAP_GET_INFO).
 */
4441 dcmd->cmd = MFI_CMD_DCMD;
4442 dcmd->cmd_status = 0xFF;
4443 dcmd->sge_count = 1;
4444 dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
4447 dcmd->data_xfer_len = htole32(sc->current_map_sz);
4448 dcmd->mbox.b[0] = num_lds;
4449 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4450 dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
4451 dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
4452 dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
4454 sc->map_update_cmd = cmd;
4455 if (mrsas_issue_dcmd(sc, cmd)) {
4456 device_printf(sc->mrsas_dev,
4457 "Fail to send sync map info command.\n");
4463 /* Input: dcmd.opcode - MR_DCMD_PD_GET_INFO
4464 * dcmd.mbox.s[0] - deviceId for this physical drive
4465 * dcmd.sge IN - ptr to returned MR_PD_INFO structure
4466 * Desc: Firmware return the physical drive info structure
4470 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
4473 u_int8_t do_ocr = 1;
4474 struct mrsas_mfi_cmd *cmd;
4475 struct mrsas_dcmd_frame *dcmd;
4477 cmd = mrsas_get_mfi_cmd(sc);
4480 device_printf(sc->mrsas_dev,
4481 "Cannot alloc for get PD info cmd\n");
4484 dcmd = &cmd->frame->dcmd;
/* Build MR_DCMD_PD_GET_INFO: device id in mbox.s[0], single read SGE. */
4486 memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
4487 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4489 dcmd->mbox.s[0] = htole16(device_id);
4490 dcmd->cmd = MFI_CMD_DCMD;
4491 dcmd->cmd_status = 0xFF;
4492 dcmd->sge_count = 1;
4493 dcmd->flags = MFI_FRAME_DIR_READ;
4496 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info));
4497 dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO);
4498 dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF);
4499 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info));
/* Blocked (interrupt) wait when interrupts are live, else poll. */
4501 if (!sc->mask_interrupts)
4502 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4504 retcode = mrsas_issue_polled(sc, cmd);
4506 if (retcode == ETIMEDOUT)
/* Record the PD's interface type (SAS/SATA/...) for the target entry. */
4509 sc->target_list[device_id].interface_type =
4510 le16toh(sc->pd_info_mem->state.ddf.pdType.intf);
/* Timeout path: request a DCMD-timeout OCR. */
4517 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4519 if (!sc->mask_interrupts)
4520 mrsas_release_mfi_cmd(cmd);
4524 * mrsas_add_target: Add target ID of system PD/VD to driver's data structure.
4525 * sc: Adapter's soft state
4526 * target_id: Unique target id per controller(managed by driver)
4527 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4528 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4530 * Description: This function will be called whenever system PD or VD is created.
/*
 * mrsas_add_target: record a newly created system PD or VD in the driver's
 * target_list and log it. Target IDs below MRSAS_MAX_PD are system PDs;
 * IDs at or above MRSAS_MAX_PD are VDs (printed relative to that base).
 */
4532 static void mrsas_add_target(struct mrsas_softc *sc,
4533 u_int16_t target_id)
4535 sc->target_list[target_id].target_id = target_id;
4537 device_printf(sc->mrsas_dev,
4538 "%s created target ID: 0x%x\n",
4539 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4540 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
/*
 * Fetch PD info only when interrupts are enabled, the pd_info DMA buffer
 * exists, and the target is a system PD (VDs have no PD info).
 */
4542 * If interrupts are enabled, then only fire DCMD to get pd_info
4545 if (!sc->mask_interrupts && sc->pd_info_mem &&
4546 (target_id < MRSAS_MAX_PD))
4547 mrsas_get_pd_info(sc, target_id);
4552 * mrsas_remove_target: Remove target ID of system PD/VD from driver's data structure.
4553 * sc: Adapter's soft state
4554 * target_id: Unique target id per controller(managed by driver)
4555 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4556 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4558 * Description: This function will be called whenever system PD or VD is deleted
/*
 * mrsas_remove_target: mark a deleted system PD/VD slot as unused in the
 * driver's target_list and log the removal.
 */
4560 static void mrsas_remove_target(struct mrsas_softc *sc,
4561 u_int16_t target_id)
/* 0xffff is the "no target" sentinel tested by the PD/LD rescan paths. */
4563 sc->target_list[target_id].target_id = 0xffff;
4564 device_printf(sc->mrsas_dev,
4565 "%s deleted target ID: 0x%x\n",
4566 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4567 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4571 * mrsas_get_pd_list: Returns FW's PD list structure input:
4572 * Adapter soft state
4574 * Issues an internal command (DCMD) to get the FW's controller PD list
4575 * structure. This information is mainly used to find out about the system
4576 * PDs supported by Firmware.
/*
 * mrsas_get_pd_list: query firmware (MR_DCMD_PD_LIST_QUERY) for the list of
 * physical drives exposed to the host, then reconcile the driver's
 * local_pd_list/target_list against it (adding new PDs, removing stale ones).
 *
 * NOTE(review): lines are missing from this chunk (numbering jumps); error
 * returns, braces and some assignments elided here exist in the full file.
 */
4579 mrsas_get_pd_list(struct mrsas_softc *sc)
4581 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4582 u_int8_t do_ocr = 1;
4583 struct mrsas_mfi_cmd *cmd;
4584 struct mrsas_dcmd_frame *dcmd;
4585 struct MR_PD_LIST *pd_list_mem;
4586 struct MR_PD_ADDRESS *pd_addr;
4587 bus_addr_t pd_list_phys_addr = 0;
4588 struct mrsas_tmp_dcmd *tcmd;
4591 cmd = mrsas_get_mfi_cmd(sc);
4593 device_printf(sc->mrsas_dev,
4594 "Cannot alloc for get PD list cmd\n");
4597 dcmd = &cmd->frame->dcmd;
/* Temporary DMA buffer sized for the worst-case PD list. */
4599 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4600 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4601 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4602 device_printf(sc->mrsas_dev,
4603 "Cannot alloc dmamap for get PD list cmd\n");
/* Unwind: release the MFI frame and the partially set-up tmp DCMD. */
4604 mrsas_release_mfi_cmd(cmd);
4605 mrsas_free_tmp_dcmd(tcmd);
4606 free(tcmd, M_MRSAS);
4609 pd_list_mem = tcmd->tmp_dcmd_mem;
4610 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4612 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* Query only PDs that firmware exposes to the host. */
4614 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4615 dcmd->mbox.b[1] = 0;
4616 dcmd->cmd = MFI_CMD_DCMD;
4617 dcmd->cmd_status = 0xFF;
4618 dcmd->sge_count = 1;
4619 dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4622 dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4623 dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
4624 dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
4625 dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
/* Blocked issue with interrupts enabled; polled while masked. */
4627 if (!sc->mask_interrupts)
4628 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4630 retcode = mrsas_issue_polled(sc, cmd);
4632 if (retcode == ETIMEDOUT)
4635 /* Get the instance PD list */
4636 pd_count = MRSAS_MAX_PD;
4637 pd_addr = pd_list_mem->addr;
/* Rebuild local_pd_list from the firmware's (little-endian) reply. */
4638 if (le32toh(pd_list_mem->count) < pd_count) {
4639 memset(sc->local_pd_list, 0,
4640 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4641 for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
4642 dev_id = le16toh(pd_addr->deviceId);
4643 sc->local_pd_list[dev_id].tid = dev_id;
4644 sc->local_pd_list[dev_id].driveType =
4645 le16toh(pd_addr->scsiDevType);
4646 sc->local_pd_list[dev_id].driveState =
/* Slot previously empty (0xffff sentinel) -> a new PD appeared. */
4648 if (sc->target_list[dev_id].target_id == 0xffff)
4649 mrsas_add_target(sc, dev_id);
/* Second pass: drop targets that are no longer system PDs. */
4652 for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4653 if ((sc->local_pd_list[pd_index].driveState !=
4654 MR_PD_STATE_SYSTEM) &&
4655 (sc->target_list[pd_index].target_id !=
4657 mrsas_remove_target(sc, pd_index);
4661 * Use mutex/spinlock if pd_list component size increase more than
4664 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4668 mrsas_free_tmp_dcmd(tcmd);
4669 free(tcmd, M_MRSAS);
/* On DCMD timeout, schedule an online controller reset (OCR). */
4672 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4674 if (!sc->mask_interrupts)
4675 mrsas_release_mfi_cmd(cmd);
4681 * mrsas_get_ld_list: Returns FW's LD list structure input:
4682 * Adapter soft state
4684 * Issues an internal command (DCMD) to get the FW's controller LD list
4685 * structure. This information is mainly used to find out about the LDs supported by
/*
 * mrsas_get_ld_list: query firmware (MR_DCMD_LD_GET_LIST) for the logical
 * drive list and reconcile the driver's ld_ids/target_list against it.
 * VD target IDs in target_list are offset by MRSAS_MAX_PD.
 *
 * NOTE(review): lines are missing from this chunk (numbering jumps); error
 * returns, braces and some statements elided here exist in the full file.
 */
4689 mrsas_get_ld_list(struct mrsas_softc *sc)
4691 int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4692 u_int8_t do_ocr = 1;
4693 struct mrsas_mfi_cmd *cmd;
4694 struct mrsas_dcmd_frame *dcmd;
4695 struct MR_LD_LIST *ld_list_mem;
4696 bus_addr_t ld_list_phys_addr = 0;
4697 struct mrsas_tmp_dcmd *tcmd;
4699 cmd = mrsas_get_mfi_cmd(sc);
4701 device_printf(sc->mrsas_dev,
4702 "Cannot alloc for get LD list cmd\n");
4705 dcmd = &cmd->frame->dcmd;
/* Temporary DMA buffer for the firmware's LD list reply. */
4707 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4708 ld_list_size = sizeof(struct MR_LD_LIST);
4709 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4710 device_printf(sc->mrsas_dev,
4711 "Cannot alloc dmamap for get LD list cmd\n");
4712 mrsas_release_mfi_cmd(cmd);
4713 mrsas_free_tmp_dcmd(tcmd);
4714 free(tcmd, M_MRSAS);
4717 ld_list_mem = tcmd->tmp_dcmd_mem;
4718 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4720 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
/* mbox.b[0]=1 requests the extended (256-VD) list when FW supports it. */
4722 if (sc->max256vdSupport)
4723 dcmd->mbox.b[0] = 1;
4725 dcmd->cmd = MFI_CMD_DCMD;
4726 dcmd->cmd_status = 0xFF;
4727 dcmd->sge_count = 1;
/* NOTE(review): flags lacks htole16() here unlike sibling DCMDs - confirm. */
4728 dcmd->flags = MFI_FRAME_DIR_READ;
4730 dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
4731 dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
4732 dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
4733 dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
/* Blocked issue with interrupts enabled; polled while masked. */
4736 if (!sc->mask_interrupts)
4737 retcode = mrsas_issue_blocked_cmd(sc, cmd);
4739 retcode = mrsas_issue_polled(sc, cmd);
4741 if (retcode == ETIMEDOUT)
4745 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4748 /* Get the instance LD list */
4749 if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
4750 sc->CurLdCount = le32toh(ld_list_mem->ldCount);
/* 0xff marks an LD slot as absent. */
4751 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4752 for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
4753 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
/* Driver-side VD target IDs start after the PD range. */
4754 drv_tgt_id = ids + MRSAS_MAX_PD;
/* state != 0: LD is not offline; track it and add if newly seen. */
4755 if (ld_list_mem->ldList[ld_index].state != 0) {
4756 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4757 if (sc->target_list[drv_tgt_id].target_id ==
4759 mrsas_add_target(sc, drv_tgt_id);
4761 if (sc->target_list[drv_tgt_id].target_id !=
4763 mrsas_remove_target(sc,
4771 mrsas_free_tmp_dcmd(tcmd);
4772 free(tcmd, M_MRSAS);
/* On DCMD timeout, schedule an online controller reset (OCR). */
4775 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4776 if (!sc->mask_interrupts)
4777 mrsas_release_mfi_cmd(cmd);
4783 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
4784 * Adapter soft state Temp command Size of allocation
4786 * Allocates DMAable memory for a temporary internal command. The allocated
4787 * memory is initialized to all zeros upon successful loading of the dma
/*
 * mrsas_alloc_tmp_dcmd: allocate and map a DMA-able scratch buffer of
 * `size` bytes for a temporary internal DCMD, zeroing it on success.
 * Creates tag -> allocates memory -> loads the map (phys addr captured by
 * mrsas_addr_cb into tmp_dcmd_phys_addr).
 *
 * NOTE(review): error-path return statements between the steps are elided
 * in this chunk but exist in the full file.
 */
4791 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4792 struct mrsas_tmp_dcmd *tcmd, int size)
/* 32-bit address limit: the sge32 SGL used by callers holds 32-bit PAs. */
4794 if (bus_dma_tag_create(sc->mrsas_parent_tag,
4796 BUS_SPACE_MAXADDR_32BIT,
4804 &tcmd->tmp_dcmd_tag)) {
4805 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n")
4808 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4809 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4810 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4813 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4814 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4815 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4816 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
/* Hand callers a zeroed buffer so stale data never reaches firmware. */
4819 memset(tcmd->tmp_dcmd_mem, 0, size);
4824 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
4825 * temporary dcmd pointer
4827 * Deallocates memory of the temporary command for use in the construction of
4828 * the internal DCMD.
/*
 * mrsas_free_tmp_dcmd: tear down a temporary DCMD DMA buffer in reverse
 * order of allocation (unload map, free memory, destroy tag). Each step is
 * guarded so a partially constructed tcmd can be freed safely.
 */
4831 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4833 if (tmp->tmp_dcmd_phys_addr)
4834 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4835 if (tmp->tmp_dcmd_mem != NULL)
4836 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4837 if (tmp->tmp_dcmd_tag != NULL)
4838 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4842 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
4843 * Adapter soft state Previously issued cmd to be aborted
4845 * This function is used to abort previously issued commands, such as AEN and
4846 * RAID map sync map commands. The abort command is sent as a DCMD internal
4847 * command and subsequently the driver will wait for a return status. The
4848 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
/*
 * mrsas_issue_blocked_abort_cmd: send an MFI_CMD_ABORT frame targeting a
 * previously issued command (e.g. AEN or map-sync) and sleep until the
 * abort completes or MRSAS_INTERNAL_CMD_WAIT_TIME seconds elapse.
 *
 * NOTE(review): loop construct, returns and braces between the visible
 * lines are elided in this chunk but exist in the full file.
 */
4851 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4852 struct mrsas_mfi_cmd *cmd_to_abort)
4854 struct mrsas_mfi_cmd *cmd;
4855 struct mrsas_abort_frame *abort_fr;
4856 u_int8_t retcode = 0;
4857 unsigned long total_time = 0;
4858 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4860 cmd = mrsas_get_mfi_cmd(sc);
4862 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4865 abort_fr = &cmd->frame->abort;
4867 /* Prepare and issue the abort frame */
4868 abort_fr->cmd = MFI_CMD_ABORT;
4869 abort_fr->cmd_status = 0xFF;
4870 abort_fr->flags = 0;
/* Identify the victim by its command index and MFI frame physical address. */
4871 abort_fr->abort_context = cmd_to_abort->index;
4872 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4873 abort_fr->abort_mfi_phys_addr_hi = 0;
/* 0xFF = pending; the completion path clears it and wakes us. */
4876 cmd->cmd_status = 0xFF;
4878 if (mrsas_issue_dcmd(sc, cmd)) {
4879 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4882 /* Wait for this cmd to complete */
/* sc->chan doubles as the tsleep/wakeup channel for sync internal cmds. */
4883 sc->chan = (void *)&cmd;
4885 if (cmd->cmd_status == 0xFF) {
/* Sleep up to 1 second (hz ticks) per iteration of the wait. */
4886 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4890 if (total_time >= max_wait) {
4891 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4898 mrsas_release_mfi_cmd(cmd);
4903 * mrsas_complete_abort: Completes aborting a command input:
4904 * Adapter soft state Cmd that was issued to abort another cmd
4906 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4907 * change after sending the command. This function is called from
4908 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
/*
 * mrsas_complete_abort: completion hook for an abort frame. Clears the
 * pending status (0xFF -> 0) and wakes the single thread sleeping in
 * mrsas_issue_blocked_abort_cmd() on sc->chan.
 */
4911 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
/* Only synchronous (blocked) commands have a sleeper to wake. */
4913 if (cmd->sync_cmd) {
4915 cmd->cmd_status = 0;
4916 sc->chan = (void *)&cmd;
4917 wakeup_one((void *)&sc->chan);
4923 * mrsas_aen_handler: AEN processing callback function from thread context
4924 * input: Adapter soft state
4926 * Asynchronous event handler
/*
 * mrsas_aen_handler: taskqueue (thread-context) handler for asynchronous
 * events. Dispatches on the event code to refresh PD/LD lists and rescan
 * the CAM SIMs, then re-registers for the next AEN sequence number.
 *
 * NOTE(review): many lines (labels, braces, some case bodies) are elided
 * in this chunk; the skip_register_aen label exists in the full file.
 */
4929 mrsas_aen_handler(struct mrsas_softc *sc)
4931 union mrsas_evt_class_locale class_locale;
4934 int error, fail_aen = 0;
4937 printf("invalid instance!\n");
/* Don't process events while detach or a controller reset is in flight. */
4940 if (sc->remove_in_progress || sc->reset_in_progress) {
4941 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
4942 __func__, __LINE__);
4945 if (sc->evt_detail_mem) {
4946 switch (sc->evt_detail_mem->code) {
/* PD hot-plug events: refresh PD list, rescan the PD SIM (sim_1). */
4947 case MR_EVT_PD_INSERTED:
4948 fail_aen = mrsas_get_pd_list(sc);
4950 mrsas_bus_scan_sim(sc, sc->sim_1);
4952 goto skip_register_aen;
4954 case MR_EVT_PD_REMOVED:
4955 fail_aen = mrsas_get_pd_list(sc);
4957 mrsas_bus_scan_sim(sc, sc->sim_1);
4959 goto skip_register_aen;
/* LD topology events: rescan the VD SIM (sim_0). */
4961 case MR_EVT_LD_OFFLINE:
4962 case MR_EVT_CFG_CLEARED:
4963 case MR_EVT_LD_DELETED:
4964 mrsas_bus_scan_sim(sc, sc->sim_0);
4966 case MR_EVT_LD_CREATED:
4967 fail_aen = mrsas_get_ld_list(sc);
4969 mrsas_bus_scan_sim(sc, sc->sim_0);
4971 goto skip_register_aen;
4973 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4974 case MR_EVT_FOREIGN_CFG_IMPORTED:
4975 case MR_EVT_LD_STATE_CHANGE:
/* Controller property change: re-read controller info. */
4978 case MR_EVT_CTRL_PROP_CHANGED:
4979 fail_aen = mrsas_get_ctrl_info(sc);
4981 goto skip_register_aen;
4987 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
/* Fallback full rescan path: both PD and LD lists plus both SIMs. */
4991 fail_aen = mrsas_get_pd_list(sc);
4993 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4994 mrsas_bus_scan_sim(sc, sc->sim_1);
4996 goto skip_register_aen;
4998 fail_aen = mrsas_get_ld_list(sc);
5000 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
5001 mrsas_bus_scan_sim(sc, sc->sim_0);
5003 goto skip_register_aen;
/* Re-arm for the event after the one just handled. */
5005 seq_num = sc->evt_detail_mem->seq_num + 1;
5007 /* Register AEN with FW for latest sequence number plus 1 */
5008 class_locale.members.reserved = 0;
5009 class_locale.members.locale = MR_EVT_LOCALE_ALL;
5010 class_locale.members.class = MR_EVT_CLASS_DEBUG;
/* An AEN command already outstanding means no re-registration is needed. */
5012 if (sc->aen_cmd != NULL)
5015 mtx_lock(&sc->aen_lock);
5016 error = mrsas_register_aen(sc, seq_num,
5018 mtx_unlock(&sc->aen_lock);
5021 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
5029 * mrsas_complete_aen: Completes AEN command
5030 * input: Adapter soft state
5031 * Cmd that was issued to abort another cmd
5033 * This function will be called from ISR and will continue event processing from
5034 * thread context by enqueuing task in ev_tq (callback function
5035 * "mrsas_aen_handler").
/*
 * mrsas_complete_aen: ISR-side completion of the registered AEN command.
 * Signals any poll()-ing userland waiter, releases the MFI command, and
 * defers event processing to thread context by enqueuing ev_task (which
 * runs mrsas_aen_handler).
 */
5038 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
5041 * Don't signal app if it is just an aborted previously registered
/* Skip notification for aborted re-registrations and during detach. */
5044 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
5045 sc->mrsas_aen_triggered = 1;
/* aen_lock guards the poll-waiter flag and selinfo. */
5046 mtx_lock(&sc->aen_lock);
5047 if (sc->mrsas_poll_waiting) {
5048 sc->mrsas_poll_waiting = 0;
5049 selwakeup(&sc->mrsas_select);
5051 mtx_unlock(&sc->aen_lock);
5056 mrsas_release_mfi_cmd(cmd);
/* Continue processing in thread context via the event taskqueue. */
5058 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
/*
 * newbus glue: device method table, driver descriptor, and module
 * registration. Attaches the mrsas driver to the PCI bus and declares a
 * dependency on the CAM subsystem.
 */
5063 static device_method_t mrsas_methods[] = {
5064 DEVMETHOD(device_probe, mrsas_probe),
5065 DEVMETHOD(device_attach, mrsas_attach),
5066 DEVMETHOD(device_detach, mrsas_detach),
5067 DEVMETHOD(device_shutdown, mrsas_shutdown),
5068 DEVMETHOD(device_suspend, mrsas_suspend),
5069 DEVMETHOD(device_resume, mrsas_resume),
5070 DEVMETHOD(bus_print_child, bus_generic_print_child),
5071 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
5075 static driver_t mrsas_driver = {
/* Per-device softc size tells newbus how much state to allocate. */
5078 sizeof(struct mrsas_softc)
5081 static devclass_t mrsas_devclass;
5083 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
5084 MODULE_DEPEND(mrsas, cam, 1, 1, 1);