2 * Redistribution and use in source and binary forms, with or without
3 * modification, are permitted provided that the following conditions
6 * Copyright 1994-2009 The FreeBSD Project.
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 * The views and conclusions contained in the software and documentation
28 * are those of the authors and should not be interpreted as representing
29 * official policies, either expressed or implied, of the FreeBSD Project.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/kernel.h>
41 #include <sys/selinfo.h>
45 #include <sys/ioccom.h>
46 #include <sys/eventhandler.h>
47 #include <sys/callout.h>
49 #include <machine/bus.h>
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
52 #include <sys/malloc.h>
54 #include <dev/mfi/mfireg.h>
55 #include <dev/mfi/mfi_ioctl.h>
56 #include <dev/mfi/mfivar.h>
58 struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *);
59 union mfi_mpi2_request_descriptor *
60 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
61 void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
62 int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
63 struct mfi_cmd_tbolt *cmd);
64 union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
65 *sc, struct mfi_command *cmd);
67 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
68 union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
69 *sc, struct mfi_command *mfi_cmd);
70 void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
71 struct mfi_cmd_tbolt *cmd);
72 static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
73 *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
75 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
77 static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
78 static void mfi_kill_hba (struct mfi_softc *sc);
79 static void mfi_process_fw_state_chg_isr(void *arg);
80 static void mfi_sync_map_complete(struct mfi_command *);
81 static void mfi_queue_map_sync(struct mfi_softc *sc);
83 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
86 extern int mfi_polled_cmd_timeout;
87 static int mfi_fw_reset_test = 0;
89 SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
90 0, "Force a firmware reset condition");
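/*
 * Register helpers for the Thunderbolt interface.  Interrupts are masked and
 * unmasked through the outbound interrupt mask register (MFI_OMSK); each
 * write is read back, presumably to flush the posted write before the caller
 * continues.
 */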
94 mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
96 MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
97 MFI_READ4(sc, MFI_OMSK);
101 mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
103 MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
104 MFI_READ4(sc, MFI_OMSK);
108 mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
110 return MFI_READ4(sc, MFI_OSP0);
114 mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
116 int32_t status, mfi_status = 0;
118 status = MFI_READ4(sc, MFI_OSTS);
121 MFI_WRITE4(sc, MFI_OSTS, status);
122 MFI_READ4(sc, MFI_OSTS);
123 if (status & MFI_STATE_CHANGE_INTERRUPT) {
124 mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
129 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
132 MFI_READ4(sc, MFI_OSTS);
138 mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
141 bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
142 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
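/*
 * Post the 64-bit MFA request descriptor: the low 32 bits go to the inbound
 * queue port low register (MFI_IQPL), the high 32 bits to MFI_IQPH.
 */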
143 MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
144 MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
148 * mfi_tbolt_adp_reset - For controller reset
149 * @regs: MFI register set
152 mfi_tbolt_adp_reset(struct mfi_softc *sc)
154 int retry = 0, i = 0;
157 MFI_WRITE4(sc, MFI_WSR, 0xF);
158 MFI_WRITE4(sc, MFI_WSR, 4);
159 MFI_WRITE4(sc, MFI_WSR, 0xB);
160 MFI_WRITE4(sc, MFI_WSR, 2);
161 MFI_WRITE4(sc, MFI_WSR, 7);
162 MFI_WRITE4(sc, MFI_WSR, 0xD);
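/*
 * The six writes above form the key sequence that unlocks the host
 * diagnostic register; the code below then waits for DIAG_WRITE_ENABLE to
 * confirm that the unlock took effect.
 */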
164 for (i = 0; i < 10000; i++) ;
166 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
168 while (!( HostDiag & DIAG_WRITE_ENABLE)) {
169 for (i = 0; i < 1000; i++);
170 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
171 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
172 "hostdiag=%#x\n", retry, HostDiag);
178 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag);
180 MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
182 for (i=0; i < 10; i++) {
183 for (i = 0; i < 10000; i++);
186 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
187 while (HostDiag & DIAG_RESET_ADAPTER) {
188 for (i = 0; i < 1000; i++) ;
189 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
190 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
191 "hostdiag=%#x\n", retry, HostDiag);
200 * This routine initializes Thunderbolt-specific device information
203 mfi_tbolt_init_globals(struct mfi_softc *sc)
205 /* Initialize single reply size and Message size */
206 sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
207 sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
210 * Calculate how many SGEs are allowed in an allocated main message
211 * (size of the Message - Raid SCSI IO message size(except SGE))
213 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
215 sc->max_SGEs_in_main_message =
216 (uint8_t)((sc->raid_io_msg_size
217 - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
218 - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
220 * (Command frame size allocated in SRB ext - Raid SCSI IO message size)
222 * (1280 - 256) / 16 = 64
224 sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
225 - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
227 * (0x08 - 1) + 0x40 - 0x01 = 0x46; one is left for the command
230 sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
231 + sc->max_SGEs_in_chain_message - 1;
233 * This is the offset in number of 4 * 32bit words to the next chain
234 * (0x100 - 0x10)/0x10 = 0xF(15)
236 sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
237 - sizeof(MPI2_SGE_IO_UNION))/16;
238 sc->chain_offset_value_for_mpt_ptmsg
239 = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
240 sc->mfi_cmd_pool_tbolt = NULL;
241 sc->request_desc_pool = NULL;
245 * This function calculates the memory requirement for the Thunderbolt
246 * controller and returns the total required memory in bytes
250 mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
253 size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT; /* for Alignment */
254 size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
255 size += sc->reply_size * sc->mfi_max_fw_cmds;
256 /* this is for SGL's */
257 size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
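/*
 * The pool therefore holds, in order: alignment padding, (mfi_max_fw_cmds + 1)
 * request frames, mfi_max_fw_cmds reply frames and mfi_max_fw_cmds chain/SGL
 * frames.
 */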
263 * This function will prepare message pools for the Thunderbolt controller
265 * DevExt - HBA miniport driver's adapter data storage structure
266 * pMemLocation - start of the memory allocated for Thunderbolt.
272 mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
273 uint32_t tbolt_contg_length)
276 uint8_t *addr = mem_location;
278 /* Request Descriptor Base physical Address */
280 /* For Request Descriptors Virtual Memory */
281 /* Initialise the aligned IO Frames Virtual Memory Pointer */
282 if (((uintptr_t)addr) & (0xFF)) {
283 addr = &addr[sc->raid_io_msg_size];
284 addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
285 sc->request_message_pool_align = addr;
287 sc->request_message_pool_align = addr;
289 offset = sc->request_message_pool_align - sc->request_message_pool;
290 sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;
292 /* DJA XXX should this be bus dma ??? */
293 /* Skip request message pool */
294 addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
295 /* Reply Frame Pool is initialized */
296 sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
297 if (((uintptr_t)addr) & (0xFF)) {
298 addr = &addr[sc->reply_size];
299 addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
301 sc->reply_frame_pool_align
302 = (struct mfi_mpi2_reply_header *)addr;
304 offset = (uintptr_t)sc->reply_frame_pool_align
305 - (uintptr_t)sc->request_message_pool;
306 sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;
308 /* Skip Reply Frame Pool */
309 addr += sc->reply_size * sc->mfi_max_fw_cmds;
310 sc->reply_pool_limit = addr;
312 /* initializing reply address to 0xFFFFFFFF */
313 memset((uint8_t *)sc->reply_frame_pool, 0xFF,
314 (sc->reply_size * sc->mfi_max_fw_cmds));
316 offset = sc->reply_size * sc->mfi_max_fw_cmds;
317 sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
318 /* initialize the last_reply_idx to 0 */
319 sc->last_reply_idx = 0;
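/*
 * Seed the reply-queue index registers (MFI_RFPI and MFI_RPI) with their
 * initial values.
 */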
320 MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
321 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
322 offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
323 sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
324 if (offset > tbolt_contg_length)
325 device_printf(sc->mfi_dev, "Error:Initialized more than "
331 * This routine prepares and issues the INIT2 frame to the firmware
335 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
337 struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
338 struct mfi_init_frame *mfi_init;
339 uintptr_t offset = 0;
340 bus_addr_t phyAddress;
341 MFI_ADDRESS *mfiAddressTemp;
342 struct mfi_command *cm, cmd_tmp;
345 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
347 /* Check if initialization is already completed */
348 if (sc->MFA_enabled) {
349 device_printf(sc->mfi_dev, "tbolt_init already initialised!\n");
353 if ((cm = mfi_dequeue_free(sc)) == NULL) {
354 device_printf(sc->mfi_dev, "tbolt_init failed to get command "
359 cmd_tmp.cm_frame = cm->cm_frame;
360 cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr;
361 cmd_tmp.cm_dmamap = cm->cm_dmamap;
363 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
364 cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
365 cm->cm_dmamap = sc->mfi_tb_init_dmamap;
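/*
 * The borrowed command is temporarily re-pointed at the preallocated
 * Thunderbolt init frame; the original frame, bus address and DMA map saved
 * in cmd_tmp are restored before the command is released below.
 */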
366 cm->cm_frame->header.context = 0;
369 * Abuse the SG list area of the frame to hold the init_qinfo
372 mfi_init = &cm->cm_frame->init;
374 mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
375 bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
376 mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
377 mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
379 /* set MsgVersion and HeaderVersion host driver was built with */
380 mpi2IocInit->MsgVersion = MPI2_VERSION;
381 mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
382 mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
383 mpi2IocInit->ReplyDescriptorPostQueueDepth
384 = (uint16_t)sc->mfi_max_fw_cmds;
385 mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
387 /* Get physical address of reply frame pool */
388 offset = (uintptr_t) sc->reply_frame_pool_align
389 - (uintptr_t)sc->request_message_pool;
390 phyAddress = sc->mfi_tb_busaddr + offset;
392 (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
393 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
394 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
396 /* Get physical address of request message pool */
397 offset = sc->request_message_pool_align - sc->request_message_pool;
398 phyAddress = sc->mfi_tb_busaddr + offset;
399 mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
400 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
401 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
402 mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
403 mpi2IocInit->TimeStamp = time_uptime;
406 snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
408 mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
409 mfi_init->driver_ver_hi =
410 (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
412 /* Get the physical address of the mpi2 ioc init command */
413 phyAddress = sc->mfi_tb_ioc_init_busaddr;
414 mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
415 mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
416 mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
418 mfi_init->header.cmd = MFI_CMD_INIT;
419 mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
420 mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;
423 cm->cm_flags |= MFI_CMD_POLLED;
424 cm->cm_timestamp = time_uptime;
425 if ((error = mfi_mapcmd(sc, cm)) != 0) {
426 device_printf(sc->mfi_dev, "failed to send IOC init2 "
427 "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
431 if (mfi_init->header.cmd_status == MFI_STAT_OK) {
434 device_printf(sc->mfi_dev, "Init command Failed %#x\n",
435 mfi_init->header.cmd_status);
436 error = mfi_init->header.cmd_status;
441 cm->cm_frame = cmd_tmp.cm_frame;
442 cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr;
443 cm->cm_dmamap = cmd_tmp.cm_dmamap;
444 mfi_release_command(cm);
451 mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
453 struct mfi_cmd_tbolt *cmd;
454 bus_addr_t io_req_base_phys;
455 uint8_t *io_req_base;
456 int i = 0, j = 0, offset = 0;
459 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
460 * Allocate the dynamic array first and then allocate individual
463 sc->request_desc_pool = malloc(sizeof(
464 union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
465 M_MFIBUF, M_NOWAIT|M_ZERO);
467 if (sc->request_desc_pool == NULL) {
468 device_printf(sc->mfi_dev, "Could not alloc "
469 "memory for request_desc_pool\n");
473 sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
474 * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
476 if (sc->mfi_cmd_pool_tbolt == NULL) {
477 free(sc->request_desc_pool, M_MFIBUF);
478 device_printf(sc->mfi_dev, "Could not alloc "
479 "memory for cmd_pool_tbolt\n");
483 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
484 sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
485 struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
487 if (!sc->mfi_cmd_pool_tbolt[i]) {
488 device_printf(sc->mfi_dev, "Could not alloc "
489 "cmd_pool_tbolt entry\n");
491 for (j = 0; j < i; j++)
492 free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
494 free(sc->request_desc_pool, M_MFIBUF);
495 sc->request_desc_pool = NULL;
496 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
497 sc->mfi_cmd_pool_tbolt = NULL;
504 * The first 256 bytes (SMID 0) are not used.  Don't add to the cmd
507 io_req_base = sc->request_message_pool_align
508 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
509 io_req_base_phys = sc->request_msg_busaddr
510 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
513 * Add all the commands to command pool (instance->cmd_pool)
515 /* SMID 0 is reserved. Set SMID/index from 1 */
517 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
518 cmd = sc->mfi_cmd_pool_tbolt[i];
519 offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
521 cmd->request_desc = (union mfi_mpi2_request_descriptor *)
522 (sc->request_desc_pool + i);
523 cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
524 (io_req_base + offset);
525 cmd->io_request_phys_addr = io_req_base_phys + offset;
526 cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
527 + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
528 cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
529 * MEGASAS_MAX_SZ_CHAIN_FRAME;
530 cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;
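/*
 * sync_cmd_idx == mfi_max_fw_cmds is the "not owned by any MFI command"
 * sentinel; the completion path skips descriptors still carrying it.
 */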
532 TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
538 mfi_tbolt_reset(struct mfi_softc *sc)
542 mtx_lock(&sc->mfi_io_lock);
543 if (sc->hw_crit_error) {
544 device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
545 mtx_unlock(&sc->mfi_io_lock);
549 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
550 fw_state = sc->mfi_read_fw_status(sc);
551 if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT ||
553 if ((sc->disableOnlineCtrlReset == 0)
554 && (sc->adpreset == 0)) {
555 device_printf(sc->mfi_dev, "Adapter RESET "
556 "condition is detected\n");
558 sc->issuepend_done = 0;
560 sc->last_reply_idx = 0;
561 mfi_process_fw_state_chg_isr((void *) sc);
563 mtx_unlock(&sc->mfi_io_lock);
567 mtx_unlock(&sc->mfi_io_lock);
572 * mfi_intr_tbolt - isr entry point
575 mfi_intr_tbolt(void *arg)
577 struct mfi_softc *sc = (struct mfi_softc *)arg;
579 if (sc->mfi_check_clear_intr(sc) == 1) {
582 if (sc->mfi_detaching)
584 mtx_lock(&sc->mfi_io_lock);
585 mfi_tbolt_complete_cmd(sc);
586 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
588 mtx_unlock(&sc->mfi_io_lock);
593 * map_tbolt_cmd_status - Maps FW cmd status to OS cmd status
594 * @cmd : Pointer to cmd
595 * @status : status of cmd returned by FW
596 * @ext_status : ext status of cmd returned by FW
600 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
605 mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
606 mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
607 mfi_cmd->cm_error = MFI_STAT_OK;
610 case MFI_STAT_SCSI_IO_FAILED:
611 case MFI_STAT_LD_INIT_IN_PROGRESS:
612 mfi_cmd->cm_frame->header.cmd_status = status;
613 mfi_cmd->cm_frame->header.scsi_status = ext_status;
614 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
615 mfi_cmd->cm_frame->dcmd.header.scsi_status
619 case MFI_STAT_SCSI_DONE_WITH_ERROR:
620 mfi_cmd->cm_frame->header.cmd_status = ext_status;
621 mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
624 case MFI_STAT_LD_OFFLINE:
625 case MFI_STAT_DEVICE_NOT_FOUND:
626 mfi_cmd->cm_frame->header.cmd_status = status;
627 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
631 mfi_cmd->cm_frame->header.cmd_status = status;
632 mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
638 * mfi_tbolt_return_cmd - Return a cmd to free command pool
639 * @instance: Adapter soft state
640 * @tbolt_cmd: Tbolt command packet to be returned to free command pool
641 * @mfi_cmd: Owning MFI command packet
644 mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *tbolt_cmd,
645 struct mfi_command *mfi_cmd)
647 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
649 mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT;
650 mfi_cmd->cm_extra_frames = 0;
651 tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;
653 TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next);
657 mfi_tbolt_complete_cmd(struct mfi_softc *sc)
659 struct mfi_mpi2_reply_header *desc, *reply_desc;
660 struct mfi_command *cmd_mfi; /* For MFA Cmds */
661 struct mfi_cmd_tbolt *cmd_tbolt;
663 uint8_t reply_descript_type;
664 struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
665 uint32_t status, extStatus;
666 uint16_t num_completed;
667 union desc_value val;
668 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
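/*
 * Walk the reply descriptor ring starting at last_reply_idx.  Each valid
 * descriptor carries the SMID of a completed command; the loop ends when an
 * unused (all 0xFF) descriptor is reached.
 */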
670 desc = (struct mfi_mpi2_reply_header *)
671 ((uintptr_t)sc->reply_frame_pool_align
672 + sc->last_reply_idx * sc->reply_size);
675 if (reply_desc == NULL) {
676 device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
680 reply_descript_type = reply_desc->ReplyFlags
681 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
682 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
686 val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
688 /* Read Reply descriptor */
689 while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
690 smid = reply_desc->SMID;
691 if (smid == 0 || smid > sc->mfi_max_fw_cmds) {
692 device_printf(sc->mfi_dev, "smid is %d cannot "
693 "proceed - skipping\n", smid);
696 cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
697 if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) {
698 device_printf(sc->mfi_dev, "cmd_tbolt %p "
699 "has invalid sync_cmd_idx=%d - skipping\n",
700 cmd_tbolt, cmd_tbolt->sync_cmd_idx);
703 cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
704 scsi_io_req = cmd_tbolt->io_request;
706 status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
707 extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
708 map_tbolt_cmd_status(cmd_mfi, status, extStatus);
710 /* mfi_tbolt_return_cmd is handled by mfi complete / return */
711 if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 &&
712 (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
713 /* polled LD/SYSPD IO command */
714 /* XXX mark okay for now DJA */
715 cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;
718 /* remove command from busy queue if not polled */
719 if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
720 mfi_remove_busy(cmd_mfi);
722 /* complete the command */
723 mfi_complete(sc, cmd_mfi);
727 sc->last_reply_idx++;
728 if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
729 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
730 sc->last_reply_idx = 0;
733 /* Set it back to all 0xfff */
734 ((union mfi_mpi2_reply_descriptor*)desc)->words =
739 /* Get the next reply descriptor */
740 desc = (struct mfi_mpi2_reply_header *)
741 ((uintptr_t)sc->reply_frame_pool_align
742 + sc->last_reply_idx * sc->reply_size);
744 val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
745 reply_descript_type = reply_desc->ReplyFlags
746 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
747 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
754 /* update replyIndex to FW */
755 if (sc->last_reply_idx)
756 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
762 * mfi_tbolt_get_cmd - Get a command from the free pool
763 * @instance: Adapter soft state
765 * Returns a free command from the pool
768 struct mfi_cmd_tbolt *
769 mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
771 struct mfi_cmd_tbolt *cmd = NULL;
773 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
775 if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL)
777 TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
778 memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
779 memset((uint8_t *)cmd->io_request, 0,
780 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
782 cmd->sync_cmd_idx = mfi_cmd->cm_index;
783 mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
784 mfi_cmd->cm_flags |= MFI_CMD_TBOLT;
789 union mfi_mpi2_request_descriptor *
790 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
794 if (index >= sc->mfi_max_fw_cmds) {
795 device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request "
796 "for descriptor\n", index);
799 p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
801 memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
802 return (union mfi_mpi2_request_descriptor *)p;
806 /* Used to build IOCTL cmd */
808 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
810 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
811 struct mfi_mpi2_request_raid_scsi_io *io_req;
812 struct mfi_cmd_tbolt *cmd;
814 cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
817 io_req = cmd->io_request;
818 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
820 io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
821 io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
823 io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;
825 mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;
828 In MFI pass thru, nextChainOffset will always be zero to
829 indicate the end of the chain.
831 mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
832 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
834 /* setting the length to the maximum length */
835 mpi25_ieee_chain->Length = 1024;
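/*
 * 1024 bytes is used as the maximum pass-thru length here; presumably it is
 * large enough for any MFI frame together with its scatter/gather list.
 */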
841 mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
842 struct mfi_cmd_tbolt *cmd)
844 uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
845 struct mfi_mpi2_request_raid_scsi_io *io_request;
846 struct IO_REQUEST_INFO io_info;
848 device_id = mfi_cmd->cm_frame->io.header.target_id;
849 io_request = cmd->io_request;
850 io_request->RaidContext.TargetID = device_id;
851 io_request->RaidContext.Status = 0;
852 io_request->RaidContext.exStatus = 0;
853 io_request->RaidContext.regLockFlags = 0;
855 start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
856 start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;
858 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
859 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
860 io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
861 io_info.ldTgtId = device_id;
862 if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
866 io_request->RaidContext.timeoutValue
867 = MFI_FUSION_FP_DEFAULT_TIMEOUT;
868 io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
869 io_request->DevHandle = device_id;
870 cmd->request_desc->header.RequestFlags
871 = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
872 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
873 if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
874 io_request->RaidContext.RegLockLength = 0x100;
875 io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
880 mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
881 struct mfi_cmd_tbolt *cmd)
883 struct mfi_mpi2_request_raid_scsi_io *io_request;
889 io_request = cmd->io_request;
890 if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
891 || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
894 mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);
896 /* Convert to SCSI command CDB */
897 bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
898 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
903 lba = mfi_cmd->cm_frame->io.lba_hi;
904 lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
905 cdb_len = mfi_build_cdb(readop, 0, lba,
906 mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);
908 /* Just the CDB length, rest of the Flags are zero */
909 io_request->IoFlags = cdb_len;
914 sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
915 (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
916 if (sge_count > sc->mfi_max_sge) {
917 device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
918 "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
921 io_request->RaidContext.numSGE = sge_count;
922 io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
924 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
925 io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
927 io_request->Control = MPI2_SCSIIO_CONTROL_READ;
929 io_request->SGLOffset0 = offsetof(
930 struct mfi_mpi2_request_raid_scsi_io, SGL)/4;
932 io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
933 io_request->SenseBufferLength = MFI_SENSE_LEN;
934 io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
935 io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;
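/*
 * Both RAID context status fields start as MFI_STAT_INVALID_STATUS,
 * presumably so completion handling can tell whether the firmware has
 * actually filled them in.
 */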
942 mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
943 pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
945 uint8_t i, sg_processed, sg_to_process;
946 uint8_t sge_count, sge_idx;
947 union mfi_sgl *os_sgl;
948 pMpi25IeeeSgeChain64_t sgl_end;
951 * Return 0 if there is no data transfer
953 if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
954 device_printf(sc->mfi_dev, "Buffer empty \n");
957 os_sgl = mfi_cmd->cm_sg;
958 sge_count = mfi_cmd->cm_frame->header.sg_count;
960 if (sge_count > sc->mfi_max_sge) {
961 device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
966 if (sge_count > sc->max_SGEs_in_main_message)
967 /* One element to store the chain info */
968 sge_idx = sc->max_SGEs_in_main_message - 1;
972 if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) {
973 sgl_end = sgl_ptr + (sc->max_SGEs_in_main_message - 1);
977 for (i = 0; i < sge_idx; i++) {
979 * For 32-bit BSD we get 32-bit SGLs from the OS,
980 * but the FW only takes 64-bit SGLs, so we copy from the 32-bit
983 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
984 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
985 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
987 sgl_ptr->Length = os_sgl->sg32[i].len;
988 sgl_ptr->Address = os_sgl->sg32[i].addr;
990 if (i == sge_count - 1 &&
991 (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
992 sgl_ptr->Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
996 cmd->io_request->ChainOffset = 0;
1001 if (sg_processed < sge_count) {
1002 pMpi25IeeeSgeChain64_t sg_chain;
1003 sg_to_process = sge_count - sg_processed;
1004 cmd->io_request->ChainOffset =
1005 sc->chain_offset_value_for_main_message;
1007 /* Prepare chain element */
1008 sg_chain->NextChainOffset = 0;
1009 if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))
1010 sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1012 sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1013 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
1014 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
1015 (sge_count - sg_processed));
1016 sg_chain->Address = cmd->sg_frame_phys_addr;
1017 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
1018 for (; i < sge_count; i++) {
1019 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1020 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1021 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1023 sgl_ptr->Length = os_sgl->sg32[i].len;
1024 sgl_ptr->Address = os_sgl->sg32[i].addr;
1026 if (i == sge_count - 1 &&
1028 (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
1030 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1039 union mfi_mpi2_request_descriptor *
1040 mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
1042 struct mfi_cmd_tbolt *cmd;
1043 union mfi_mpi2_request_descriptor *req_desc = NULL;
1045 cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
1050 req_desc = mfi_tbolt_get_request_descriptor(sc, index-1);
1051 if (req_desc == NULL) {
1052 mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
1056 if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) {
1057 mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
1060 req_desc->header.SMID = index;
1064 union mfi_mpi2_request_descriptor *
1065 mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
1067 union mfi_mpi2_request_descriptor *req_desc = NULL;
1069 if (mfi_build_mpt_pass_thru(sc, cmd)) {
1070 device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
1074 /* For fusion the frame_count variable is used for SMID */
1075 index = cmd->cm_extra_frames;
1077 req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
1078 if (req_desc == NULL)
1081 bzero(req_desc, sizeof(*req_desc));
1082 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1083 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1084 req_desc->header.SMID = index;
1089 mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1091 struct mfi_frame_header *hdr;
1093 union mfi_mpi2_request_descriptor *req_desc = NULL;
1094 int tm = mfi_polled_cmd_timeout * 1000;
1096 hdr = &cm->cm_frame->header;
1097 cdb = cm->cm_frame->pass.cdb;
1100 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1101 cm->cm_timestamp = time_uptime;
1102 mfi_enqueue_busy(cm);
1103 } else { /* still get interrupts for it */
1104 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1105 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1108 if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
1109 /* check for inquiry commands coming from CLI */
1110 if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
1111 if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
1113 device_printf(sc->mfi_dev, "Mapping from MFI "
1114 "to MPT Failed \n");
1119 device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
1120 } else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
1121 hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
1122 cm->cm_flags |= MFI_CMD_SCSI;
1123 if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
1124 device_printf(sc->mfi_dev, "LDIO Failed \n");
1127 } else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
1128 device_printf(sc->mfi_dev, "Mapping from MFI to MPT Failed\n");
1132 if (cm->cm_flags & MFI_CMD_SCSI) {
1134 * LD IO needs to be posted since it doesn't get
1135 * acknowledged via a status update so have the
1136 * controller reply via mfi_tbolt_complete_cmd.
1138 hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1141 MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
1142 MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >>0x20));
1144 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1148 * This is a polled command, so busy-wait for it to complete.
1150 * The value of hdr->cmd_status is updated directly by the hardware
1151 * so there is no guarantee that mfi_tbolt_complete_cmd is called
1152 * prior to this value changing.
1154 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1159 if (cm->cm_flags & MFI_CMD_SCSI) {
1161 * Force check reply queue.
1162 * This ensures that dump works correctly
1164 mfi_tbolt_complete_cmd(sc);
1168 /* ensure the command cleanup has been processed before returning */
1169 mfi_tbolt_complete_cmd(sc);
1171 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1172 device_printf(sc->mfi_dev, "Frame %p timed out "
1173 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1180 mfi_issue_pending_cmds_again(struct mfi_softc *sc)
1182 struct mfi_command *cm, *tmp;
1183 struct mfi_cmd_tbolt *cmd;
1185 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1186 TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
1188 cm->retry_for_fw_reset++;
1191 * If a command has been retried multiple times
1192 * and keeps causing a FW reset condition, no further recovery
1193 * should be performed on the controller
1195 if (cm->retry_for_fw_reset == 3) {
1196 device_printf(sc->mfi_dev, "megaraid_sas: command %p "
1197 "index=%d was tried multiple times during adapter "
1198 "reset - Shutting down the HBA\n", cm, cm->cm_index);
1200 sc->hw_crit_error = 1;
1204 mfi_remove_busy(cm);
1205 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
1206 if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <=
1207 sc->mfi_max_fw_cmds) {
1208 cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
1209 mfi_tbolt_return_cmd(sc, cmd, cm);
1211 device_printf(sc->mfi_dev,
1212 "Invalid extra_frames: %d detected\n",
1213 cm->cm_extra_frames);
1217 if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) {
1218 device_printf(sc->mfi_dev,
1219 "APJ ****requeue command %p index=%d\n",
1221 mfi_requeue_ready(cm);
1223 mfi_release_command(cm);
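/*
 * mfi_kill_hba - take the controller offline by writing MFI_STOP_ADP to the
 * inbound doorbell (register offset 0 on Thunderbolt, MFI_IDB otherwise).
 */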
1229 mfi_kill_hba(struct mfi_softc *sc)
1231 if (sc->mfi_flags & MFI_FLAGS_TBOLT)
1232 MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
1234 MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
1238 mfi_process_fw_state_chg_isr(void *arg)
1240 struct mfi_softc *sc= (struct mfi_softc *)arg;
1243 if (sc->adpreset == 1) {
1244 device_printf(sc->mfi_dev, "First stage of FW reset "
1247 sc->mfi_adp_reset(sc);
1248 sc->mfi_enable_intr(sc);
1250 device_printf(sc->mfi_dev, "First stage of reset complete, "
1251 "second stage initiated...\n");
1255 /* Wait for about 20 seconds before starting the second init */
1256 for (int wait = 0; wait < 20000; wait++)
1258 device_printf(sc->mfi_dev, "Second stage of FW reset "
1260 while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);
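/*
 * The loop above spins until bit 0x04, presumably the adapter-reset bit,
 * clears in the reset status register before the driver is re-armed.
 */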
1262 sc->mfi_disable_intr(sc);
1264 /* We expect the FW state to be READY */
1265 if (mfi_transition_firmware(sc)) {
1266 device_printf(sc->mfi_dev, "controller is not in "
1269 sc->hw_crit_error = 1;
1272 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
1273 device_printf(sc->mfi_dev, "Failed to initialise MFI "
1276 sc->hw_crit_error = 1;
1280 /* Init last reply index and max */
1281 MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
1282 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
1284 sc->mfi_enable_intr(sc);
1286 if (sc->mfi_aen_cm != NULL) {
1287 free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
1288 mfi_remove_busy(sc->mfi_aen_cm);
1289 mfi_release_command(sc->mfi_aen_cm);
1290 sc->mfi_aen_cm = NULL;
1293 if (sc->mfi_map_sync_cm != NULL) {
1294 mfi_remove_busy(sc->mfi_map_sync_cm);
1295 mfi_release_command(sc->mfi_map_sync_cm);
1296 sc->mfi_map_sync_cm = NULL;
1298 mfi_issue_pending_cmds_again(sc);
1301 * Issuing pending commands can result in the adapter being marked
1302 * dead because of too many retries. Check for that
1303 * condition before clearing the reset condition on the FW
1305 if (!sc->hw_crit_error) {
1307 * Initiate AEN (Asynchronous Event Notification) &
1310 mfi_aen_setup(sc, sc->last_seq_num);
1311 mfi_tbolt_sync_map_info(sc);
1313 sc->issuepend_done = 1;
1314 device_printf(sc->mfi_dev, "second stage of reset "
1315 "complete, FW is ready now.\n");
1317 device_printf(sc->mfi_dev, "second stage of reset "
1318 "never completed, hba was marked offline.\n");
1321 device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
1322 "called with unhandled value:%d\n", sc->adpreset);
1327 * The ThunderBolt HW has an option for the driver to directly
1328 * access the underlying disks and operate on the RAID. To
1329 * do this there needs to be a capability to keep the RAID controller
1330 * and driver in sync. The FreeBSD driver does not take advantage
1331 * of this feature since it adds a lot of complexity and slows down
1332 * performance. Performance is gained by using the controller's
1335 * Even though this driver doesn't access the disks directly, an
1336 * AEN like command is used to inform the RAID firmware to "sync"
1337 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command. This
1338 * command in write mode will return when the RAID firmware has
1339 * detected a change to the RAID state. Examples of this type
1340 * of change are removing a disk. Once the command returns then
1341 * the driver needs to acknowledge this and "sync" all LD's again.
1342 * This repeats until we shutdown. Then we need to cancel this
1345 * If this is not done right the RAID firmware will not remove a
1346 * pulled drive and the RAID won't go degraded, etc., effectively
1347 * stopping any RAID management functions.
1349 * Doing another LD sync requires the use of an event, since the
1350 * driver needs to do a mfi_wait_command and can't do that in an interrupt thread.
1353 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO command.
1354 * That requires a bunch of structure and it is simpler to just do
1355 * the MFI_DCMD_LD_GET_LIST versus walking the RAID map.
1359 mfi_tbolt_sync_map_info(struct mfi_softc *sc)
1362 struct mfi_command *cmd = NULL;
1363 struct mfi_dcmd_frame *dcmd = NULL;
1364 uint32_t context = 0;
1365 union mfi_ld_ref *ld_sync = NULL;
1367 struct mfi_frame_header *hdr;
1368 struct mfi_command *cm = NULL;
1369 struct mfi_ld_list *list = NULL;
1371 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1373 if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
1376 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1377 (void **)&list, sizeof(*list));
1381 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;
1383 if (mfi_wait_command(sc, cm) != 0) {
1384 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1388 hdr = &cm->cm_frame->header;
1389 if (hdr->cmd_status != MFI_STAT_OK) {
1390 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1395 ld_size = sizeof(*ld_sync) * list->ld_count;
1396 ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF,
1398 if (ld_sync == NULL) {
1399 device_printf(sc->mfi_dev, "Failed to allocate sync\n");
1402 for (i = 0; i < list->ld_count; i++)
1403 ld_sync[i].ref = list->ld_list[i].ld.ref;
1405 if ((cmd = mfi_dequeue_free(sc)) == NULL) {
1406 device_printf(sc->mfi_dev, "Failed to get command\n");
1407 free(ld_sync, M_MFIBUF);
1411 context = cmd->cm_frame->header.context;
1412 bzero(cmd->cm_frame, sizeof(union mfi_frame));
1413 cmd->cm_frame->header.context = context;
1415 dcmd = &cmd->cm_frame->dcmd;
1416 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1417 dcmd->header.cmd = MFI_CMD_DCMD;
1418 dcmd->header.flags = MFI_FRAME_DIR_WRITE;
1419 dcmd->header.timeout = 0;
1420 dcmd->header.data_len = ld_size;
1421 dcmd->header.scsi_status = 0;
1422 dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
1423 cmd->cm_sg = &dcmd->sgl;
1424 cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1425 cmd->cm_data = ld_sync;
1426 cmd->cm_private = ld_sync;
1428 cmd->cm_len = ld_size;
1429 cmd->cm_complete = mfi_sync_map_complete;
1430 sc->mfi_map_sync_cm = cmd;
1432 cmd->cm_flags = MFI_CMD_DATAOUT;
1433 cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
1434 cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
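/*
 * MFI_DCMD_MBOX_PEND_FLAG asks the firmware to hold this command until the
 * LD map actually changes, so its completion serves as the "RAID state
 * changed" notification described in the comment above mfi_tbolt_sync_map_info.
 */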
1436 if ((error = mfi_mapcmd(sc, cmd)) != 0) {
1437 device_printf(sc->mfi_dev, "failed to send map sync\n");
1438 free(ld_sync, M_MFIBUF);
1439 sc->mfi_map_sync_cm = NULL;
1440 mfi_release_command(cmd);
1446 free(list, M_MFIBUF);
1448 mfi_release_command(cm);
1452 mfi_sync_map_complete(struct mfi_command *cm)
1454 struct mfi_frame_header *hdr;
1455 struct mfi_softc *sc;
1459 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1461 hdr = &cm->cm_frame->header;
1463 if (sc->mfi_map_sync_cm == NULL)
1466 if (sc->cm_map_abort ||
1467 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1468 sc->cm_map_abort = 0;
1472 free(cm->cm_data, M_MFIBUF);
1473 wakeup(&sc->mfi_map_sync_cm);
1474 sc->mfi_map_sync_cm = NULL;
1475 mfi_release_command(cm);
1477 /* set it up again so the driver can catch more events */
1479 mfi_queue_map_sync(sc);
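/*
 * mfi_queue_map_sync - defer the next LD map sync to a taskqueue, since
 * mfi_tbolt_sync_map_info sleeps in mfi_wait_command and cannot be called
 * directly from the completion path above.
 */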
1483 mfi_queue_map_sync(struct mfi_softc *sc)
1485 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1486 taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
1490 mfi_handle_map_sync(void *context, int pending)
1492 struct mfi_softc *sc;
1495 mtx_lock(&sc->mfi_io_lock);
1496 mfi_tbolt_sync_map_info(sc);
1497 mtx_unlock(&sc->mfi_io_lock);