/*-
 * Copyright (c) 2009 Yahoo! Inc.
 * Copyright (c) 2011-2015 LSI Corp.
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright 2000-2020 Broadcom Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Communications core for Avago Technologies (LSI) MPT3 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/sbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/scsi/scsi_all.h>

#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_pci.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_table.h>
#include <dev/mpr/mpr_sas.h>
static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_init_queues(struct mpr_softc *sc);
static void mpr_resize_queues(struct mpr_softc *sc);
static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_transition_operational(struct mpr_softc *sc);
static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
static void mpr_iocfacts_free(struct mpr_softc *sc);
static void mpr_startup(void *arg);
static int mpr_send_iocinit(struct mpr_softc *sc);
static int mpr_alloc_queues(struct mpr_softc *sc);
static int mpr_alloc_hw_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
static int mpr_debug_sysctl(SYSCTL_HANDLER_ARGS);
static int mpr_dump_reqs(SYSCTL_HANDLER_ARGS);
static void mpr_parse_debug(struct mpr_softc *sc, char *list);
SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");

MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");
/*
 * Do a "Diagnostic Reset", aka a hard reset. This should get the chip out of
 * any state and back to its initialization state machine.
 */
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
/*
 * This union smooths the le64toh conversion of cm->cm_desc.Words.
 * The compiler only supports a uint64_t being passed as the argument;
 * otherwise it throws this error:
 *     "aggregate value used where an integer was expected"
 */
typedef union _reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
} reply_descriptor, request_descriptor;
/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };
/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context, it can sleep without
 * harm. If it is called from an interrupt handler, it cannot sleep and
 * NO_SLEEP must be set. Based on the sleep flag, the driver calls either
 * msleep, pause, or DELAY. msleep and pause are variants of the same
 * mechanism; pause is used when mpr_mtx is not held by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
	uint32_t reg;
	int i, error, tries = 0;
	uint8_t first_wait_done = FALSE;

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* Clear any pending interrupts */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/*
	 * Force NO_SLEEP for threads that are prohibited from sleeping,
	 * e.g. threads running in an interrupt handler.
	 */
#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flag = NO_SLEEP;
	mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag);
	/* Push the magic sequence */
	error = ETIMEDOUT;
	while (tries++ < 20) {
		for (i = 0; i < sizeof(mpt2_reset_magic); i++)
			mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
			    mpt2_reset_magic[i]);

		/* Wait 100 msec */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
			    "mprdiag", hz/10);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdiag", hz/10);
		else
			DELAY(100 * 1000);

		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
			error = 0;
			break;
		}
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n",
		    error);
		return (error);
	}
	/* Send the actual reset. XXX need to refresh the reg? */
	reg |= MPI2_DIAG_RESET_ADAPTER;
	mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n",
	    reg);
	mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);
	/* Wait up to 300 seconds in 50ms intervals */
	error = ETIMEDOUT;
	for (i = 0; i < 6000; i++) {
		/*
		 * Wait 50 msec. If this is the first time through, wait 256
		 * msec to satisfy Diag Reset timing requirements.
		 */
		if (first_wait_done) {
			if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
				msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
				    "mprdiag", hz/20);
			else if (sleep_flag == CAN_SLEEP)
				pause("mprdiag", hz/20);
			else
				DELAY(50 * 1000);
		} else {
			DELAY(256 * 1000);
			first_wait_done = TRUE;
		}
		/*
		 * Check for the RESET_ADAPTER bit to be cleared first, then
		 * wait for the RESET state to be cleared, which takes a little
		 * longer.
		 */
		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_RESET_ADAPTER) {
			continue;
		}
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
			error = 0;
			break;
		}
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n",
		    error);
		return (error);
	}

	mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
	mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n");

	return (0);
}
static int
mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
{
	int error;

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	error = 0;
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
	    MPI2_DOORBELL_FUNCTION_SHIFT);

	if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_FAULT,
		    "Doorbell handshake failed\n");
		error = ETIMEDOUT;
	}

	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}
static int
mpr_transition_ready(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error, tries = 0;
	int sleep_flags;

	/* If we are in attach call, do not sleep */
	sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
	    ? CAN_SLEEP : NO_SLEEP;

	error = 0;
	mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n",
	    __func__, sleep_flags);

	while (tries++ < 1200) {
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_dprint(sc, MPR_INIT, "  Doorbell= 0x%x\n", reg);

		/*
		 * Ensure the IOC is ready to talk. If it's not, try
		 * resetting it.
		 */
		if (reg & MPI2_DOORBELL_USED) {
			mpr_dprint(sc, MPR_INIT, "  Not ready, sending diag "
			    "reset\n");
			mpr_diag_reset(sc, sleep_flags);
			DELAY(50000);
			continue;
		}

		/* Is the adapter owned by another peer? */
		if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
		    (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the "
			    "control of another peer host, aborting "
			    "initialization.\n");
			error = ENXIO;
			break;
		}

		state = reg & MPI2_IOC_STATE_MASK;
		if (state == MPI2_IOC_STATE_READY) {
			/* Ready to go! */
			error = 0;
			break;
		} else if (state == MPI2_IOC_STATE_FAULT) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault "
			    "state 0x%x, resetting\n",
			    state & MPI2_DOORBELL_FAULT_CODE_MASK);
			mpr_diag_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
			/* Need to take ownership */
			mpr_message_unit_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_RESET) {
			/* Wait a bit, IOC might be in transition */
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "IOC in unexpected reset state\n");
		} else {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "IOC in unknown state 0x%x\n", state);
			error = EINVAL;
			break;
		}

		error = ETIMEDOUT;

		/* Wait 50ms for things to settle down. */
		DELAY(50000);
	}

	if (error)
		mpr_dprint(sc, MPR_INIT|MPR_FAULT,
		    "Cannot transition IOC to ready\n");
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}
static int
mpr_transition_operational(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error;

	error = 0;
	reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
	mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);

	state = reg & MPI2_IOC_STATE_MASK;
	if (state != MPI2_IOC_STATE_READY) {
		mpr_dprint(sc, MPR_INIT, "IOC not ready\n");
		if ((error = mpr_transition_ready(sc)) != 0) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "failed to transition ready, exit\n");
			return (error);
		}
	}

	error = mpr_send_iocinit(sc);
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}
static void
mpr_resize_queues(struct mpr_softc *sc)
{
	u_int reqcr, prireqcr, maxio, sges_per_frame, chain_seg_size;

	/*
	 * Size the queues. Since the reply queues always need one free
	 * entry, we'll deduct one reply message here. The LSI documents
	 * suggest instead to add a count to the request queue, but I think
	 * that it's better to deduct from reply queue.
	 */
	prireqcr = MAX(1, sc->max_prireqframes);
	prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);

	reqcr = MAX(2, sc->max_reqframes);
	reqcr = MIN(reqcr, sc->facts->RequestCredit);

	sc->num_reqs = prireqcr + reqcr;
	sc->num_prireqs = prireqcr;
	sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
	    sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
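	/*
	 * Worked example (illustrative numbers only): with tunables
	 * max_replyframes= 2048 and max_evtframes= 32, and an IOC reporting
	 * MaxReplyDescriptorPostQueueDepth= 8192, this yields
	 * num_replies = MIN(2048 + 32, 8192) - 1 = 2079, preserving the one
	 * unused entry the reply queue always requires.
	 */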
	/* Store the request frame size in bytes rather than as 32bit words */
	sc->reqframesz = sc->facts->IOCRequestFrameSize * 4;

	/*
	 * Gen3 and beyond uses the IOCMaxChainSegmentSize from IOC Facts to
	 * get the size of a Chain Frame. Previous versions use the size as a
	 * Request Frame for the Chain Frame size. If IOCMaxChainSegmentSize
	 * is 0, use the default value. The IOCMaxChainSegmentSize is the
	 * number of 16-byte elements that can fit in a Chain Frame, which is
	 * the size of an IEEE Simple SGE.
	 */
	if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
		chain_seg_size = htole16(sc->facts->IOCMaxChainSegmentSize);
		if (chain_seg_size == 0)
			chain_seg_size = MPR_DEFAULT_CHAIN_SEG_SIZE;
		sc->chain_frame_size = chain_seg_size *
		    MPR_MAX_CHAIN_ELEMENT_SIZE;
	} else {
		sc->chain_frame_size = sc->reqframesz;
	}
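	/*
	 * For illustration (hypothetical value): an IOC reporting
	 * IOCMaxChainSegmentSize= 8 yields a Chain Frame of 8 * 16 = 128
	 * bytes, i.e. room for eight 16-byte IEEE Simple SGEs.
	 */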
	/*
	 * Max IO Size is Page Size * the following:
	 * ((SGEs per frame - 1 for chain element) * Max Chain Depth)
	 * + 1 for no chain needed in last frame.
	 *
	 * If user suggests a Max IO size to use, use the smaller of the
	 * user's value and the calculated value as long as the user's
	 * value is larger than 0. The user's value is in pages.
	 */
	sges_per_frame = sc->chain_frame_size/sizeof(MPI2_IEEE_SGE_SIMPLE64)-1;
	maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE;
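	/*
	 * Continuing the hypothetical 128-byte chain frame example above:
	 * sges_per_frame = 128 / 16 - 1 = 7, so with MaxChainDepth= 128 and
	 * 4KB pages, maxio = (7 * 128 + 1) * 4096 = 3,673,088 bytes (~3.5MB).
	 */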
	/*
	 * If an I/O size limitation is requested, then use it and pass up to
	 * CAM. If not, use MAXPHYS as an optimization hint, but report the
	 * HW limit.
	 */
	if (sc->max_io_pages > 0) {
		maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
		sc->maxio = maxio;
	} else {
		sc->maxio = maxio;
		maxio = min(maxio, MAXPHYS);
	}

	sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
	    sges_per_frame * reqcr;
	if (sc->max_chains > 0 && sc->max_chains < sc->num_chains)
		sc->num_chains = sc->max_chains;

	/*
	 * Figure out the number of MSIx-based queues. If the firmware or
	 * user has done something crazy and not allowed enough credit for
	 * the queues to be useful then don't enable multi-queue.
	 */
	if (sc->facts->MaxMSIxVectors < 2)
		sc->msi_msgs = 1;

	if (sc->msi_msgs > 1) {
		sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus);
		sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
		if (sc->num_reqs / sc->msi_msgs < 2)
			sc->msi_msgs = 1;
	}

	mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
	    sc->msi_msgs, sc->num_reqs, sc->num_replies);
}
/*
 * This is called during attach and when re-initializing due to a Diag Reset.
 * IOC Facts is used to allocate many of the structures needed by the driver.
 * If called from attach, de-allocation is not required because the driver has
 * not allocated any structures yet, but if called from a Diag Reset, previously
 * allocated structures based on IOC Facts will need to be freed and re-
 * allocated based on the latest IOC Facts.
 */
static int
mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
{
	int error;
	Mpi2IOCFactsReply_t saved_facts;
	uint8_t saved_mode, reallocating;

	mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__);

	/* Save old IOC Facts and then only reallocate if Facts have changed */
	if (!attaching)
		bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));

	/*
	 * Get IOC Facts. In all cases throughout this function, panic if doing
	 * a re-initialization and only return the error if attaching so the OS
	 * can handle it.
	 */
	if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
		if (attaching) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get "
			    "IOC Facts with error %d, exit\n", error);
			return (error);
		} else {
			panic("%s failed to get IOC Facts with error %d\n",
			    __func__, error);
		}
	}

	MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts);

	snprintf(sc->fw_version, sizeof(sc->fw_version),
	    "%02d.%02d.%02d.%02d",
	    sc->facts->FWVersion.Struct.Major,
	    sc->facts->FWVersion.Struct.Minor,
	    sc->facts->FWVersion.Struct.Unit,
	    sc->facts->FWVersion.Struct.Dev);

	mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
	    MPR_DRIVER_VERSION);
	mpr_dprint(sc, MPR_INFO,
	    "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
	    "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
	    "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
	    "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
	    "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV");

	/*
	 * If the chip doesn't support event replay then a hard reset will be
	 * required to trigger a full discovery. Do the reset here then
	 * retransition to Ready. A hard reset might have already been done,
	 * but it doesn't hurt to do it again. Only do this if attaching, not
	 * for a Diag Reset.
	 */
	if (attaching && ((sc->facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
		mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n");
		mpr_diag_reset(sc, NO_SLEEP);
		if ((error = mpr_transition_ready(sc)) != 0) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
			    "transition to ready with error %d, exit\n",
			    error);
			return (error);
		}
	}
	/*
	 * Set flag if IR Firmware is loaded. If the RAID Capability has
	 * changed from the previous IOC Facts, log a warning, but only if
	 * checking this after a Diag Reset and not during attach.
	 */
	saved_mode = sc->ir_firmware;
	if (sc->facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
		sc->ir_firmware = 1;
	if (!attaching) {
		if (sc->ir_firmware != saved_mode) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode "
			    "in IOC Facts does not match previous mode\n");
		}
	}
	/* Only deallocate and reallocate if relevant IOC Facts have changed */
	reallocating = FALSE;
	sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED;

	if ((!attaching) &&
	    ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
	    (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
	    (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
	    (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
	    (saved_facts.ProductID != sc->facts->ProductID) ||
	    (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
	    (saved_facts.IOCRequestFrameSize !=
	    sc->facts->IOCRequestFrameSize) ||
	    (saved_facts.IOCMaxChainSegmentSize !=
	    sc->facts->IOCMaxChainSegmentSize) ||
	    (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
	    (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
	    (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
	    (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
	    (saved_facts.MaxReplyDescriptorPostQueueDepth !=
	    sc->facts->MaxReplyDescriptorPostQueueDepth) ||
	    (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
	    (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
	    (saved_facts.MaxPersistentEntries !=
	    sc->facts->MaxPersistentEntries))) {
		reallocating = TRUE;

		/* Record that we reallocated everything */
		sc->mpr_flags |= MPR_FLAGS_REALLOCATED;
	}
	/*
	 * Some things should be done if attaching or re-allocating after a Diag
	 * Reset, but are not needed after a Diag Reset if the FW has not
	 * changed.
	 */
	if (attaching || reallocating) {
		/*
		 * Check if controller supports FW diag buffers and set flag to
		 * enable each type.
		 */
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
			    enabled = TRUE;

		/*
		 * Set flags for some supported items.
		 */
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
			sc->eedp_enabled = TRUE;
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
			sc->control_TLR = TRUE;
		if ((sc->facts->IOCCapabilities &
		    MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) &&
		    (sc->mpr_flags & MPR_FLAGS_SEA_IOC))
			sc->atomic_desc_capable = TRUE;

		mpr_resize_queues(sc);

		/*
		 * Initialize all Tail Queues
		 */
		TAILQ_INIT(&sc->req_list);
		TAILQ_INIT(&sc->high_priority_req_list);
		TAILQ_INIT(&sc->chain_list);
		TAILQ_INIT(&sc->prp_page_list);
		TAILQ_INIT(&sc->tm_list);
	}
	/*
	 * If doing a Diag Reset and the FW is significantly different
	 * (reallocating will be set above in IOC Facts comparison), then all
	 * buffers based on the IOC Facts will need to be freed before they are
	 * reallocated.
	 */
	if (reallocating) {
		mpr_iocfacts_free(sc);
		mprsas_realloc_targets(sc, saved_facts.MaxTargets +
		    saved_facts.MaxVolumes);
	}

	/*
	 * Any deallocation has been completed. Now start reallocating
	 * if needed. Will only need to reallocate if attaching or if the new
	 * IOC Facts are different from the previous IOC Facts after a Diag
	 * Reset. Targets have already been allocated above if needed.
	 */
	error = 0;
	while (attaching || reallocating) {
		if ((error = mpr_alloc_hw_queues(sc)) != 0)
			break;
		if ((error = mpr_alloc_replies(sc)) != 0)
			break;
		if ((error = mpr_alloc_requests(sc)) != 0)
			break;
		if ((error = mpr_alloc_queues(sc)) != 0)
			break;
		break;
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to alloc queues with error %d\n", error);
		mpr_free(sc);
		return (error);
	}
	/* Always initialize the queues */
	bzero(sc->free_queue, sc->fqdepth * 4);
	mpr_init_queues(sc);

	/*
	 * Always get the chip out of the reset state, but only panic if not
	 * attaching. If attaching and there is an error, that is handled by
	 * the OS.
	 */
	error = mpr_transition_operational(sc);
	if (error != 0) {
		if (attaching) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
			    "transition to operational with error %d\n",
			    error);
			mpr_free(sc);
			return (error);
		} else {
			panic("%s failed to transition to operational with "
			    "error %d\n", __func__, error);
		}
	}

	/*
	 * Finish the queue initialization.
	 * These are set here instead of in mpr_init_queues() because the
	 * IOC resets these values during the state transition in
	 * mpr_transition_operational(). The free index is set to 1
	 * because the corresponding index in the IOC is set to 0, and the
	 * IOC treats the queues as full if both are set to the same value.
	 * Hence the reason that the queue can't hold all of the possible
	 * replies.
	 */
	sc->replypostindex = 0;
	mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
	mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
	/*
	 * Attach the subsystems so they can prepare their event masks.
	 * XXX Should be dynamic so that IM/IR and user modules can attach
	 */
	error = 0;
	while (attaching) {
		mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n");
		if ((error = mpr_attach_log(sc)) != 0)
			break;
		if ((error = mpr_attach_sas(sc)) != 0)
			break;
		if ((error = mpr_attach_user(sc)) != 0)
			break;
		break;
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to attach all subsystems: error %d\n", error);
		mpr_free(sc);
		return (error);
	}

	/*
	 * XXX If the number of MSI-X vectors changes during re-init, this
	 * won't see it and adjust.
	 */
	if (attaching && (error = mpr_pci_setup_interrupts(sc)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to setup interrupts\n");
		mpr_free(sc);
		return (error);
	}

	return (error);
}
/*
 * This is called if memory is being freed (during detach for example) and when
 * buffers need to be reallocated due to a Diag Reset.
 */
static void
mpr_iocfacts_free(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	if (sc->free_busaddr != 0)
		bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
	if (sc->free_queue != NULL)
		bus_dmamem_free(sc->queues_dmat, sc->free_queue,
		    sc->queues_map);
	if (sc->queues_dmat != NULL)
		bus_dma_tag_destroy(sc->queues_dmat);

	if (sc->chain_frames != NULL) {
		bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
		bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
		    sc->chain_map);
	}
	if (sc->chain_dmat != NULL)
		bus_dma_tag_destroy(sc->chain_dmat);

	if (sc->sense_busaddr != 0)
		bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
	if (sc->sense_frames != NULL)
		bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
		    sc->sense_map);
	if (sc->sense_dmat != NULL)
		bus_dma_tag_destroy(sc->sense_dmat);

	if (sc->prp_page_busaddr != 0)
		bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
	if (sc->prp_pages != NULL)
		bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
		    sc->prp_page_map);
	if (sc->prp_page_dmat != NULL)
		bus_dma_tag_destroy(sc->prp_page_dmat);

	if (sc->reply_busaddr != 0)
		bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
	if (sc->reply_frames != NULL)
		bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
		    sc->reply_map);
	if (sc->reply_dmat != NULL)
		bus_dma_tag_destroy(sc->reply_dmat);

	if (sc->req_busaddr != 0)
		bus_dmamap_unload(sc->req_dmat, sc->req_map);
	if (sc->req_frames != NULL)
		bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
	if (sc->req_dmat != NULL)
		bus_dma_tag_destroy(sc->req_dmat);

	if (sc->chains != NULL)
		free(sc->chains, M_MPR);
	if (sc->prps != NULL)
		free(sc->prps, M_MPR);
	if (sc->commands != NULL) {
		for (i = 1; i < sc->num_reqs; i++) {
			cm = &sc->commands[i];
			bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
		}
		free(sc->commands, M_MPR);
	}
	if (sc->buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->buffer_dmat);

	mpr_pci_free_interrupts(sc);
	free(sc->queues, M_MPR);
}
/*
 * The terms diag reset and hard reset are used interchangeably in the MPI
 * docs to mean resetting the controller chip. In this code diag reset
 * cleans everything up, and the hard reset function just sends the reset
 * sequence to the chip. This should probably be refactored so that every
 * subsystem gets a reset notification of some sort, and can clean up
 * appropriately.
 */
int
mpr_reinit(struct mpr_softc *sc)
{
	int error;
	struct mprsas_softc *sassc;

	sassc = sc->sassc;
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n");
	if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
		mpr_dprint(sc, MPR_INIT, "Reset already in progress\n");
		return (0);
	}

	/*
	 * Make sure the completion callbacks can recognize they're getting
	 * a NULL cm_reply due to a reset.
	 */
	sc->mpr_flags |= MPR_FLAGS_DIAGRESET;

	/*
	 * Mask interrupts here.
	 */
	mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n");
	mpr_mask_intr(sc);

	error = mpr_diag_reset(sc, CAN_SLEEP);
	if (error != 0) {
		panic("%s hard reset failed with error %d\n", __func__, error);
	}

	/* Restore the PCI state, including the MSI-X registers */
	mpr_pci_restore(sc);

	/* Give the I/O subsystem special priority to get itself prepared */
	mprsas_handle_reinit(sc);

	/*
	 * Get IOC Facts and allocate all structures based on this information.
	 * The attach function will also call mpr_iocfacts_allocate at startup.
	 * If relevant values have changed in IOC Facts, this function will free
	 * all of the memory based on IOC Facts and reallocate that memory.
	 */
	if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
		panic("%s IOC Facts based allocation failed with error %d\n",
		    __func__, error);
	}

	/*
	 * Mapping structures will be re-allocated after getting IOC Page8, so
	 * free these structures here.
	 */
	mpr_mapping_exit(sc);

	/*
	 * The static page function currently read is IOC Page8. Others can be
	 * added in future. It's possible that the values in IOC Page8 have
	 * changed after a Diag Reset due to user modification, so always read
	 * these. Interrupts are masked, so unmask them before getting config
	 * pages.
	 */
	mpr_unmask_intr(sc);
	sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
	mpr_base_static_config_pages(sc);

	/*
	 * Some mapping info is based in IOC Page8 data, so re-initialize the
	 * mapping tables.
	 */
	mpr_mapping_initialize(sc);

	/*
	 * Restart will reload the event masks clobbered by the reset, and
	 * then enable the port.
	 */
	mpr_reregister_events(sc);

	/* the end of discovery will release the simq, so we're done. */
	mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n",
	    sc, sc->replypostindex, sc->replyfreeindex);
	mprsas_release_simq_reinit(sassc);
	mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);

	return (0);
}
/*
 * Wait for the chip to ACK a word that we've put into its FIFO.
 * Wait for <timeout> seconds. In a single loop iteration, busy-wait
 * for 500 microseconds.
 * Total is [ 0.5 * (2000 * <timeout>) ] in milliseconds.
 */
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
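	/*
	 * Worked example: with timeout= 5, CAN_SLEEP gives 5000 iterations
	 * of ~1 msec sleeps and NO_SLEEP gives 10000 iterations of 500 usec
	 * busy-waits; either way the total wait is roughly 5 seconds.
	 */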
	do {
		int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), "
			    "timeout(%d)\n", __func__, count, timeout);
			return (0);
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpr_dprint(sc, MPR_FAULT,
				    "fault_state(0x%04x)!\n", doorbell);
				return (EFAULT);
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;

		/*
		 * If it can sleep, sleep for 1 millisecond, else busy loop
		 * for 500 microseconds.
		 */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba",
			    hz/1000);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdba", hz/1000);
		else
			DELAY(500);
		count++;
	} while (--cntdn);

out:
	mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
	    "int_status(%x)!\n", __func__, count, int_status);
	return (ETIMEDOUT);
}
/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mpr_wait_db_int(struct mpr_softc *sc)
{
	int retry;

	for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
		if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
		    MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
			return (0);
		DELAY(2000);
	}

	return (ETIMEDOUT);
}
/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
    int req_sz, int reply_sz, int timeout)
{
	uint16_t *data16;
	uint32_t *data32;
	int i, count, ioc_sz, residual;
	int sleep_flags = CAN_SLEEP;

#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flags = NO_SLEEP;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		return (EBUSY);

	/*
	 * Announce that a message is coming through the doorbell. Messages
	 * are pushed at 32bit words, so round up if needed.
	 */
	count = (req_sz + 3) / 4;
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
	    (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
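	/*
	 * For example, a hypothetical 26-byte request rounds up to
	 * (26 + 3) / 4 = 7 32-bit words pushed through the doorbell.
	 */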
	if (mpr_wait_db_int(sc) ||
	    (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
		return (ENXIO);
	}
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
		return (ENXIO);
	}

	/* Clock out the message data synchronously in 32-bit dwords */
	data32 = (uint32_t *)req;
	for (i = 0; i < count; i++) {
		mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
		if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout while writing doorbell\n");
			return (ENXIO);
		}
	}

	/*
	 * Clock in the reply in 16-bit words. The total length of the
	 * message is always in the 4th byte, so clock out the first 2 words
	 * manually, then loop the rest.
	 */
	data16 = (uint16_t *)reply;
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
		return (ENXIO);
	}
	data16[0] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
		return (ENXIO);
	}
	data16[1] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Number of 32bit words in the message */
	ioc_sz = reply->MsgLength;

	/*
	 * Figure out how many 16bit words to clock in without overrunning.
	 * The precision loss with dividing reply_sz can safely be
	 * ignored because the messages can only be multiples of 32bits.
	 */
	residual = 0;
	count = MIN((reply_sz / 4), ioc_sz) * 2;
	if (count < ioc_sz * 2) {
		residual = ioc_sz * 2 - count;
		mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
		    "residual message words\n", residual);
	}
	for (i = 2; i < count; i++) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout reading doorbell %d\n", i);
			return (ENXIO);
		}
		data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
		    MPI2_DOORBELL_DATA_MASK;
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/*
	 * Pull out residual words that won't fit into the provided buffer.
	 * This keeps the chip from hanging due to a driver programming
	 * error.
	 */
	while (residual--) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
			return (ENXIO);
		}
		(void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
		return (ENXIO);
	}
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	return (0);
}
static void
mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
{
	request_descriptor rd;

	mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
	    cm->cm_desc.Default.SMID, cm, cm->cm_ccb);

	if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
	    MPR_FLAGS_SHUTDOWN))
		mtx_assert(&sc->mpr_mtx, MA_OWNED);

	if (++sc->io_cmds_active > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, ("command not busy\n"));
	cm->cm_state = MPR_CM_STATE_INQUEUE;
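	/*
	 * atomic_desc_capable is set in mpr_iocfacts_allocate() only when the
	 * IOC advertises MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (and the SEA
	 * IOC flag is set). Such IOCs accept the low 32 bits of the request
	 * descriptor as a single atomic register write; otherwise the full
	 * 64-bit descriptor is written as two 32-bit halves below.
	 */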
	if (sc->atomic_desc_capable) {
		rd.u.low = cm->cm_desc.Words.Low;
		mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
		    rd.u.low);
	} else {
		rd.u.low = cm->cm_desc.Words.Low;
		rd.u.high = cm->cm_desc.Words.High;
		rd.word = htole64(rd.word);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
		    rd.u.low);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
		    rd.u.high);
	}
}
/*
 * Just the FACTS, ma'am.
 */
static int
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
	MPI2_DEFAULT_REPLY *reply;
	MPI2_IOC_FACTS_REQUEST request;
	int error, req_sz, reply_sz;

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
	reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
	reply = (MPI2_DEFAULT_REPLY *)facts;

	bzero(&request, req_sz);
	request.Function = MPI2_FUNCTION_IOC_FACTS;
	error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
static int
mpr_send_iocinit(struct mpr_softc *sc)
{
	MPI2_IOC_INIT_REQUEST init;
	MPI2_DEFAULT_REPLY reply;
	int req_sz, reply_sz, error;
	struct timeval now;
	uint64_t time_in_msec;

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* Do a quick sanity check on proper initialization */
	if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0)
	    || (sc->replyframesz == 0)) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Driver not fully initialized for IOCInit\n");
		return (EINVAL);
	}

	req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
	reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
	bzero(&init, req_sz);
	bzero(&reply, reply_sz);

	/*
	 * Fill in the init block. Note that most addresses are
	 * deliberately in the lower 32bits of memory. This is a micro-
	 * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
	 */
	init.Function = MPI2_FUNCTION_IOC_INIT;
	init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	init.MsgVersion = htole16(MPI2_VERSION);
	init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
	init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4));
	init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
	init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
	init.SenseBufferAddressHigh = 0;
	init.SystemReplyAddressHigh = 0;
	init.SystemRequestFrameBaseAddress.High = 0;
	init.SystemRequestFrameBaseAddress.Low =
	    htole32((uint32_t)sc->req_busaddr);
	init.ReplyDescriptorPostQueueAddress.High = 0;
	init.ReplyDescriptorPostQueueAddress.Low =
	    htole32((uint32_t)sc->post_busaddr);
	init.ReplyFreeQueueAddress.High = 0;
	init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
	getmicrotime(&now);
	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
	init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
	init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
	init.HostPageSize = HOST_PAGE_SIZE_4K;

	error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
	if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		error = ENXIO;

	mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}
void
mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

static void
mpr_memaddr_wait_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpr_busdma_context *ctx;
	int need_unload, need_free;

	ctx = (struct mpr_busdma_context *)arg;
	need_unload = 0;
	need_free = 0;

	mpr_lock(ctx->softc);
	ctx->error = error;
	ctx->completed = 1;
	if ((error == 0) && (ctx->abandoned == 0)) {
		*ctx->addr = segs[0].ds_addr;
	} else {
		if (nsegs != 0)
			need_unload = 1;
		if (ctx->abandoned != 0)
			need_free = 1;
	}
	mpr_unlock(ctx->softc);

	if (need_unload != 0) {
		bus_dmamap_unload(ctx->buffer_dmat,
		    ctx->buffer_dmamap);
		*ctx->addr = 0;
	}

	if (need_free != 0)
		free(ctx, M_MPR);
}
static int
mpr_alloc_queues(struct mpr_softc *sc)
{
	struct mpr_queue *q;
	int nq, i;

	nq = sc->msi_msgs;
	mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq);

	sc->queues = malloc(sizeof(struct mpr_queue) * nq, M_MPR,
	    M_NOWAIT|M_ZERO);
	if (sc->queues == NULL)
		return (ENOMEM);

	for (i = 0; i < nq; i++) {
		q = &sc->queues[i];
		mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q);
		q->sc = sc;
		q->qnum = i;
	}

	return (0);
}
static int
mpr_alloc_hw_queues(struct mpr_softc *sc)
{
	bus_addr_t queues_busaddr;
	uint8_t *queues;
	int qsize, fqsize, pqsize;

	/*
	 * The reply free queue contains 4 byte entries in multiples of 16 and
	 * aligned on a 16 byte boundary. There must always be an unused entry.
	 * This queue supplies fresh reply frames for the firmware to use.
	 *
	 * The reply descriptor post queue contains 8 byte entries in
	 * multiples of 16 and aligned on a 16 byte boundary. This queue
	 * contains filled-in reply frames sent from the firmware to the host.
	 *
	 * These two queues are allocated together for simplicity.
	 */
	sc->fqdepth = roundup2(sc->num_replies + 1, 16);
	sc->pqdepth = roundup2(sc->num_replies + 1, 16);
	fqsize = sc->fqdepth * 4;
	pqsize = sc->pqdepth * 8;
	qsize = fqsize + pqsize;
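	/*
	 * Worked example (illustrative): with num_replies= 2079,
	 * roundup2(2080, 16) keeps fqdepth = pqdepth = 2080, so
	 * fqsize = 2080 * 4 = 8320 bytes, pqsize = 2080 * 8 = 16640 bytes,
	 * and qsize = 24960 bytes in one shared allocation.
	 */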
	if (bus_dma_tag_create(sc->mpr_parent_dmat,	/* parent */
	    16, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    qsize,			/* maxsize */
	    1,				/* nsegments */
	    qsize,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->queues_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
	    &sc->queues_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n");
		return (ENOMEM);
	}
	bzero(queues, qsize);
	bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
	    mpr_memaddr_cb, &queues_busaddr, 0);

	sc->free_queue = (uint32_t *)queues;
	sc->free_busaddr = queues_busaddr;
	sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
	sc->post_busaddr = queues_busaddr + fqsize;
	mpr_dprint(sc, MPR_INIT, "free queue busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->free_busaddr, fqsize);
	mpr_dprint(sc, MPR_INIT, "reply queue busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->post_busaddr, pqsize);

	return (0);
}
static int
mpr_alloc_replies(struct mpr_softc *sc)
{
	int rsize, num_replies;

	/* Store the reply frame size in bytes rather than as 32bit words */
	sc->replyframesz = sc->facts->ReplyFrameSize * 4;

	/*
	 * sc->num_replies should be one less than sc->fqdepth. We need to
	 * allocate space for sc->fqdepth replies, but only sc->num_replies
	 * replies can be used at once.
	 */
	num_replies = max(sc->fqdepth, sc->num_replies);

	rsize = sc->replyframesz * num_replies;
	if (bus_dma_tag_create(sc->mpr_parent_dmat,	/* parent */
	    4, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    rsize,			/* maxsize */
	    1,				/* nsegments */
	    rsize,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->reply_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
	    BUS_DMA_NOWAIT, &sc->reply_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n");
		return (ENOMEM);
	}
	bzero(sc->reply_frames, rsize);
	bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
	    mpr_memaddr_cb, &sc->reply_busaddr, 0);
	mpr_dprint(sc, MPR_INIT, "reply frames busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->reply_busaddr, rsize);

	return (0);
}
static void
mpr_load_chains_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpr_softc *sc = arg;
	struct mpr_chain *chain;
	bus_size_t bo;
	int i, o, s;

	if (error != 0)
		return;

	for (i = 0, o = 0, s = 0; s < nsegs; s++) {
		for (bo = 0; bo + sc->chain_frame_size <= segs[s].ds_len;
		    bo += sc->chain_frame_size) {
			chain = &sc->chains[i++];
			chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames+o);
			chain->chain_busaddr = segs[s].ds_addr + bo;
			o += sc->chain_frame_size;
			mpr_free_chain(sc, chain);
		}
		if (bo != segs[s].ds_len)
			o += segs[s].ds_len - bo;
	}
	sc->chain_free_lowwater = i;
}
static int
mpr_alloc_requests(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i, rsize, nsegs;

	rsize = sc->reqframesz * sc->num_reqs;
	if (bus_dma_tag_create(sc->mpr_parent_dmat,	/* parent */
	    16, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    rsize,			/* maxsize */
	    1,				/* nsegments */
	    rsize,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->req_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
	    BUS_DMA_NOWAIT, &sc->req_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate request memory\n");
		return (ENOMEM);
	}
	bzero(sc->req_frames, rsize);
	bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
	    mpr_memaddr_cb, &sc->req_busaddr, 0);
	mpr_dprint(sc, MPR_INIT, "request frames busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->req_busaddr, rsize);

	sc->chains = malloc(sizeof(struct mpr_chain) * sc->num_chains, M_MPR,
	    M_NOWAIT | M_ZERO);
	if (!sc->chains) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
		return (ENOMEM);
	}
	rsize = sc->chain_frame_size * sc->num_chains;
	if (bus_dma_tag_create(sc->mpr_parent_dmat,	/* parent */
	    16, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    rsize,			/* maxsize */
	    howmany(rsize, PAGE_SIZE),	/* nsegments */
	    rsize,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->chain_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames,
	    rsize, mpr_load_chains_cb, sc, BUS_DMA_NOWAIT)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot load chain memory\n");
		bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
		    sc->chain_map);
		return (ENOMEM);
	}

	rsize = MPR_SENSE_LEN * sc->num_reqs;
	if (bus_dma_tag_create(sc->mpr_parent_dmat,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    rsize,			/* maxsize */
	    1,				/* nsegments */
	    rsize,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sense_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
	    BUS_DMA_NOWAIT, &sc->sense_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bzero(sc->sense_frames, rsize);
	bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
	    mpr_memaddr_cb, &sc->sense_busaddr, 0);
	mpr_dprint(sc, MPR_INIT, "sense frames busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->sense_busaddr, rsize);
	/*
	 * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports
	 * them.
	 */
	if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) &&
	    (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) {
		if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM)
			return (ENOMEM);
	}

	nsegs = (sc->maxio / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(sc->mpr_parent_dmat,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    nsegs,			/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &sc->mpr_mtx,		/* lockarg */
	    &sc->buffer_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * SMID 0 cannot be used as a free command per the firmware spec.
	 * Just drop that command instead of risking accounting bugs.
	 */
	sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs,
	    M_MPR, M_WAITOK | M_ZERO);
	if (!sc->commands) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate command memory\n");
		return (ENOMEM);
	}
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_req = sc->req_frames + i * sc->reqframesz;
		cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz;
		cm->cm_sense = &sc->sense_frames[i];
		cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
		cm->cm_desc.Default.SMID = i;
		cm->cm_sc = sc;
		cm->cm_state = MPR_CM_STATE_BUSY;
		TAILQ_INIT(&cm->cm_chain_list);
		TAILQ_INIT(&cm->cm_prp_page_list);
		callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);

		/* XXX Is a failure here a critical problem? */
		if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
		    == 0) {
			if (i <= sc->num_prireqs)
				mpr_free_high_priority_command(sc, cm);
			else
				mpr_free_command(sc, cm);
		} else {
			panic("failed to allocate command %d\n", i);
		}
	}

	return (0);
}
/*
 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
 * which are scatter/gather lists for NVMe devices.
 *
 * This buffer must be contiguous due to the nature of how NVMe PRPs are built
 * and translated by FW.
 *
 * Returns ENOMEM if memory could not be allocated, otherwise returns 0.
 */
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
	int PRPs_per_page, PRPs_required, pages_required;
	int rsize, i;
	struct mpr_prp_page *prp_page;

	/*
	 * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
	 * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
	 * MAX_IO_SIZE / PAGE_SIZE = 256
	 *
	 * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
	 * required for the remainder of the 1MB I/O. 512 PRPs can fit into one
	 * page (4096 / 8 = 512), so only one page is required for each I/O.
	 *
	 * Each of these buffers will need to be contiguous. For simplicity,
	 * only one buffer is allocated here, which has all of the space
	 * required for the NVMe Queue Depth. If there are problems allocating
	 * this one buffer, this function will need to change to allocate
	 * individual, contiguous NVME_QDEPTH buffers.
	 *
	 * The real calculation will use the real max io size. Above is just an
	 * example.
	 */
	PRPs_required = sc->maxio / PAGE_SIZE;
	PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
	pages_required = (PRPs_required / PRPs_per_page) + 1;
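	/*
	 * Plugging in the example numbers from the comment above (maxio= 1MB,
	 * 4KB pages, 8-byte PRP entries): PRPs_required = 256, PRPs_per_page =
	 * 512 - 1 = 511, and pages_required = 256/511 + 1 = 1, so each of the
	 * NVME_QDEPTH buffers is a single 4KB page.
	 */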
	sc->prp_buffer_size = PAGE_SIZE * pages_required;
	rsize = sc->prp_buffer_size * NVME_QDEPTH;
	if (bus_dma_tag_create(sc->mpr_parent_dmat,	/* parent */
	    4, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    rsize,			/* maxsize */
	    1,				/* nsegments */
	    rsize,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->prp_page_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA "
		    "tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
	    BUS_DMA_NOWAIT, &sc->prp_page_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n");
		return (ENOMEM);
	}
	bzero(sc->prp_pages, rsize);
	bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
	    rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);

	sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < NVME_QDEPTH; i++) {
		prp_page = &sc->prps[i];
		prp_page->prp_page = (uint64_t *)(sc->prp_pages +
		    i * sc->prp_buffer_size);
		prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
		    i * sc->prp_buffer_size);
		mpr_free_prp_page(sc, prp_page);
		sc->prp_pages_free_lowwater++;
	}

	return (0);
}
static int
mpr_init_queues(struct mpr_softc *sc)
{
	int i;

	memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);

	/*
	 * According to the spec, we need to use one less reply than we
	 * have space for on the queue. So sc->num_replies (the number we
	 * use) should be less than sc->fqdepth (allocated size).
	 */
	if (sc->num_replies >= sc->fqdepth)
		return (EINVAL);

	/*
	 * Initialize all of the free queue entries.
	 */
	for (i = 0; i < sc->fqdepth; i++) {
		sc->free_queue[i] = sc->reply_busaddr + (i * sc->replyframesz);
	}
	sc->replyfreeindex = sc->num_replies;

	return (0);
}
/*
 * Get the driver parameter tunables. Lowest priority are the driver defaults.
 * Next are the global settings, if they exist. Highest are the per-unit
 * settings, if they exist.
 */
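/*
 * For example (hypothetical values), a global setting and a per-unit
 * override could be set from loader.conf(5) as:
 *
 *   hw.mpr.max_chains="4096"
 *   dev.mpr.0.debug_level="info,fault"
 *
 * where the dev.mpr.0 entry takes precedence over hw.mpr for unit 0.
 */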
void
mpr_get_tunables(struct mpr_softc *sc)
{
	char tmpstr[80], mpr_debug[80];

	/* XXX default to some debugging for now */
	sc->mpr_debug = MPR_INFO | MPR_FAULT;
	sc->disable_msix = 0;
	sc->disable_msi = 0;
	sc->max_msix = MPR_MSIX_MAX;
	sc->max_chains = MPR_CHAIN_FRAMES;
	sc->max_io_pages = MPR_MAXIO_PAGES;
	sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
	sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
	sc->use_phynum = 1;
	sc->max_reqframes = MPR_REQ_FRAMES;
	sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
	sc->max_replyframes = MPR_REPLY_FRAMES;
	sc->max_evtframes = MPR_EVT_REPLY_FRAMES;

	/*
	 * Grab the global variables.
	 */
	bzero(mpr_debug, 80);
	if (TUNABLE_STR_FETCH("hw.mpr.debug_level", mpr_debug, 80) != 0)
		mpr_parse_debug(sc, mpr_debug);
	TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix);
	TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi);
	TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix);
	TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
	TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
	TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
	TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
	TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
	TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
	TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
	TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
	TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
	    device_get_unit(sc->mpr_dev));
	bzero(mpr_debug, 80);
	if (TUNABLE_STR_FETCH(tmpstr, mpr_debug, 80) != 0)
		mpr_parse_debug(sc, mpr_debug);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_msix",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_msix);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);

	bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
}
void
mpr_setup_sysctl(struct mpr_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d",
	    device_get_unit(sc->mpr_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mpr_dev);

	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
1839 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1840 OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1841 sc, 0, mpr_debug_sysctl, "A", "mpr debug level");
1843 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1844 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
1845 "Disable the use of MSI-X interrupts");
1847 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1848 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0,
1849 "User-defined maximum number of MSIX queues");
1851 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1852 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0,
1853 "Negotiated number of MSIX queues");
1855 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1856 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0,
1857 "Total number of allocated request frames");
1859 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1860 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0,
1861 "Total number of allocated high priority request frames");
1863 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1864 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0,
1865 "Total number of allocated reply frames");
1867 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1868 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0,
1869 "Total number of event frames allocated");
1871 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1872 OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version,
1873 strlen(sc->fw_version), "firmware version");
1875 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1876 OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION,
1877 strlen(MPR_DRIVER_VERSION), "driver version");
1879 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1880 OID_AUTO, "io_cmds_active", CTLFLAG_RD,
1881 &sc->io_cmds_active, 0, "number of currently active commands");
1883 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1884 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
1885 &sc->io_cmds_highwater, 0, "maximum active commands seen");
1887 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1888 OID_AUTO, "chain_free", CTLFLAG_RD,
1889 &sc->chain_free, 0, "number of free chain elements");
1891 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1892 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
1893 &sc->chain_free_lowwater, 0,"lowest number of free chain elements");
1895 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1896 OID_AUTO, "max_chains", CTLFLAG_RD,
1897 &sc->max_chains, 0,"maximum chain frames that will be allocated");
1899 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1900 OID_AUTO, "max_io_pages", CTLFLAG_RD,
1901 &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use "
1904 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1905 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
1906 "enable SSU to SATA SSD/HDD at shutdown");
1908 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1909 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
1910 &sc->chain_alloc_fail, "chain allocation failures");
1912 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1913 OID_AUTO, "spinup_wait_time", CTLFLAG_RD,
1914 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
1915 "spinup after SATA ID error");
1917 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1918 OID_AUTO, "dump_reqs", CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_SKIP, sc, 0,
1919 mpr_dump_reqs, "I", "Dump Active Requests");
1921 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1922 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
1923 "Use the phy number for enumeration");
1925 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1926 OID_AUTO, "prp_pages_free", CTLFLAG_RD,
1927 &sc->prp_pages_free, 0, "number of free PRP pages");
1929 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1930 OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD,
1931 &sc->prp_pages_free_lowwater, 0, "lowest number of free PRP pages");
1933 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1934 OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
1935 &sc->prp_page_alloc_fail, "PRP page allocation failures");
1938 static struct mpr_debug_string {
1939 char *name;
1940 int flag;
1941 } mpr_debug_strings[] = {
1943 {"fault", MPR_FAULT},
1944 {"event", MPR_EVENT},
1946 {"recovery", MPR_RECOVERY},
1947 {"error", MPR_ERROR},
1949 {"xinfo", MPR_XINFO},
1951 {"mapping", MPR_MAPPING},
1952 {"trace", MPR_TRACE}
1955 enum mpr_debug_level_combiner {
1956 COMB_NONE,
1957 COMB_ADD,
1958 COMB_SUB
1959 };
1962 mpr_debug_sysctl(SYSCTL_HANDLER_ARGS)
1964 struct mpr_softc *sc;
1965 struct mpr_debug_string *string;
1969 int i, len, debug, error;
1971 sc = (struct mpr_softc *)arg1;
1973 error = sysctl_wire_old_buffer(req, 0);
1977 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
1978 debug = sc->mpr_debug;
1980 sbuf_printf(sbuf, "%#x", debug);
1982 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]);
1983 for (i = 0; i < sz; i++) {
1984 string = &mpr_debug_strings[i];
1985 if (debug & string->flag)
1986 sbuf_printf(sbuf, ",%s", string->name);
1989 error = sbuf_finish(sbuf);
1992 if (error || req->newptr == NULL)
1995 len = req->newlen - req->newidx;
1999 buffer = malloc(len, M_MPR, M_ZERO|M_WAITOK);
2000 error = SYSCTL_IN(req, buffer, len);
2002 mpr_parse_debug(sc, buffer);
2004 free(buffer, M_MPR);
2009 mpr_parse_debug(struct mpr_softc *sc, char *list)
2011 struct mpr_debug_string *string;
2012 enum mpr_debug_level_combiner op;
2013 char *token, *endtoken;
2017 if (list == NULL || *list == '\0')
2023 } else if (*list == '-') {
2032 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]);
2033 while ((token = strsep(&list, ":,")) != NULL) {
2035 /* Handle integer flags */
2036 flags |= strtol(token, &endtoken, 0);
2037 if (token != endtoken)
2040 /* Handle text flags */
2041 for (i = 0; i < sz; i++) {
2042 string = &mpr_debug_strings[i];
2043 if (strcasecmp(token, string->name) == 0) {
2044 flags |= string->flag;
2052 sc->mpr_debug = flags;
2055 sc->mpr_debug |= flags;
2058 sc->mpr_debug &= (~flags);
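/*
 * Illustrative usage of the debug_level syntax parsed above (a sketch; the
 * sysctl node name assumes unit 0 and the per-device sysctl tree case):
 *
 *   sysctl dev.mpr.0.debug_level=info,fault    set exactly these flags
 *   sysctl dev.mpr.0.debug_level=+trace        add trace to the current flags
 *   sysctl dev.mpr.0.debug_level=-event        clear the event flag
 *   sysctl dev.mpr.0.debug_level=0x31          numeric flags are also accepted
 *
 * Tokens may be separated by ',' or ':' and may mix names and numbers; named
 * tokens are matched case-insensitively against mpr_debug_strings[].
 */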
2064 struct mpr_dumpreq_hdr {
2073 mpr_dump_reqs(SYSCTL_HANDLER_ARGS)
2075 struct mpr_softc *sc;
2076 struct mpr_chain *chain, *chain1;
2077 struct mpr_command *cm;
2078 struct mpr_dumpreq_hdr hdr;
2080 uint32_t smid, state;
2081 int i, numreqs, error = 0;
2083 sc = (struct mpr_softc *)arg1;
2085 if ((error = priv_check(curthread, PRIV_DRIVER)) != 0) {
2086 printf("priv check error %d\n", error);
2090 state = MPR_CM_STATE_INQUEUE;
2092 numreqs = sc->num_reqs;
2094 if (req->newptr != NULL)
2097 if (smid == 0 || smid > sc->num_reqs)
2099 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs))
2100 numreqs = sc->num_reqs;
2101 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
2103 /* Best effort, no locking */
2104 for (i = smid; i < numreqs; i++) {
2105 cm = &sc->commands[i];
2106 if (cm->cm_state != state)
2109 hdr.state = cm->cm_state;
2111 hdr.deschi = cm->cm_desc.Words.High;
2112 hdr.desclo = cm->cm_desc.Words.Low;
2113 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
2116 sbuf_bcat(sb, &hdr, sizeof(hdr));
2117 sbuf_bcat(sb, cm->cm_req, 128);
2118 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
2120 sbuf_bcat(sb, chain->chain, 128);
2123 error = sbuf_finish(sb);
2129 mpr_attach(struct mpr_softc *sc)
2134 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
2136 mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF);
2137 callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0);
2138 callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0);
2139 TAILQ_INIT(&sc->event_list);
2140 timevalclear(&sc->lastfail);
2142 if ((error = mpr_transition_ready(sc)) != 0) {
2143 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
2144 "Failed to transition ready\n");
2148 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR,
2151 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
2152 "Cannot allocate memory, exit\n");
2157 * Get IOC Facts and allocate all structures based on this information.
2158 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC
2159 * Facts. If relevant values have changed in IOC Facts, this function
2160 * will free all of the memory based on IOC Facts and reallocate that
2161 * memory. If this fails, any allocated memory should already be freed.
2163 if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) {
2164 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation "
2165 "failed with error %d\n", error);
2169 /* Start the periodic watchdog check on the IOC Doorbell */
2173 * The portenable will kick off discovery events that will drive the
2174 * rest of the initialization process. The CAM/SAS module will
2175 * hold up the boot sequence until discovery is complete.
2177 sc->mpr_ich.ich_func = mpr_startup;
2178 sc->mpr_ich.ich_arg = sc;
2179 if (config_intrhook_establish(&sc->mpr_ich) != 0) {
2180 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
2181 "Cannot establish MPR config hook\n");
2186 * Allow IR to shutdown gracefully when shutdown occurs.
2188 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
2189 mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
2191 if (sc->shutdown_eh == NULL)
2192 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
2193 "shutdown event registration failed\n");
2195 mpr_setup_sysctl(sc);
2197 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE;
2198 mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);
2203 /* Run through any late-start handlers. */
2205 mpr_startup(void *arg)
2207 struct mpr_softc *sc;
2209 sc = (struct mpr_softc *)arg;
2210 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
2213 mpr_unmask_intr(sc);
2215 /* initialize device mapping tables */
2216 mpr_base_static_config_pages(sc);
2217 mpr_mapping_initialize(sc);
2221 mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n");
2222 config_intrhook_disestablish(&sc->mpr_ich);
2223 sc->mpr_ich.ich_arg = NULL;
2225 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
2228 /* Periodic watchdog. Is called with the driver lock already held. */
2230 mpr_periodic(void *arg)
2232 struct mpr_softc *sc;
2235 sc = (struct mpr_softc *)arg;
2236 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN)
2239 db = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
2240 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2241 if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) ==
2242 MPI2_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) {
2243 panic("TEMPERATURE FAULT: STOPPING.");
2245 mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
2249 callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc);
2253 mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data,
2254 MPI2_EVENT_NOTIFICATION_REPLY *event)
2256 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
2258 MPR_DPRINT_EVENT(sc, generic, event);
2260 switch (event->Event) {
2261 case MPI2_EVENT_LOG_DATA:
2262 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n");
2263 if (sc->mpr_debug & MPR_EVENT)
2264 hexdump(event->EventData, event->EventDataLength, NULL,
2267 case MPI2_EVENT_LOG_ENTRY_ADDED:
2268 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
2269 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
2270 "0x%x Sequence %d:\n", entry->LogEntryQualifier,
2271 entry->LogSequence);
2280 mpr_attach_log(struct mpr_softc *sc)
2285 setbit(events, MPI2_EVENT_LOG_DATA);
2286 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
2288 mpr_register_events(sc, events, mpr_log_evt_handler, NULL,
2295 mpr_detach_log(struct mpr_softc *sc)
2298 if (sc->mpr_log_eh != NULL)
2299 mpr_deregister_events(sc, sc->mpr_log_eh);
2304 * Free all of the driver resources and detach submodules. Should be called
2305 * without the lock held.
2308 mpr_free(struct mpr_softc *sc)
2312 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
2313 /* Turn off the watchdog */
2315 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN;
2317 /* Lock must not be held for this */
2318 callout_drain(&sc->periodic);
2319 callout_drain(&sc->device_check_callout);
2321 if (((error = mpr_detach_log(sc)) != 0) ||
2322 ((error = mpr_detach_sas(sc)) != 0)) {
2323 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach "
2324 "subsystems, error= %d, exit\n", error);
2328 mpr_detach_user(sc);
2330 /* Put the IOC back in the READY state. */
2332 if ((error = mpr_transition_ready(sc)) != 0) {
2338 if (sc->facts != NULL)
2339 free(sc->facts, M_MPR);
2342 * Free all buffers that are based on IOC Facts. A Diag Reset may need
2343 * to free these buffers too.
2345 mpr_iocfacts_free(sc);
2347 if (sc->sysctl_tree != NULL)
2348 sysctl_ctx_free(&sc->sysctl_ctx);
2350 /* Deregister the shutdown function */
2351 if (sc->shutdown_eh != NULL)
2352 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
2354 mtx_destroy(&sc->mpr_mtx);
2355 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
2360 static __inline void
2361 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm)
2366 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n");
2370 cm->cm_state = MPR_CM_STATE_BUSY;
2371 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
2372 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
2374 if (cm->cm_complete != NULL) {
2375 mpr_dprint(sc, MPR_TRACE,
2376 "%s cm %p calling cm_complete %p data %p reply %p\n",
2377 __func__, cm, cm->cm_complete, cm->cm_complete_data,
2379 cm->cm_complete(sc, cm);
2382 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
2383 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm);
2387 if (sc->io_cmds_active != 0) {
2388 sc->io_cmds_active--;
2390 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is "
2391 "out of sync - resynching to 0\n");
2396 mpr_sas_log_info(struct mpr_softc *sc, u32 log_info)
2398 union loginfo_type {
2407 union loginfo_type sas_loginfo;
2408 char *originator_str = NULL;
2410 sas_loginfo.loginfo = log_info;
2411 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
2414 /* each nexus loss loginfo */
2415 if (log_info == 0x31170000)
2418 /* eat the loginfos associated with task aborts */
2419 if ((log_info == 0x30050000) || (log_info == 0x31140000) ||
2420 (log_info == 0x31130000))
2423 switch (sas_loginfo.dw.originator) {
2425 originator_str = "IOP";
2428 originator_str = "PL";
2431 originator_str = "IR";
2435 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), "
2436 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str,
2437 sas_loginfo.dw.code, sas_loginfo.dw.subcode);
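/*
 * Worked example of the decode above (field layout inferred from its use:
 * bits 31-28 bus_type, 27-24 originator, 23-16 code, 15-0 sub_code): the
 * nexus-loss loginfo 0x31170000 decodes to bus_type 0x3 (SAS),
 * originator 0x1 (PL), code 0x17, sub_code 0x0000.
 */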
2441 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply)
2443 MPI2DefaultReply_t *mpi_reply;
2446 mpi_reply = (MPI2DefaultReply_t*)reply;
2447 sc_status = le16toh(mpi_reply->IOCStatus);
2448 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
2449 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
2453 mpr_intr(void *data)
2455 struct mpr_softc *sc;
2458 sc = (struct mpr_softc *)data;
2459 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2462 * Check interrupt status register to flush the bus. This is
2463 * needed for both INTx interrupts and driver-driven polling
2465 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
2466 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
2470 mpr_intr_locked(data);
2476 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
2477 * chip. Hopefully this theory is correct.
2480 mpr_intr_msi(void *data)
2482 struct mpr_softc *sc;
2484 sc = (struct mpr_softc *)data;
2485 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2487 mpr_intr_locked(data);
2493 * The locking is overly broad and simplistic, but easy to deal with for now.
2496 mpr_intr_locked(void *data)
2498 MPI2_REPLY_DESCRIPTORS_UNION *desc;
2499 MPI2_DIAG_RELEASE_REPLY *rel_rep;
2500 mpr_fw_diagnostic_buffer_t *pBuffer;
2501 struct mpr_softc *sc;
2503 struct mpr_command *cm = NULL;
2507 sc = (struct mpr_softc *)data;
2509 pq = sc->replypostindex;
2510 mpr_dprint(sc, MPR_TRACE,
2511 "%s sc %p starting with replypostindex %u\n",
2512 __func__, sc, sc->replypostindex);
2516 desc = &sc->post_queue[sc->replypostindex];
2519 * Copy and clear out the descriptor so that any reentry will
2520 * immediately know that this descriptor has already been
2521 * looked at. There is unfortunate casting magic because the
2522 * MPI API doesn't have a cardinal 64-bit type.
2524 tdesc = 0xffffffffffffffff;
2525 tdesc = atomic_swap_64((uint64_t *)desc, tdesc);
2526 desc = (MPI2_REPLY_DESCRIPTORS_UNION *)&tdesc;
2528 flags = desc->Default.ReplyFlags &
2529 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2530 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) ||
2531 (le32toh(desc->Words.High) == 0xffffffff))
2534 /* increment the replypostindex now, so that event handlers
2535 * and cm completion handlers which decide to do a diag
2536 * reset can zero it without it getting incremented again
2537 * afterwards, and we break out of this loop on the next
2538 * iteration since the reply post queue has been cleared to
2539 * 0xFF and all descriptors look unused (which they are).
2541 if (++sc->replypostindex >= sc->pqdepth)
2542 sc->replypostindex = 0;
2545 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
2546 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS:
2547 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS:
2548 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
2549 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE,
2550 ("command not inqueue\n"));
2551 cm->cm_state = MPR_CM_STATE_BUSY;
2552 cm->cm_reply = NULL;
2554 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
2560 * Re-compose the reply address from the address
2561 * sent back from the chip. The ReplyFrameAddress
2562 * is the lower 32 bits of the physical address of
2563 * particular reply frame. Convert that address to
2564 * host format, and then use that to provide the
2565 * offset against the virtual address base
2566 * (sc->reply_frames).
2568 baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
2569 reply = sc->reply_frames +
2570 (baddr - ((uint32_t)sc->reply_busaddr));
2572 * Make sure the reply we got back is in a valid
2573 * range. If not, go ahead and panic here, since
2574 * we'll probably panic as soon as we dereference the
2575 * reply pointer anyway.
2577 if ((reply < sc->reply_frames)
2578 || (reply > (sc->reply_frames +
2579 (sc->fqdepth * sc->replyframesz)))) {
2580 printf("%s: WARNING: reply %p out of range!\n",
2582 printf("%s: reply_frames %p, fqdepth %d, "
2583 "frame size %d\n", __func__,
2584 sc->reply_frames, sc->fqdepth,
2586 printf("%s: baddr %#x,\n", __func__, baddr);
2587 /* LSI-TODO. See Linux Code for Graceful exit */
2588 panic("Reply address out of range");
2590 if (le16toh(desc->AddressReply.SMID) == 0) {
2591 if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
2592 MPI2_FUNCTION_DIAG_BUFFER_POST) {
2594 * If SMID is 0 for Diag Buffer Post,
2595 * this implies that the reply is due to
2596 * a release function with a status that
2597 * the buffer has been released. Set
2598 * the buffer flags accordingly.
2601 (MPI2_DIAG_RELEASE_REPLY *)reply;
2602 if ((le16toh(rel_rep->IOCStatus) &
2603 MPI2_IOCSTATUS_MASK) ==
2604 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
2607 &sc->fw_diag_buffer_list[
2608 rel_rep->BufferType];
2609 pBuffer->valid_data = TRUE;
2610 pBuffer->owned_by_firmware =
2612 pBuffer->immediate = FALSE;
2615 mpr_dispatch_event(sc, baddr,
2616 (MPI2_EVENT_NOTIFICATION_REPLY *)
2620 le16toh(desc->AddressReply.SMID)];
2621 if (cm->cm_state == MPR_CM_STATE_INQUEUE) {
2622 cm->cm_reply = reply;
2624 le32toh(desc->AddressReply.
2627 mpr_dprint(sc, MPR_RECOVERY,
2628 "Bad state for ADDRESS_REPLY status,"
2629 " ignoring state %d cm %p\n",
2635 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
2636 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
2637 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
2640 mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n",
2641 desc->Default.ReplyFlags);
2647 // Print Error reply frame
2649 mpr_display_reply_info(sc, cm->cm_reply);
2650 mpr_complete_command(sc, cm);
2654 if (pq != sc->replypostindex) {
2655 mpr_dprint(sc, MPR_TRACE, "%s sc %p writing postindex %d\n",
2656 __func__, sc, sc->replypostindex);
2657 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
2658 sc->replypostindex);
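/*
 * A minimal sketch of the consume-and-invalidate pattern used by
 * mpr_intr_locked() above, isolated from driver state. Assumes a ring of
 * 64-bit descriptors where an all-ones value means "unused"; the names
 * below are hypothetical.
 */
#if 0
static bool
ring_consume(volatile uint64_t *ring, u_int *idx, u_int depth, uint64_t *out)
{
	/* Atomically fetch the descriptor and mark the slot unused, so a
	 * reentrant pass sees the slot as already consumed. */
	uint64_t val = atomic_swap_64(&ring[*idx], 0xffffffffffffffffULL);

	if (val == 0xffffffffffffffffULL)
		return (false);		/* nothing posted at this slot */
	/* Advance (and wrap) the index before acting on the descriptor, so
	 * that a nested reset which zeroes the index is not undone. */
	if (++*idx >= depth)
		*idx = 0;
	*out = val;
	return (true);
}
#endif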
2665 mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
2666 MPI2_EVENT_NOTIFICATION_REPLY *reply)
2668 struct mpr_event_handle *eh;
2669 int event, handled = 0;
2671 event = le16toh(reply->Event);
2672 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2673 if (isset(eh->mask, event)) {
2674 eh->callback(sc, data, reply);
2680 mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n",
2684 * This is the only place that the event/reply should be freed.
2685 * Anything wanting to hold onto the event data should have
2686 * already copied it into their own storage.
2688 mpr_free_reply(sc, data);
2692 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm)
2694 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2697 MPR_DPRINT_EVENT(sc, generic,
2698 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
2700 mpr_free_command(sc, cm);
2702 /* next, send a port enable */
2707 * For both register_events and update_events, the caller supplies a bitmap
2708 * of events that it _wants_. These functions then turn that into a bitmask
2709 * suitable for the controller.
2712 mpr_register_events(struct mpr_softc *sc, uint8_t *mask,
2713 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle)
2715 struct mpr_event_handle *eh;
2718 eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO);
2720 mpr_dprint(sc, MPR_EVENT|MPR_ERROR,
2721 "Cannot allocate event memory\n");
2726 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
2728 error = mpr_update_events(sc, eh, mask);
2735 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle,
2738 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
2739 MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL;
2740 struct mpr_command *cm = NULL;
2741 struct mpr_event_handle *eh;
2744 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2746 if ((mask != NULL) && (handle != NULL))
2747 bcopy(mask, &handle->mask[0], 16);
2748 memset(sc->event_mask, 0xff, 16);
2750 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2751 for (i = 0; i < 16; i++)
2752 sc->event_mask[i] &= ~eh->mask[i];
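/*
 * Note on mask polarity (a reading of the loop above): the IOC masks OFF
 * every event whose bit is SET in EventMasks. Starting from all-ones and
 * clearing each bit that a registered handler wants leaves exactly the
 * unwanted events masked; e.g. if only MPI2_EVENT_LOG_DATA is wanted,
 * every EventMasks bit except that one remains set.
 */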
2755 if ((cm = mpr_alloc_command(sc)) == NULL)
2757 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2758 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2759 evtreq->MsgFlags = 0;
2760 evtreq->SASBroadcastPrimitiveMasks = 0;
2761 #ifdef MPR_DEBUG_ALL_EVENTS
2763 u_char fullmask[16];
2764 memset(fullmask, 0x00, 16);
2765 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
2768 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
2770 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2773 error = mpr_request_polled(sc, &cm);
2775 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
2776 if ((reply == NULL) ||
2777 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
2781 MPR_DPRINT_EVENT(sc, generic, reply);
2783 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error);
2786 mpr_free_command(sc, cm);
2791 mpr_reregister_events(struct mpr_softc *sc)
2793 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
2794 struct mpr_command *cm;
2795 struct mpr_event_handle *eh;
2798 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2800 /* first, reregister events */
2802 memset(sc->event_mask, 0xff, 16);
2804 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2805 for (i = 0; i < 16; i++)
2806 sc->event_mask[i] &= ~eh->mask[i];
2809 if ((cm = mpr_alloc_command(sc)) == NULL)
2811 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2812 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2813 evtreq->MsgFlags = 0;
2814 evtreq->SASBroadcastPrimitiveMasks = 0;
2815 #ifdef MPR_DEBUG_ALL_EVENTS
2817 u_char fullmask[16];
2818 memset(fullmask, 0x00, 16);
2819 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
2822 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
2824 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2826 cm->cm_complete = mpr_reregister_events_complete;
2828 error = mpr_map_command(sc, cm);
2830 mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__,
2836 mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle)
2839 TAILQ_REMOVE(&sc->event_list, handle, eh_list);
2840 free(handle, M_MPR);
2841 return (mpr_update_events(sc, NULL, NULL));
2845 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a
2846 * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry
2847 * of the NVMe message (PRP1). If the data buffer is small enough to be described
2848 * entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to
2849 * describe a larger data buffer. If the data buffer is too large to describe
2850 * using the two PRP entries inside the NVMe message, then PRP1 describes the
2851 * first data memory segment, and PRP2 contains a pointer to a PRP list located
2852 * elsewhere in memory to describe the remaining data memory segments. The PRP
2853 * list will be contiguous.
2855 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
2856 * consists of a list of PRP entries to describe a number of noncontiguous
2857 * physical memory segments as a single memory buffer, just as a SGL does. Note
2858 * however, that this function is only used by the IOCTL call, so the memory
2859 * given will be guaranteed to be contiguous. There is no need to translate
2860 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous
2861 * space that is one page size each.
2863 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
2864 * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains
2865 * the second PRP element if the memory being described fits within 2 PRP
2866 * entries, or a PRP list pointer if the PRP spans more than two entries.
2868 * A PRP list pointer contains the address of a PRP list, structured as a linear
2869 * array of PRP entries. Each PRP entry in this list describes a segment of
2872 * Each 64-bit PRP entry comprises an address and an offset field. The address
2873 * always points to the beginning of a PAGE_SIZE physical memory page, and the
2874 * offset describes where within that page the memory segment begins. Only the
2875 * first element in a PRP list may contain a non-zero offset, implying that all
2876 * memory segments following the first begin at the start of a PAGE_SIZE page.
2878 * Each PRP element normally describes a chunk of PAGE_SIZE physical memory,
2879 * with exceptions for the first and last elements in the list. If the memory
2880 * being described by the list begins at a non-zero offset within the first page,
2881 * then the first PRP element will contain a non-zero offset indicating where the
2882 * region begins within the page. The last memory segment may end before the end
2883 * of the PAGE_SIZE segment, depending upon the overall size of the memory being
2884 * described by the PRP list.
2886 * Since PRP entries lack any indication of size, the overall data buffer length
2887 * is used to determine where the end of the data memory buffer is located, and
2888 * how many PRP entries are required to describe it.
2893 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm,
2894 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data,
2895 uint32_t data_in_sz, uint32_t data_out_sz)
2897 int prp_size = PRP_ENTRY_SIZE;
2898 uint64_t *prp_entry, *prp1_entry, *prp2_entry;
2899 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys;
2900 uint32_t offset, entry_len, page_mask_result, page_mask;
2903 struct mpr_prp_page *prp_page_info = NULL;
2906 * Not all commands require a data transfer. If no data, just return
2907 * without constructing any PRP.
2909 if (!data_in_sz && !data_out_sz)
2913 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is
2914 * located at a 24 byte offset from the start of the NVMe command. Then
2915 * set the current PRP entry pointer to PRP1.
2917 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
2918 NVME_CMD_PRP1_OFFSET);
2919 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
2920 NVME_CMD_PRP2_OFFSET);
2921 prp_entry = prp1_entry;
2924 * For the PRP entries, use the specially allocated buffer of
2925 * contiguous memory. PRP Page allocation failures should not happen
2926 * because there should be enough PRP page buffers to account for the
2927 * possible NVMe QDepth.
2929 prp_page_info = mpr_alloc_prp_page(sc);
2930 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
2931 "used for building a native NVMe SGL.\n", __func__));
2932 prp_page = (uint64_t *)prp_page_info->prp_page;
2933 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
2936 * Insert the allocated PRP page into the command's PRP page list. This
2937 * will be freed when the command is freed.
2939 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
2942 * Check if we are within 1 entry of a page boundary; we don't want our
2943 * first entry to be a PRP List entry.
2945 page_mask = PAGE_SIZE - 1;
2946 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) &
2948 if (!page_mask_result)
2950 /* Bump up to next page boundary. */
2951 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size);
2952 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys +
2957 * Set PRP physical pointer, which initially points to the current PRP
2960 prp_entry_phys = prp_page_phys;
2962 /* Get physical address and length of the data buffer. */
2963 paddr = (bus_addr_t)(uintptr_t)data;
2965 length = data_in_sz;
2967 length = data_out_sz;
2969 /* Loop while the length is not zero. */
2973 * Check if we need to put a list pointer here, i.e. if we are at a
2974 * page boundary minus prp_size (8 bytes).
2976 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys +
2977 prp_size) & page_mask;
2978 if (!page_mask_result)
2981 * This is the last entry in a PRP List, so we need to
2982 * put a PRP list pointer here. What this does is:
2983 * - bump the current memory pointer to the next
2984 * address, which will be the next full page.
2985 * - set the PRP Entry to point to that page. This is
2986 * now the PRP List pointer.
2987 * - bump the PRP Entry pointer the start of the next
2988 * page. Since all of this PRP memory is contiguous,
2989 * no need to get a new page - it's just the next
2994 htole64((uint64_t)(uintptr_t)prp_entry_phys);
2998 /* Need to handle if entry will be part of a page. */
2999 offset = (uint32_t)paddr & page_mask;
3000 entry_len = PAGE_SIZE - offset;
3002 if (prp_entry == prp1_entry)
3005 * Must fill in the first PRP pointer (PRP1) before
3008 *prp1_entry = htole64((uint64_t)paddr);
3011 * Now point to the second PRP entry within the
3014 prp_entry = prp2_entry;
3016 else if (prp_entry == prp2_entry)
3019 * Should the PRP2 entry be a PRP List pointer or just a
3020 * regular PRP pointer? If there is more than one more
3021 * page of data, must use a PRP List pointer.
3023 if (length > PAGE_SIZE)
3026 * PRP2 will contain a PRP List pointer because
3027 * more PRP's are needed with this command. The
3028 * list will start at the beginning of the
3029 * contiguous buffer.
3033 (uint64_t)(uintptr_t)prp_entry_phys);
3036 * The next PRP Entry will be the start of the
3039 prp_entry = prp_page;
3044 * After this, the PRP Entries are complete.
3045 * This command uses 2 PRP's and no PRP list.
3047 *prp2_entry = htole64((uint64_t)paddr);
3053 * Put entry in list and bump the addresses.
3055 * After PRP1 and PRP2 are filled in, this will fill in
3056 * all remaining PRP entries in a PRP List, one per each
3057 * time through the loop.
3059 *prp_entry = htole64((uint64_t)paddr);
3065 * Bump the phys address of the command's data buffer by the
3070 /* Decrement length accounting for last partial page. */
3071 if (entry_len > length)
3074 length -= entry_len;
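/*
 * Worked example for the PRP construction above (a sketch assuming a
 * 4KiB PAGE_SIZE): a 20KiB IOCTL buffer starting 0x200 bytes into its
 * first page touches 6 pages, so 6 PRP entries are needed. PRP1 holds
 * the first entry (the only one allowed a non-zero offset), and since
 * more than one entry remains, PRP2 becomes a PRP list pointer to the
 * remaining 5 page-aligned entries in the contiguous PRP page.
 */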
3079 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to
3080 * determine if the driver needs to build a native SGL. If so, that native SGL
3081 * is built in the contiguous buffers allocated especially for PCIe SGL
3082 * creation. If the driver will not build a native SGL, return TRUE and a
3083 * normal IEEE SGL will be built. Currently this routine supports NVMe devices only.
3086 * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built.
3089 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm,
3090 bus_dma_segment_t *segs, int segs_left)
3092 uint32_t i, sge_dwords, length, offset, entry_len;
3093 uint32_t num_entries, buff_len = 0, sges_in_segment;
3094 uint32_t page_mask, page_mask_result, *curr_buff;
3095 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset;
3096 uint32_t first_page_data_size, end_residual;
3099 int build_native_sgl = 0, first_prp_entry;
3100 int prp_size = PRP_ENTRY_SIZE;
3101 Mpi25IeeeSgeChain64_t *main_chain_element = NULL;
3102 struct mpr_prp_page *prp_page_info = NULL;
3104 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
3107 * Add up the sizes of each segment length to get the total transfer
3108 * size, which will be checked against the Maximum Data Transfer Size.
3109 * If the data transfer length exceeds the MDTS for this device, just
3110 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O
3111 * up into multiple I/O's. [nvme_mdts = 0 means unlimited]
3113 for (i = 0; i < segs_left; i++)
3114 buff_len += htole32(segs[i].ds_len);
3115 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS))
3118 /* Create page_mask (to get offset within page) */
3119 page_mask = PAGE_SIZE - 1;
3122 * Check if the number of elements exceeds the max number that can be
3123 * put in the main message frame (H/W can only translate an SGL that
3124 * is contained entirely in the main message frame).
3126 sges_in_segment = (sc->reqframesz -
3127 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION);
3128 if (segs_left > sges_in_segment)
3129 build_native_sgl = 1;
3133 * NVMe uses one PRP for each physical page (or part of physical
3135 * if 4 pages or less then IEEE is OK
3136 * if > 5 pages then we need to build a native SGL
3137 * if > 4 and <= 5 pages, then check the physical address of
3138 * the first SG entry, then if this first size in the page
3139 * is >= the residual beyond 4 pages then use IEEE,
3140 * otherwise use native SGL
3142 if (buff_len > (PAGE_SIZE * 5))
3143 build_native_sgl = 1;
3144 else if ((buff_len > (PAGE_SIZE * 4)) &&
3145 (buff_len <= (PAGE_SIZE * 5)))
3147 msg_phys = (uint64_t *)(uintptr_t)segs[0].ds_addr;
3149 ((uint32_t)(uint64_t)(uintptr_t)msg_phys &
3151 first_page_data_size = PAGE_SIZE - first_page_offset;
3152 end_residual = buff_len % PAGE_SIZE;
3155 * If offset into first page pushes the end of the data
3156 * beyond end of the 5th page, we need the extra PRP
3159 if (first_page_data_size < end_residual)
3160 build_native_sgl = 1;
3163 * Check if first SG entry size is < residual beyond 4
3166 if (htole32(segs[0].ds_len) <
3167 (buff_len - (PAGE_SIZE * 4)))
3168 build_native_sgl = 1;
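/*
 * Worked example of the 4/5-page rule above (a sketch, 4KiB PAGE_SIZE):
 * for an 18KiB transfer (between 4 and 5 pages), end_residual is
 * 18KiB % 4KiB = 2KiB. If the first SG entry begins 3KiB into its page,
 * first_page_data_size is 1KiB < 2KiB, so the data spills past five
 * pages' worth of PRPs and a native SGL must be built. If it begins
 * 1KiB into the page, 3KiB >= 2KiB and IEEE may still be used, subject
 * to the first-SGE-length check that follows.
 */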
3172 /* check if native SGL is needed */
3173 if (!build_native_sgl)
3177 * Native SGL is needed.
3178 * Put a chain element in main message frame that points to the first
3181 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
3185 /* Set main message chain element pointer */
3186 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge;
3189 * For NVMe the chain element needs to be the 2nd SGL entry in the main
3192 main_chain_element = (Mpi25IeeeSgeChain64_t *)
3193 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
3196 * For the PRP entries, use the specially allocated buffer of
3197 * contiguous memory. PRP Page allocation failures should not happen
3198 * because there should be enough PRP page buffers to account for the
3199 * possible NVMe QDepth.
3201 prp_page_info = mpr_alloc_prp_page(sc);
3202 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
3203 "used for building a native NVMe SGL.\n", __func__));
3204 curr_buff = (uint32_t *)prp_page_info->prp_page;
3205 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
3208 * Insert the allocated PRP page into the command's PRP page list. This
3209 * will be freed when the command is freed.
3211 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
3214 * Check if we are within 1 entry of a page boundary; we don't want our
3215 * first entry to be a PRP List entry.
3217 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) &
3219 if (!page_mask_result) {
3220 /* Bump up to next page boundary. */
3221 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size);
3222 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size);
3225 /* Fill in the chain element and make it an NVMe segment type. */
3226 main_chain_element->Address.High =
3227 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32));
3228 main_chain_element->Address.Low =
3229 htole32((uint32_t)(uintptr_t)msg_phys);
3230 main_chain_element->NextChainOffset = 0;
3231 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3232 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
3233 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
3235 /* Set SGL pointer to start of contiguous PCIe buffer. */
3236 ptr_sgl = curr_buff;
3241 * NVMe has a very convoluted PRP format. One PRP is required for each
3242 * page or partial page. We need to split up OS SG entries if they are
3243 * longer than one page or cross a page boundary. We also have to insert
3244 * a PRP list pointer entry as the last entry in each physical page of
3247 * NOTE: The first PRP "entry" is actually placed in the first SGL entry
3248 * in the main message in IEEE 64 format. The 2nd entry in the main
3249 * message is the chain element, and the rest of the PRP entries are
3250 * built in the contiguous PCIe buffer.
3252 first_prp_entry = 1;
3253 ptr_first_sgl = (uint32_t *)cm->cm_sge;
3255 for (i = 0; i < segs_left; i++) {
3256 /* Get physical address and length of this SG entry. */
3257 paddr = segs[i].ds_addr;
3258 length = segs[i].ds_len;
3261 * Check whether a given SGE buffer starts on a non-page
3262 * boundary when it is not the first segment. That is not
3263 * expected, so have the FW build the SGL instead.
3265 if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) {
3266 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while "
3267 "building NVMe PRPs, low address is 0x%x\n",
3272 /* Apart from the last SGE, if any other SGE does not end on a page
3273 * boundary then a hole exists in the buffer. Such a hole leads to
3274 * data corruption, so fall back to IEEE SGEs.
3276 if (i != (segs_left - 1)) {
3277 if (((uint32_t)paddr + length) & page_mask) {
3278 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE "
3279 "boundary while building NVMe PRPs, low "
3280 "address: 0x%x and length: %u\n",
3281 (uint32_t)paddr, length);
3286 /* Loop while the length is not zero. */
3289 * Check if we need to put a list pointer here, i.e. if we
3290 * are at a page boundary minus prp_size.
3292 page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl +
3293 prp_size) & page_mask;
3294 if (!page_mask_result) {
3296 * Need to put a PRP list pointer here.
3298 msg_phys = (uint64_t *)((uint8_t *)msg_phys +
3300 *ptr_sgl = htole32((uintptr_t)msg_phys);
3301 *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t)
3303 ptr_sgl += sge_dwords;
3307 /* Need to handle if entry will be part of a page. */
3308 offset = (uint32_t)paddr & page_mask;
3309 entry_len = PAGE_SIZE - offset;
3310 if (first_prp_entry) {
3312 * Put IEEE entry in first SGE in main message.
3313 * (Simple element, System addr, not end of
3316 *ptr_first_sgl = htole32((uint32_t)paddr);
3317 *(ptr_first_sgl + 1) =
3318 htole32((uint32_t)((uint64_t)paddr >> 32));
3319 *(ptr_first_sgl + 2) = htole32(entry_len);
3320 *(ptr_first_sgl + 3) = 0;
3322 /* No longer the first PRP entry. */
3323 first_prp_entry = 0;
3325 /* Put entry in list. */
3326 *ptr_sgl = htole32((uint32_t)paddr);
3328 htole32((uint32_t)((uint64_t)paddr >> 32));
3330 /* Bump ptr_sgl, msg_phys, and num_entries. */
3331 ptr_sgl += sge_dwords;
3332 msg_phys = (uint64_t *)((uint8_t *)msg_phys +
3337 /* Bump the phys address by the entry_len. */
3340 /* Decrement length accounting for last partial page. */
3341 if (entry_len > length)
3344 length -= entry_len;
3348 /* Set chain element Length. */
3349 main_chain_element->Length = htole32(num_entries * prp_size);
3351 /* Return 0, indicating we built a native SGL. */
3356 * Add a chain element as the next SGE for the specified command.
3357 * Reset cm_sge and cm_sglsize to indicate all the available space. Chains are
3358 * only required for IEEE commands. Therefore there is no code for commands
3359 * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands
3360 * shouldn't be requesting chains).
3363 mpr_add_chain(struct mpr_command *cm, int segsleft)
3365 struct mpr_softc *sc = cm->cm_sc;
3366 MPI2_REQUEST_HEADER *req;
3367 MPI25_IEEE_SGE_CHAIN64 *ieee_sgc;
3368 struct mpr_chain *chain;
3369 int sgc_size, current_segs, rem_segs, segs_per_frame;
3370 uint8_t next_chain_offset = 0;
3373 * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3
3374 * only IEEE commands should be requesting chains. Return some error
3375 * code other than 0.
3377 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) {
3378 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to "
3383 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64);
3384 if (cm->cm_sglsize < sgc_size)
3385 panic("MPR: Need SGE Error Code\n");
3387 chain = mpr_alloc_chain(cm->cm_sc);
3392 * Note: a doubly-linked list is used to make it easier to walk for debugging.
3395 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
3398 * Need to know if the number of frames left is more than 1 or not. If
3399 * more than 1 frame is required, NextChainOffset will need to be set,
3400 * which will just be the last segment of the frame.
3403 if (cm->cm_sglsize < (sgc_size * segsleft)) {
3405 * rem_segs is the number of segments remaining after the
3406 * segments that will go into the current frame. Since it is
3407 * known that at least one more frame is required, account for
3408 * the chain element. To know if more than one more frame is
3409 * required, just check if there will be a remainder after using
3410 * the current frame (with this chain) and the next frame. If
3411 * so the NextChainOffset must be the last element of the next
3414 current_segs = (cm->cm_sglsize / sgc_size) - 1;
3415 rem_segs = segsleft - current_segs;
3416 segs_per_frame = sc->chain_frame_size / sgc_size;
3417 if (rem_segs > segs_per_frame) {
3418 next_chain_offset = segs_per_frame - 1;
3421 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain;
3422 ieee_sgc->Length = next_chain_offset ?
3423 htole32((uint32_t)sc->chain_frame_size) :
3424 htole32((uint32_t)rem_segs * (uint32_t)sgc_size);
3425 ieee_sgc->NextChainOffset = next_chain_offset;
3426 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3427 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3428 ieee_sgc->Address.Low = htole32(chain->chain_busaddr);
3429 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32);
3430 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple;
3431 req = (MPI2_REQUEST_HEADER *)cm->cm_req;
3432 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4;
3434 cm->cm_sglsize = sc->chain_frame_size;
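/*
 * Worked example for the NextChainOffset math above (a sketch assuming
 * a 128-byte chain frame and 16-byte IEEE chain SGEs, so segs_per_frame
 * is 8): with room for 4 SGEs left in the current frame and 20 segments
 * to go, current_segs = 3 (one slot is consumed by this chain element)
 * and rem_segs = 17. Since 17 > 8, yet another chain will be needed, so
 * next_chain_offset = 7: the last slot of the next frame is reserved
 * for the next chain element.
 */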
3439 * Add one scatter-gather element to the scatter-gather list for a command.
3440 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the
3441 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a
3442 * chain, so don't consider any chain additions.
3445 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len,
3448 uint32_t saved_buf_len, saved_address_low, saved_address_high;
3452 * case 1: >=1 more segment, no room for anything (error)
3453 * case 2: 1 more segment and enough room for it
3456 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) {
3457 mpr_dprint(cm->cm_sc, MPR_ERROR,
3458 "%s: warning: Not enough room for MPI SGL in frame.\n",
3463 KASSERT(segsleft == 1,
3464 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n",
3468 * There is one more segment left to add for the MPI SGL and there is
3469 * enough room in the frame to add it. This is the normal case because
3470 * MPI SGL's don't have chains, otherwise something is wrong.
3472 * If this is a bi-directional request, need to account for that
3473 * here. Save the pre-filled sge values. These will be used
3474 * either for the 2nd SGL or for a single direction SGL. If
3475 * cm_out_len is non-zero, this is a bi-directional request, so
3476 * fill in the OUT SGL first, then the IN SGL, otherwise just
3477 * fill in the IN SGL. Note that at this time, when filling in
3478 * 2 SGL's for a bi-directional request, they both use the same
3479 * DMA buffer (same cm command).
3481 saved_buf_len = sge->FlagsLength & 0x00FFFFFF;
3482 saved_address_low = sge->Address.Low;
3483 saved_address_high = sge->Address.High;
3484 if (cm->cm_out_len) {
3485 sge->FlagsLength = cm->cm_out_len |
3486 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3487 MPI2_SGE_FLAGS_END_OF_BUFFER |
3488 MPI2_SGE_FLAGS_HOST_TO_IOC |
3489 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
3490 MPI2_SGE_FLAGS_SHIFT);
3491 cm->cm_sglsize -= len;
3492 /* Endian Safe code */
3493 sge_flags = sge->FlagsLength;
3494 sge->FlagsLength = htole32(sge_flags);
3495 sge->Address.High = htole32(sge->Address.High);
3496 sge->Address.Low = htole32(sge->Address.Low);
3497 bcopy(sge, cm->cm_sge, len);
3498 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
3500 sge->FlagsLength = saved_buf_len |
3501 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3502 MPI2_SGE_FLAGS_END_OF_BUFFER |
3503 MPI2_SGE_FLAGS_LAST_ELEMENT |
3504 MPI2_SGE_FLAGS_END_OF_LIST |
3505 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
3506 MPI2_SGE_FLAGS_SHIFT);
3507 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) {
3509 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
3510 MPI2_SGE_FLAGS_SHIFT);
3513 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
3514 MPI2_SGE_FLAGS_SHIFT);
3516 sge->Address.Low = saved_address_low;
3517 sge->Address.High = saved_address_high;
3519 cm->cm_sglsize -= len;
3520 /* Endian Safe code */
3521 sge_flags = sge->FlagsLength;
3522 sge->FlagsLength = htole32(sge_flags);
3523 sge->Address.High = htole32(sge->Address.High);
3524 sge->Address.Low = htole32(sge->Address.Low);
3525 bcopy(sge, cm->cm_sge, len);
3526 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
3531 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter-
3532 * gather list for a command. Maintain cm_sglsize and cm_sge as the
3533 * remaining size and pointer to the next SGE to fill in, respectively.
3536 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft)
3538 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep;
3539 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION);
3540 uint32_t saved_buf_len, saved_address_low, saved_address_high;
3541 uint32_t sge_length;
3544 * case 1: No room for chain or segment (error).
3545 * case 2: Two or more segments left but only room for chain.
3546 * case 3: Last segment and room for it, so set flags.
3550 * There should be room for at least one element, or there is a big
3553 if (cm->cm_sglsize < ieee_sge_size)
3554 panic("MPR: Need SGE Error Code\n");
3556 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) {
3557 if ((error = mpr_add_chain(cm, segsleft)) != 0)
3561 if (segsleft == 1) {
3563 * If this is a bi-directional request, need to account for that
3564 * here. Save the pre-filled sge values. These will be used
3565 * either for the 2nd SGL or for a single direction SGL. If
3566 * cm_out_len is non-zero, this is a bi-directional request, so
3567 * fill in the OUT SGL first, then the IN SGL, otherwise just
3568 * fill in the IN SGL. Note that at this time, when filling in
3569 * 2 SGL's for a bi-directional request, they both use the same
3570 * DMA buffer (same cm command).
3572 saved_buf_len = sge->Length;
3573 saved_address_low = sge->Address.Low;
3574 saved_address_high = sge->Address.High;
3575 if (cm->cm_out_len) {
3576 sge->Length = cm->cm_out_len;
3577 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3578 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3579 cm->cm_sglsize -= ieee_sge_size;
3580 /* Endian Safe code */
3581 sge_length = sge->Length;
3582 sge->Length = htole32(sge_length);
3583 sge->Address.High = htole32(sge->Address.High);
3584 sge->Address.Low = htole32(sge->Address.Low);
3585 bcopy(sgep, cm->cm_sge, ieee_sge_size);
3587 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
3590 sge->Length = saved_buf_len;
3591 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3592 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
3593 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
3594 sge->Address.Low = saved_address_low;
3595 sge->Address.High = saved_address_high;
3598 cm->cm_sglsize -= ieee_sge_size;
3599 /* Endian Safe code */
3600 sge_length = sge->Length;
3601 sge->Length = htole32(sge_length);
3602 sge->Address.High = htole32(sge->Address.High);
3603 sge->Address.Low = htole32(sge->Address.Low);
3604 bcopy(sgep, cm->cm_sge, ieee_sge_size);
3605 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
3611 * Add one dma segment to the scatter-gather list for a command.
3614 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags,
3617 MPI2_SGE_SIMPLE64 sge;
3618 MPI2_IEEE_SGE_SIMPLE64 ieee_sge;
3620 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) {
3621 ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3622 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3623 ieee_sge.Length = len;
3624 mpr_from_u64(pa, &ieee_sge.Address);
3626 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft));
3629 * This driver always uses 64-bit address elements for
3632 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3633 MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
3634 /* Set Endian safe macro in mpr_push_sge */
3635 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
3636 mpr_from_u64(pa, &sge.Address);
3638 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft));
3643 mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3645 struct mpr_softc *sc;
3646 struct mpr_command *cm;
3647 u_int i, dir, sflags;
3649 cm = (struct mpr_command *)arg;
3653 * In this case, just print out a warning and let the chip tell the
3654 * user they did the wrong thing.
3656 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
3657 mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d "
3658 "segments, more than the %d allowed\n", __func__, nsegs,
3663 * Set up DMA direction flags. Bi-directional requests are also handled
3664 * here. In that case, both direction flags will be set.
3667 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) {
3669 * We have to add a special case for SMP passthrough, there
3670 * is no easy way to generically handle it. The first
3671 * S/G element is used for the command (therefore the
3672 * direction bit needs to be set). The second one is used
3673 * for the reply. We'll leave it to the caller to make
3674 * sure we only have two buffers.
3677 * Even though the busdma man page says it doesn't make
3678 * sense to have both direction flags, it does in this case.
3679 * We have one s/g element being accessed in each direction.
3681 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
3684 * Set the direction flag on the first buffer in the SMP
3685 * passthrough request. We'll clear it for the second one.
3687 sflags |= MPI2_SGE_FLAGS_DIRECTION |
3688 MPI2_SGE_FLAGS_END_OF_BUFFER;
3689 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) {
3690 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
3691 dir = BUS_DMASYNC_PREWRITE;
3693 dir = BUS_DMASYNC_PREREAD;
3695 /* Check if a native SG list is needed for an NVMe PCIe device. */
3696 if (cm->cm_targ && cm->cm_targ->is_nvme &&
3697 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) {
3698 /* A native SG list was built, skip to end. */
3702 for (i = 0; i < nsegs; i++) {
3703 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) {
3704 sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
3706 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
3709 /* Resource shortage, roll back! */
3710 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval))
3711 mpr_dprint(sc, MPR_INFO, "Out of chain frames, "
3712 "consider increasing hw.mpr.max_chains.\n");
3713 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED;
3714 mpr_complete_command(sc, cm);
3720 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
3721 mpr_enqueue_request(sc, cm);
3727 mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
3730 mpr_data_cb(arg, segs, nsegs, error);
3734 * This is the routine to enqueue commands asynchronously.
3735 * Note that the only error path here is from bus_dmamap_load(), which can
3736 * return EINPROGRESS if it is waiting for resources. Other than this, it's
3737 * assumed that if you have a command in-hand, then you have enough credits
3741 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
3745 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
3746 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
3747 &cm->cm_uio, mpr_data_cb2, cm, 0);
3748 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
3749 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
3750 cm->cm_data, mpr_data_cb, cm, 0);
3751 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
3752 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
3753 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
3755 /* Add a zero-length element as needed */
3756 if (cm->cm_sge != NULL)
3757 mpr_add_dmaseg(cm, 0, 0, 0, 1);
3758 mpr_enqueue_request(sc, cm);
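/*
 * A minimal usage sketch for the mapping path above (hypothetical
 * caller; error handling trimmed, my_done is an assumed completion
 * routine): point cm_data/cm_length at the payload, describe the first
 * SGE slot, and let the busdma callback build the SGL and enqueue.
 */
#if 0
	struct mpr_command *cm;

	cm = mpr_alloc_command(sc);
	cm->cm_data = buf;			/* payload buffer */
	cm->cm_length = len;
	cm->cm_sge = &req->SGL;			/* first SGE in the request */
	cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
	cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
	cm->cm_complete = my_done;		/* called from mpr_complete_command */
	error = mpr_map_command(sc, cm);	/* EINPROGRESS is not fatal */
#endif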
3765 * This is the routine to enqueue commands synchronously. An error of
3766 * EINPROGRESS from mpr_map_command() is ignored since the command will
3767 * be executed and enqueued automatically. Other errors come from msleep().
3770 mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout,
3774 struct timeval cur_time, start_time;
3775 struct mpr_command *cm = *cmp;
3777 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET)
3780 cm->cm_complete = NULL;
3781 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP | MPR_CM_FLAGS_POLLED);
3782 error = mpr_map_command(sc, cm);
3783 if ((error != 0) && (error != EINPROGRESS))
3786 // Check for context and wait for 50 mSec at a time until time has
3787 // expired or the command has finished. If msleep can't be used, we need to poll.
3789 #if __FreeBSD_version >= 1000029
3790 if (curthread->td_no_sleeping)
3791 #else //__FreeBSD_version < 1000029
3792 if (curthread->td_pflags & TDP_NOSLEEPING)
3793 #endif //__FreeBSD_version >= 1000029
3794 sleep_flag = NO_SLEEP;
3795 getmicrouptime(&start_time);
3796 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) {
3797 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz);
3798 if (error == EWOULDBLOCK) {
3800 * Record the actual elapsed time in the case of a
3801 * timeout for the message below.
3803 getmicrouptime(&cur_time);
3804 timevalsub(&cur_time, &start_time);
3807 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3808 mpr_intr_locked(sc);
3809 if (sleep_flag == CAN_SLEEP)
3810 pause("mprwait", hz/20);
3814 getmicrouptime(&cur_time);
3815 timevalsub(&cur_time, &start_time);
3816 if (cur_time.tv_sec > timeout) {
3817 error = EWOULDBLOCK;
3823 if (error == EWOULDBLOCK) {
3824 if (cm->cm_timeout_handler == NULL) {
3825 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d,"
3826 " elapsed=%jd\n", __func__, timeout,
3827 (intmax_t)cur_time.tv_sec);
3828 rc = mpr_reinit(sc);
3829 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3832 cm->cm_timeout_handler(sc, cm);
3833 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
3835 * Tell the caller that we freed the command in a
3846 * This is the routine to enqueue a command synchronously and poll for
3847 * completion. Its use should be rare.
3850 mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp)
3853 struct timeval cur_time, start_time;
3854 struct mpr_command *cm = *cmp;
3858 cm->cm_flags |= MPR_CM_FLAGS_POLLED;
3859 cm->cm_complete = NULL;
3860 mpr_map_command(sc, cm);
3862 getmicrouptime(&start_time);
3863 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3864 mpr_intr_locked(sc);
3866 if (mtx_owned(&sc->mpr_mtx))
3867 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
3870 pause("mprpoll", hz/20);
3873 * Check for real-time timeout and fail if more than 60 seconds.
3875 getmicrouptime(&cur_time);
3876 timevalsub(&cur_time, &start_time);
3877 if (cur_time.tv_sec > 60) {
3878 mpr_dprint(sc, MPR_FAULT, "polling failed\n");
3883 cm->cm_state = MPR_CM_STATE_BUSY;
3885 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
3886 rc = mpr_reinit(sc);
3887 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3890 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
3892 * Tell the caller that we freed the command in a
3902 * The MPT driver had a verbose interface for config pages. In this driver,
3903 * it is reduced to much simpler terms, similar to the Linux driver.
3906 mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
3908 MPI2_CONFIG_REQUEST *req;
3909 struct mpr_command *cm;
3912 if (sc->mpr_flags & MPR_FLAGS_BUSY) {
3916 cm = mpr_alloc_command(sc);
3921 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
3922 req->Function = MPI2_FUNCTION_CONFIG;
3923 req->Action = params->action;
3925 req->ChainOffset = 0;
3926 req->PageAddress = params->page_address;
3927 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3928 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
3930 hdr = &params->hdr.Ext;
3931 req->ExtPageType = hdr->ExtPageType;
3932 req->ExtPageLength = hdr->ExtPageLength;
3933 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
3934 req->Header.PageLength = 0; /* Must be set to zero */
3935 req->Header.PageNumber = hdr->PageNumber;
3936 req->Header.PageVersion = hdr->PageVersion;
3938 MPI2_CONFIG_PAGE_HEADER *hdr;
3940 hdr = &params->hdr.Struct;
3941 req->Header.PageType = hdr->PageType;
3942 req->Header.PageNumber = hdr->PageNumber;
3943 req->Header.PageLength = hdr->PageLength;
3944 req->Header.PageVersion = hdr->PageVersion;
3947 cm->cm_data = params->buffer;
3948 cm->cm_length = params->length;
3949 if (cm->cm_data != NULL) {
3950 cm->cm_sge = &req->PageBufferSGE;
3951 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
3952 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
3955 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3957 cm->cm_complete_data = params;
3958 if (params->callback != NULL) {
3959 cm->cm_complete = mpr_config_complete;
3960 return (mpr_map_command(sc, cm));
3962 error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP);
3964 mpr_dprint(sc, MPR_FAULT,
3965 "Error %d reading config page\n", error);
3967 mpr_free_command(sc, cm);
3970 mpr_config_complete(sc, cm);
3977 mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
3983 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
3985 MPI2_CONFIG_REPLY *reply;
3986 struct mpr_config_params *params;
3989 params = cm->cm_complete_data;
3991 if (cm->cm_data != NULL) {
3992 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3993 BUS_DMASYNC_POSTREAD);
3994 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3998 * XXX KDM need to do more error recovery? This results in the
3999 * device in question not getting probed.
4001 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
4002 params->status = MPI2_IOCSTATUS_BUSY;
4006 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
4007 if (reply == NULL) {
4008 params->status = MPI2_IOCSTATUS_BUSY;
4011 params->status = reply->IOCStatus;
4012 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
4013 params->hdr.Ext.ExtPageType = reply->ExtPageType;
4014 params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
4015 params->hdr.Ext.PageType = reply->Header.PageType;
4016 params->hdr.Ext.PageNumber = reply->Header.PageNumber;
4017 params->hdr.Ext.PageVersion = reply->Header.PageVersion;
4019 params->hdr.Struct.PageType = reply->Header.PageType;
4020 params->hdr.Struct.PageNumber = reply->Header.PageNumber;
4021 params->hdr.Struct.PageLength = reply->Header.PageLength;
4022 params->hdr.Struct.PageVersion = reply->Header.PageVersion;
4026 mpr_free_command(sc, cm);
4027 if (params->callback != NULL)
4028 params->callback(sc, params);
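/*
 * A usage sketch for the config page interface above (assumes the caller
 * has already fetched the page header; with a NULL callback the read is
 * synchronous, as mpr_read_config_page waits internally):
 */
#if 0
	struct mpr_config_params params;
	int error;

	bzero(&params, sizeof(params));
	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.page_address = 0;
	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
	params.hdr.Struct.PageNumber = 0;
	params.buffer = page_buf;		/* caller-provided buffer */
	params.length = page_len;
	params.callback = NULL;			/* NULL => synchronous */
	error = mpr_read_config_page(sc, &params);
	if (error != 0 ||
	    (params.status & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		printf("config page read failed\n");
#endif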