2 * Generic routines for LSI Fusion adapters.
5 * Copyright (c) 2000, 2001 by Greg Ansley
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
62 * Support from LSI-Logic has also gone a great deal toward making this a
63 * workable subsystem and is gratefully acknowledged.
66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
67 * Copyright (c) 2005, WHEEL Sp. z o.o.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * All rights reserved.
71 * Redistribution and use in source and binary forms, with or without
72 * modification, are permitted provided that the following conditions are
74 * 1. Redistributions of source code must retain the above copyright
75 * notice, this list of conditions and the following disclaimer.
76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
77 * substantially similar to the "NO WARRANTY" disclaimer below
78 * ("Disclaimer") and any redistribution must be conditioned upon including
79 * a substantially similar Disclaimer requirement for further binary
81 * 3. Neither the names of the above listed copyright holders nor the names
82 * of any contributors may be used to endorse or promote products derived
83 * from this software without specific prior written permission.
85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
98 #include <sys/cdefs.h>
99 __FBSDID("$FreeBSD$");
101 #include <dev/mpt/mpt.h>
102 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
103 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
105 #include <dev/mpt/mpilib/mpi.h>
106 #include <dev/mpt/mpilib/mpi_ioc.h>
107 #include <dev/mpt/mpilib/mpi_fc.h>
108 #include <dev/mpt/mpilib/mpi_targ.h>
110 #include <sys/sysctl.h>
112 #define MPT_MAX_TRYS 3
113 #define MPT_MAX_WAIT 300000
115 static int maxwait_ack = 0;
116 static int maxwait_int = 0;
117 static int maxwait_state = 0;
119 TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
120 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
122 static mpt_reply_handler_t mpt_default_reply_handler;
123 static mpt_reply_handler_t mpt_config_reply_handler;
124 static mpt_reply_handler_t mpt_handshake_reply_handler;
125 static mpt_reply_handler_t mpt_event_reply_handler;
126 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
127 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
128 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
129 static int mpt_soft_reset(struct mpt_softc *mpt);
130 static void mpt_hard_reset(struct mpt_softc *mpt);
131 static int mpt_configure_ioc(struct mpt_softc *mpt);
132 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
134 /************************* Personality Module Support *************************/
136 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
139 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
140 static __inline struct mpt_personality*
141 mpt_pers_find(struct mpt_softc *, u_int);
142 static __inline struct mpt_personality*
143 mpt_pers_find_reverse(struct mpt_softc *, u_int);
145 static __inline struct mpt_personality *
146 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
148 KASSERT(start_at <= MPT_MAX_PERSONALITIES,
149 ("mpt_pers_find: starting position out of range\n"));
151 while (start_at < MPT_MAX_PERSONALITIES
152 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
155 return (mpt_personalities[start_at]);
159 * Used infrequently, so no need to optimize like a forward
160 * traversal where we use the MAX+1 is guaranteed to be NULL
163 static __inline struct mpt_personality *
164 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
166 while (start_at < MPT_MAX_PERSONALITIES
167 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
170 if (start_at < MPT_MAX_PERSONALITIES)
171 return (mpt_personalities[start_at]);
175 #define MPT_PERS_FOREACH(mpt, pers) \
176 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
178 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
180 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
181 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
183 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
185 static mpt_load_handler_t mpt_stdload;
186 static mpt_probe_handler_t mpt_stdprobe;
187 static mpt_attach_handler_t mpt_stdattach;
188 static mpt_enable_handler_t mpt_stdenable;
189 static mpt_event_handler_t mpt_stdevent;
190 static mpt_reset_handler_t mpt_stdreset;
191 static mpt_shutdown_handler_t mpt_stdshutdown;
192 static mpt_detach_handler_t mpt_stddetach;
193 static mpt_unload_handler_t mpt_stdunload;
194 static struct mpt_personality mpt_default_personality =
197 .probe = mpt_stdprobe,
198 .attach = mpt_stdattach,
199 .enable = mpt_stdenable,
200 .event = mpt_stdevent,
201 .reset = mpt_stdreset,
202 .shutdown = mpt_stdshutdown,
203 .detach = mpt_stddetach,
204 .unload = mpt_stdunload
207 static mpt_load_handler_t mpt_core_load;
208 static mpt_attach_handler_t mpt_core_attach;
209 static mpt_enable_handler_t mpt_core_enable;
210 static mpt_reset_handler_t mpt_core_ioc_reset;
211 static mpt_event_handler_t mpt_core_event;
212 static mpt_shutdown_handler_t mpt_core_shutdown;
213 static mpt_shutdown_handler_t mpt_core_detach;
214 static mpt_unload_handler_t mpt_core_unload;
215 static struct mpt_personality mpt_core_personality =
218 .load = mpt_core_load,
219 .attach = mpt_core_attach,
220 .enable = mpt_core_enable,
221 .event = mpt_core_event,
222 .reset = mpt_core_ioc_reset,
223 .shutdown = mpt_core_shutdown,
224 .detach = mpt_core_detach,
225 .unload = mpt_core_unload,
229 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
230 * ordering information. We want the core to always register FIRST.
231 * other modules are set to SI_ORDER_SECOND.
233 static moduledata_t mpt_core_mod = {
234 "mpt_core", mpt_modevent, &mpt_core_personality
236 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
237 MODULE_VERSION(mpt_core, 1);
239 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
/*
 * Module event handler shared by all MPT personality modules.
 * On load the personality is assigned the first free slot of
 * mpt_personalities[], any handler it left NULL is filled in from
 * mpt_default_personality, and its load hook runs; on unload the
 * unload hook runs and the slot is released.
 *
 * NOTE(review): this chunk has lines elided (switch header, case
 * labels, braces, returns); verify control flow against the complete
 * source before editing.
 */
mpt_modevent(module_t mod, int type, void *data)
struct mpt_personality *pers;
pers = (struct mpt_personality *)data;
mpt_load_handler_t **def_handler;
mpt_load_handler_t **pers_handler;
/* Find the first free personality slot. */
for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
if (mpt_personalities[i] == NULL)
/* Table full: registration cannot proceed. */
if (i >= MPT_MAX_PERSONALITIES) {
mpt_personalities[i] = pers;
/* Install standard/noop handlers for any NULL entries. */
def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
pers_handler = MPT_PERS_FIRST_HANDLER(pers);
while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
if (*pers_handler == NULL)
*pers_handler = *def_handler;
/* Let the personality initialize; on failure the slot is released. */
error = (pers->load(pers));
mpt_personalities[i] = NULL;
#if __FreeBSD_version >= 500000
/* Unload path: run the personality's unload hook, then free its slot. */
error = pers->unload(pers);
mpt_personalities[pers->id] = NULL;
/*
 * Standard no-op load handler installed for personalities that do not
 * supply their own.  The personality argument is unused.
 */
static int
mpt_stdload(struct mpt_personality *pers)
{

	/* Load is always successful. */
	return (0);
}
/*
 * Standard no-op probe handler.  The softc argument is unused.
 */
static int
mpt_stdprobe(struct mpt_softc *mpt)
{

	/* Probe is always successful. */
	return (0);
}
/*
 * Standard no-op attach handler.  The softc argument is unused.
 */
static int
mpt_stdattach(struct mpt_softc *mpt)
{

	/* Attach is always successful. */
	return (0);
}
/*
 * Standard no-op enable handler.  The softc argument is unused.
 */
static int
mpt_stdenable(struct mpt_softc *mpt)
{

	/* Enable is always successful. */
	return (0);
}
330 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
332 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
333 /* Event was not for us. */
338 mpt_stdreset(struct mpt_softc *mpt, int type)
343 mpt_stdshutdown(struct mpt_softc *mpt)
348 mpt_stddetach(struct mpt_softc *mpt)
/*
 * Standard no-op unload handler.  The personality argument is unused.
 */
static int
mpt_stdunload(struct mpt_personality *pers)
{

	/* Unload is always successful. */
	return (0);
}
359 /******************************* Bus DMA Support ******************************/
361 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
363 struct mpt_map_info *map_info;
365 map_info = (struct mpt_map_info *)arg;
366 map_info->error = error;
367 map_info->phys = segs->ds_addr;
/**************************** Reply/Event Handling ****************************/
/*
 * Register a reply handler of the given type and return its handler id
 * via *phandler_id.  Registering the same handler twice returns the id
 * of the original registration rather than failing.
 *
 * NOTE(review): this chunk has lines elided (switch header, braces,
 * returns); verify control flow against the complete source.
 */
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
    mpt_handler_t handler, uint32_t *phandler_id)
case MPT_HANDLER_REPLY:
if (phandler_id == NULL)
free_cbi = MPT_HANDLER_ID_NONE;
for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
/*
 * If the same handler is registered multiple
 * times, don't error out.  Just return the
 * index of the original registration.
 */
if (mpt_reply_handlers[cbi] == handler.reply_handler) {
*phandler_id = MPT_CBI_TO_HID(cbi);
/*
 * Fill from the front in the hope that
 * all registered handlers consume only a
 * single cache line.
 *
 * We don't break on the first empty slot so
 * that the full table is checked to see if
 * this handler was previously registered.
 */
if (free_cbi == MPT_HANDLER_ID_NONE &&
    (mpt_reply_handlers[cbi]
    == mpt_default_reply_handler))
/* No free slot was found: registration fails. */
if (free_cbi == MPT_HANDLER_ID_NONE) {
mpt_reply_handlers[free_cbi] = handler.reply_handler;
*phandler_id = MPT_CBI_TO_HID(free_cbi);
mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
/*
 * Deregister a previously registered reply handler, restoring the
 * default handler in its slot.  Fails if handler_id does not name a
 * valid slot or the slot holds a different handler.
 *
 * NOTE(review): switch header, braces and returns are elided in this
 * chunk.
 */
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
    mpt_handler_t handler, uint32_t handler_id)
case MPT_HANDLER_REPLY:
cbi = MPT_CBI(handler_id);
/* Validate the slot before clearing it. */
if (cbi >= MPT_NUM_REPLY_HANDLERS
    || mpt_reply_handlers[cbi] != handler.reply_handler)
mpt_reply_handlers[cbi] = mpt_default_reply_handler;
mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
/*
 * Catch-all reply handler: logs the stray reply, dumps the frame when
 * one is present, and tells the interrupt path to release the reply
 * frame (returns TRUE).
 *
 * NOTE(review): the function header and the opening of the mpt_lprt()
 * call are elided in this chunk.
 */
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
    req, req->serno, reply_desc, reply_frame);
/* Dump the frame for diagnosis when one accompanies the descriptor. */
if (reply_frame != NULL)
mpt_dump_reply_frame(mpt, reply_frame);
mpt_prt(mpt, "Reply Frame Ignored\n");
return (/*free_reply*/TRUE);
/*
 * Reply handler for configuration page requests: copies the returned
 * IOCStatus and page header back into the original request buffer,
 * marks the request DONE, and wakes any sleeper waiting on it.
 *
 * NOTE(review): braces, the wakeup call and the return are elided in
 * this chunk.
 */
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
if (reply_frame != NULL) {
MSG_CONFIG_REPLY *reply;
cfgp = (MSG_CONFIG *)req->req_vbuf;
reply = (MSG_CONFIG_REPLY *)reply_frame;
/* Propagate status and header back into the request's own buffer. */
req->IOCStatus = le16toh(reply_frame->IOCStatus);
bcopy(&reply->Header, &cfgp->Header,
    sizeof(cfgp->Header));
req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
/* Wake anyone sleeping in mpt_wait_req() on this request. */
if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
493 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
494 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
496 /* Nothing to be done. */
/*
 * Event reply handler: fans each EVENT_NOTIFICATION out to every
 * attached personality, queues an EVENT_ACK when the IOC requests one
 * (deferring it via mpt->ack_frames if no request is available), and
 * decides whether the reply frame may be released based on the
 * continuation flag.
 *
 * NOTE(review): braces, returns and several statements are elided in
 * this chunk; verify control flow against the complete source.
 */
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));
switch (reply_frame->Function) {
case MPI_FUNCTION_EVENT_NOTIFICATION:
MSG_EVENT_NOTIFY_REPLY *msg;
struct mpt_personality *pers;
msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
/* Offer the event to every attached personality in turn. */
MPT_PERS_FOREACH(mpt, pers)
handled += pers->event(mpt, req, msg);
if (handled == 0 && mpt->mpt_pers_mask == 0) {
mpt_lprt(mpt, MPT_PRT_INFO,
    "No Handlers For Any Event Notify Frames. "
    "Event %#x (ACK %sequired).\n",
    msg->Event, msg->AckRequired? "r" : "not r");
} else if (handled == 0) {
mpt_lprt(mpt, MPT_PRT_WARN,
    "Unhandled Event Notify Frame. Event %#x "
    "(ACK %sequired).\n",
    msg->Event, msg->AckRequired? "r" : "not r");
if (msg->AckRequired) {
/* Build the ack context from the originating request's index. */
context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS);
ack_req = mpt_get_request(mpt, FALSE);
if (ack_req == NULL) {
struct mpt_evtf_record *evtf;
/* No request available: record the frame so the ack can be
 * sent later when a request is freed (see mpt_free_request). */
evtf = (struct mpt_evtf_record *)reply_frame;
evtf->context = context;
LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
mpt_send_event_ack(mpt, ack_req, msg, context);
/*
 * Don't check for CONTINUATION_REPLY here
 */
case MPI_FUNCTION_PORT_ENABLE:
mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
case MPI_FUNCTION_EVENT_ACK:
mpt_prt(mpt, "unknown event function: %x\n",
    reply_frame->Function);
/*
 * I'm not sure that this continuation stuff works as it should.
 *
 * I've had FC async events occur that free the frame up because
 * the continuation bit isn't set, and then additional async events
 * then occur using the same context. As you might imagine, this
 * leads to Very Bad Thing.
 *
 * Let's just be safe for now and not free them up until we figure
 * out what's actually happening here.
 */
if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
mpt_free_request(mpt, req);
mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
    reply_frame->Function, req, req->serno);
if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
MSG_EVENT_NOTIFY_REPLY *msg =
    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
mpt_prtc(mpt, " Event=0x%x AckReq=%d",
    msg->Event, msg->AckRequired);
mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
    reply_frame->Function, req, req->serno);
if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
MSG_EVENT_NOTIFY_REPLY *msg =
    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
mpt_prtc(mpt, " Event=0x%x AckReq=%d",
    msg->Event, msg->AckRequired);
/*
 * Process an asynchronous event from the IOC.
 * Core personality event hook: handles LOG_DATA by dumping the event
 * data words, silently accepts EVENT_CHANGE (the ack of our own
 * mpt_send_event_request), and ignores events it does not recognize.
 *
 * NOTE(review): case bodies, braces and returns are elided in this
 * chunk.
 */
mpt_core_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
switch(msg->Event & 0xFF) {
case MPI_EVENT_LOG_DATA:
/* Some error occurred that LSI wants logged */
mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
mpt_prt(mpt, "\tEvtLogData: Event Data:");
/* Dump every 32-bit data word carried by the event. */
for (i = 0; i < msg->EventDataLength; i++)
mpt_prtc(mpt, " %08x", msg->Data[i]);
case MPI_EVENT_EVENT_CHANGE:
/*
 * This is just an acknowledgement
 * of our mpt_send_event_request.
 */
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
647 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
648 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
652 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
653 memset(ackp, 0, sizeof (*ackp));
654 ackp->Function = MPI_FUNCTION_EVENT_ACK;
655 ackp->Event = msg->Event;
656 ackp->EventContext = msg->EventContext;
657 ackp->MsgContext = context;
658 mpt_check_doorbell(mpt);
659 mpt_send_cmd(mpt, ack_req);
/***************************** Interrupt Handling *****************************/
/*
 * Interrupt service routine: drains the reply queue, resolving each
 * reply descriptor to a request and its registered reply handler.
 * Address replies carry a DMA'd reply frame; context replies encode
 * the request index directly (with special decoding for SCSI target
 * and LAN contexts).
 *
 * NOTE(review): the function header, several braces/returns and the
 * inner switch header are elided in this chunk; verify against the
 * complete source.
 */
struct mpt_softc *mpt;
mpt = (struct mpt_softc *)arg;
mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
/* Drain every posted reply descriptor. */
while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
MSG_DEFAULT_REPLY *reply_frame;
uint32_t reply_baddr;
if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
/*
 * Insure that the reply frame is coherent.
 */
reply_baddr = MPT_REPLY_BADDR(reply_desc);
offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
bus_dmamap_sync_range(mpt->reply_dmat,
    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
    BUS_DMASYNC_POSTREAD);
reply_frame = MPT_REPLY_OTOV(mpt, offset);
ctxt_idx = le32toh(reply_frame->MsgContext);
/* Context reply: no frame, the descriptor itself is the context. */
type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
ctxt_idx = reply_desc;
mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
ctxt_idx = GET_IO_INDEX(reply_desc);
if (mpt->tgt_cmd_ptrs == NULL) {
    "mpt_intr: no target cmd ptrs\n");
reply_desc = MPT_REPLY_EMPTY;
if (ctxt_idx >= mpt->tgt_cmds_allocated) {
    "mpt_intr: bad tgt cmd ctxt %u\n",
reply_desc = MPT_REPLY_EMPTY;
req = mpt->tgt_cmd_ptrs[ctxt_idx];
mpt_prt(mpt, "no request backpointer "
    "at index %u", ctxt_idx);
reply_desc = MPT_REPLY_EMPTY;
/*
 * Reformulate ctxt_idx to be just as if
 * it were another type of context reply
 * so the code below will find the request
 * via indexing into the pool.
 */
req->index | mpt->scsi_tgt_handler_id;
case MPI_CONTEXT_REPLY_TYPE_LAN:
mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
reply_desc = MPT_REPLY_EMPTY;
mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
reply_desc = MPT_REPLY_EMPTY;
/* A descriptor marked EMPTY above was unresolvable: skip it. */
if (reply_desc == MPT_REPLY_EMPTY) {
if (ntrips++ > 1000) {
/* Map the context to a callback index and a request pool slot. */
cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
if (req_index < MPT_MAX_REQUESTS(mpt)) {
req = &mpt->request_pool[req_index];
mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
    " 0x%x)\n", req_index, reply_desc);
free_rf = mpt_reply_handlers[cb_index](mpt, req,
    reply_desc, reply_frame);
/* Handler says the frame may be returned to the IOC. */
if (reply_frame != NULL && free_rf) {
mpt_free_reply(mpt, reply_baddr);
/*
 * If we got ourselves disabled, don't get stuck in a loop
 */
mpt_disable_ints(mpt);
if (ntrips++ > 1000) {
mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
/******************************* Error Recovery *******************************/
/*
 * Complete every request on 'chain' by synthesizing a minimal reply
 * frame carrying 'iocstatus' and dispatching it to each request's
 * registered reply handler.  Used after an IOC reset to fail out all
 * pending work.
 *
 * NOTE(review): the function header and closing braces are elided in
 * this chunk.
 */
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
MSG_DEFAULT_REPLY ioc_status_frame;
/* Fabricate a reply frame that only carries the IOC status. */
memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
ioc_status_frame.IOCStatus = iocstatus;
while((req = TAILQ_FIRST(chain)) != NULL) {
MSG_REQUEST_HEADER *msg_hdr;
TAILQ_REMOVE(chain, req, links);
msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
/* Mirror the original function/context so the handler matches it. */
ioc_status_frame.Function = msg_hdr->Function;
ioc_status_frame.MsgContext = msg_hdr->MsgContext;
cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
815 /********************************* Diagnostics ********************************/
817 * Perform a diagnostic dump of a reply frame.
820 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
822 mpt_prt(mpt, "Address Reply:\n");
823 mpt_print_reply(reply_frame);
826 /******************************* Doorbell Access ******************************/
827 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
828 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
830 static __inline uint32_t
831 mpt_rd_db(struct mpt_softc *mpt)
833 return mpt_read(mpt, MPT_OFFSET_DOORBELL);
836 static __inline uint32_t
837 mpt_rd_intr(struct mpt_softc *mpt)
839 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
/* Busy wait for a door bell to be read (acknowledged) by the IOC */
/*
 * NOTE(review): the function header, delay call, returns and braces
 * are elided in this chunk.
 */
mpt_wait_db_ack(struct mpt_softc *mpt)
/* Poll up to MPT_MAX_WAIT iterations for the busy bit to clear. */
for (i=0; i < MPT_MAX_WAIT; i++) {
if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
/* maxwait_ack tracks the worst-case wait observed (debug aid). */
maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
/* Busy wait for a door bell interrupt */
/*
 * NOTE(review): the function header, delay call, returns and braces
 * are elided in this chunk.
 */
mpt_wait_db_int(struct mpt_softc *mpt)
/* Poll up to MPT_MAX_WAIT iterations for the doorbell interrupt bit. */
for (i=0; i < MPT_MAX_WAIT; i++) {
if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
/* maxwait_int tracks the worst-case wait observed (debug aid). */
maxwait_int = i > maxwait_int ? i : maxwait_int;
/* Check that the IOC doorbell reports the RUNNING state before use. */
/* (The original comment here was a stale copy of mpt_wait_state's.) */
/*
 * NOTE(review): the function's braces and return statements are elided
 * in this chunk.
 */
mpt_check_doorbell(struct mpt_softc *mpt)
uint32_t db = mpt_rd_db(mpt);
if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
mpt_prt(mpt, "Device not running\n");
/* Wait for IOC to transition to a given state */
/*
 * NOTE(review): the function header, delay call, returns and braces
 * are elided in this chunk.
 */
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
/* Poll the doorbell state field up to MPT_MAX_WAIT times. */
for (i = 0; i < MPT_MAX_WAIT; i++) {
uint32_t db = mpt_rd_db(mpt);
if (MPT_STATE(db) == state) {
/* maxwait_state tracks the worst-case wait observed (debug aid). */
maxwait_state = i > maxwait_state ? i : maxwait_state;
/************************* Initialization/Configuration ************************/
902 static int mpt_download_fw(struct mpt_softc *mpt);
/* Issue the reset COMMAND to the IOC */
/*
 * Attempt a MESSAGE_UNIT_RESET via the doorbell.  Fails (leaving a
 * hard reset as the fallback) when the IOC is not RUNNING, the
 * doorbell is in use, the ack times out, or the IOC never returns to
 * READY.
 *
 * NOTE(review): braces and return statements are elided in this chunk.
 */
mpt_soft_reset(struct mpt_softc *mpt)
mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
/* Have to use hard reset if we are not in Running state */
if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
mpt_prt(mpt, "soft reset failed: device not running\n");
/* If door bell is in use we don't have a chance of getting
 * a word in since the IOC probably crashed in message
 * processing. So don't waste our time.
 */
if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
/* Send the reset request to the IOC */
mpt_write(mpt, MPT_OFFSET_DOORBELL,
    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
if (mpt_wait_db_ack(mpt) != MPT_OK) {
mpt_prt(mpt, "soft reset failed: ack timeout\n");
/* Wait for the IOC to reload and come out of reset state */
if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
mpt_prt(mpt, "soft reset failed: device did not restart\n");
/*
 * Enable write access to the diagnostic registers by writing the
 * five-value unlock sequence to the sequence register.  Returns early
 * if diagnostic write access (MPI_DIAG_DRWE) is already enabled.
 *
 * NOTE(review): the function header, retry loop and returns are elided
 * in this chunk.
 */
mpt_enable_diag_mode(struct mpt_softc *mpt)
if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
/* Enable diagnostic registers */
mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
969 mpt_disable_diag_mode(struct mpt_softc *mpt)
971 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
/* This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
/*
 * NOTE(review): braces, DELAY calls, the retry-loop header and the
 * firmware-reupload conditional are elided in this chunk; verify
 * against the complete source.
 */
mpt_hard_reset(struct mpt_softc *mpt)
mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
/* Diagnostic register access is required for the reset bit. */
error = mpt_enable_diag_mode(mpt);
mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
mpt_prt(mpt, "Trying to reset anyway.\n");
diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
/*
 * This appears to be a workaround required for some
 * firmware or hardware revs.
 */
mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
/* Diag. port is now active so we can now hit the reset bit */
mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
/*
 * Ensure that the reset has finished.  We delay 1ms
 * prior to reading the register to make sure the chip
 * has sufficiently completed its reset to handle register
 * access.
 */
diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
mpt_prt(mpt, "WARNING - Failed hard reset! "
    "Trying to initialize anyway.\n");
/*
 * If we have firmware to download, it must be loaded before
 * the controller will become operational. Do so now.
 */
if (mpt->fw_image != NULL) {
error = mpt_download_fw(mpt);
mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
mpt_prt(mpt, "Trying to initialize anyway.\n");
/*
 * Resetting the controller should have disabled write
 * access to the diagnostic registers, but disable
 * manually to be sure.
 */
mpt_disable_diag_mode(mpt);
1044 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1047 * Complete all pending requests with a status
1048 * appropriate for an IOC reset.
1050 mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1051 MPI_IOCSTATUS_INVALID_STATE);
/*
 * Reset the IOC when needed. Try software command first then if needed
 * poke at the magic diagnostic reset. Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
 * fouls up the PCI configuration registers.
 *
 * NOTE(review): braces, returns, retry labels and the reset-count bump
 * are elided in this chunk; verify control flow against the complete
 * source.
 */
mpt_reset(struct mpt_softc *mpt, int reinit)
struct mpt_personality *pers;
/*
 * Try a soft reset. If that fails, get out the big hammer.
 */
if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
/* Up to 5 hard-reset attempts. */
for (cnt = 0; cnt < 5; cnt++) {
/* Failed; do a hard reset */
mpt_hard_reset(mpt);
/*
 * Wait for the IOC to reload
 * and come out of reset state
 */
ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
if (ret == MPT_OK) {
/*
 * Okay- try to check again...
 */
ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
if (ret == MPT_OK) {
mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
if (retry_cnt == 0) {
/*
 * Invoke reset handlers. We bump the reset count so
 * that mpt_wait_req() understands that regardless of
 * the specified wait condition, it should stop its wait.
 */
MPT_PERS_FOREACH(mpt, pers)
pers->reset(mpt, ret);
/* Bring the IOC back up (reinit path). */
ret = mpt_enable_ioc(mpt, 1);
if (ret == MPT_OK) {
mpt_enable_ints(mpt);
/* Retry the whole sequence a limited number of times. */
if (ret != MPT_OK && retry_cnt++ < 2) {
/* Return a command buffer to the free queue */
/*
 * Frees 'req' (and, recursively, any chained request).  If an event
 * ack was previously deferred for lack of a request (mpt->ack_frames
 * non-empty), the request is immediately reused to send that ack
 * instead of going back on the free list.
 *
 * NOTE(review): braces, returns and some statements are elided in this
 * chunk.
 */
mpt_free_request(struct mpt_softc *mpt, request_t *req)
struct mpt_evtf_record *record;
uint32_t reply_baddr;
if (req == NULL || req != &mpt->request_pool[req->index]) {
panic("mpt_free_request bad req ptr\n");
if ((nxt = req->chain) != NULL) {
mpt_free_request(mpt, nxt);	/* NB: recursion */
KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n"));
KASSERT(mpt_req_on_free_list(mpt, req) == 0,
    ("mpt_free_request: req %p:%u func %x already on freelist",
    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
    ("mpt_free_request: req %p:%u func %x on pending list",
    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
/* No deferred acks: simply return the request to the free list. */
if (LIST_EMPTY(&mpt->ack_frames)) {
/*
 * Insert free ones at the tail
 */
req->state = REQ_STATE_FREE;
/* Poison the message header to catch use-after-free. */
memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
/* Wake a thread blocked in mpt_get_request(). */
if (mpt->getreqwaiter != 0) {
mpt->getreqwaiter = 0;
wakeup(&mpt->request_free_list);
/*
 * Process an ack frame deferred due to resource shortage.
 */
record = LIST_FIRST(&mpt->ack_frames);
LIST_REMOVE(record, links);
req->state = REQ_STATE_ALLOCATED;
mpt_assign_serno(mpt, req);
mpt_send_event_ack(mpt, req, &record->reply, record->context);
/* Translate the record back to its bus address to free the frame. */
reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
    + (mpt->reply_phys & 0xFFFFFFFF);
mpt_free_reply(mpt, reply_baddr);
/* Get a command buffer from the free queue */
/*
 * Pops the first free request, marks it ALLOCATED and assigns a serial
 * number.  When the list is empty and sleep_ok is nonzero, the caller
 * sleeps until mpt_free_request() wakes it.
 *
 * NOTE(review): the function header, retry loop and return are elided
 * in this chunk.
 */
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n"));
req = TAILQ_FIRST(&mpt->request_free_list);
KASSERT(req == &mpt->request_pool[req->index],
    ("mpt_get_request: corrupted request free list\n"));
KASSERT(req->state == REQ_STATE_FREE,
    ("req %p:%u not free on free list %x index %d function %x",
    req, req->serno, req->state, req->index,
    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
TAILQ_REMOVE(&mpt->request_free_list, req, links);
req->state = REQ_STATE_ALLOCATED;
mpt_assign_serno(mpt, req);
} else if (sleep_ok != 0) {
/* Block until mpt_free_request() signals availability. */
mpt->getreqwaiter = 1;
mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
/* Pass the command to the IOC */
/*
 * Queues 'req' on the pending list and posts its physical address to
 * the IOC's request queue register.  The request DMA map is synced for
 * the device before posting.
 *
 * NOTE(review): the function header and closing brace are elided in
 * this chunk.
 */
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
if (mpt->verbose > MPT_PRT_DEBUG2) {
mpt_dump_request(mpt, req);
/* Make the request visible to the device before posting it. */
bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
    BUS_DMASYNC_PREWRITE);
req->state |= REQ_STATE_QUEUED;
KASSERT(mpt_req_on_free_list(mpt, req) == 0,
    ("req %p:%u func %x on freelist list in mpt_send_cmd",
    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
    ("req %p:%u func %x already on pending list in mpt_send_cmd",
    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 *
 * NOTE(review): braces, the polling DELAY and the return statements
 * are elided in this chunk; also note the wait aborts early if
 * mpt->reset_cnt changes (an IOC reset occurred).
 */
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
    mpt_req_state_t state, mpt_req_state_t mask,
    int sleep_ok, int time_ms)
/*
 * timeout is in ms.  0 indicates infinite wait.
 * Convert to ticks or 500us units depending on
 * our sleep mode.
 */
if (sleep_ok != 0) {
timeout = (time_ms * hz) / 1000;
timeout = time_ms * 2;
/* Ask the completion path to wake us, but don't match on that bit. */
req->state |= REQ_STATE_NEED_WAKEUP;
mask &= ~REQ_STATE_NEED_WAKEUP;
saved_cnt = mpt->reset_cnt;
while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
if (sleep_ok != 0) {
error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
if (error == EWOULDBLOCK) {
/* Polling mode: count down the 500us ticks. */
if (time_ms != 0 && --timeout == 0) {
req->state &= ~REQ_STATE_NEED_WAKEUP;
/* A reset during the wait means the request did not complete. */
if (mpt->reset_cnt != saved_cnt) {
if (time_ms && timeout <= 0) {
MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1292 * Send a command to the IOC via the handshake register.
1294 * Only done at initialization time and for certain unusual
1295 * commands such as device/bus reset as specified by LSI.
/*
 * mpt_send_handshake_cmd:
 *	Push `len` bytes of `cmd` to the IOC one 32-bit word at a time
 *	through the doorbell register, waiting for a doorbell interrupt /
 *	ack after the announcement and after every word.  Fails early if
 *	the doorbell is busy or the IOC is in an unexpected state.
 */
1298 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1301 uint32_t data, *data32;
1303 /* Check condition of the IOC */
1304 data = mpt_rd_db(mpt);
/* Only READY, RUNNING or FAULT states accept a handshake command,
 * and the doorbell must not already be in use. */
1305 if ((MPT_STATE(data) != MPT_DB_STATE_READY
1306 && MPT_STATE(data) != MPT_DB_STATE_RUNNING
1307 && MPT_STATE(data) != MPT_DB_STATE_FAULT)
1308 || MPT_DB_IS_IN_USE(data)) {
1309 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1314 /* We move things in 32 bit chunks */
1315 len = (len + 3) >> 2;
1318 /* Clear any left over pending doorbell interupts */
1319 if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1320 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1323 * Tell the handshake reg. we are going to send a command
1324 * and how long it is going to be.
1326 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
1327 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
1328 mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1330 /* Wait for the chip to notice */
1331 if (mpt_wait_db_int(mpt) != MPT_OK) {
1332 mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n");
1336 /* Clear the interrupt */
1337 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1339 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1340 mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n");
1344 /* Send the command */
/* One dword per iteration; the IOC must ack each before the next. */
1345 for (i = 0; i < len; i++) {
1346 mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++);
1347 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1349 "mpt_send_handshake_cmd timeout! index = %d\n",
1357 /* Get the response from the handshake register */
/*
 * mpt_recv_handshake_reply:
 *	Read a handshake reply from the doorbell register, 16 bits at a
 *	time, into `reply` (at most reply_len bytes).  The first two words
 *	carry the MSG_DEFAULT_REPLY header, whose MsgLength (in dwords)
 *	tells us how much more to drain; excess words beyond reply_len are
 *	read and discarded so the chip's sequence completes.  Returns an
 *	MPT_FAIL-tagged IOCStatus on chip-reported failure.
 */
1359 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1361 int left, reply_left;
1363 MSG_DEFAULT_REPLY *hdr;
1365 /* We move things out in 16 bit chunks */
1367 data16 = (u_int16_t *)reply;
1369 hdr = (MSG_DEFAULT_REPLY *)reply;
1371 /* Get first word */
1372 if (mpt_wait_db_int(mpt) != MPT_OK) {
1373 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1376 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
1377 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1379 /* Get Second Word */
1380 if (mpt_wait_db_int(mpt) != MPT_OK) {
1381 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1384 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
1385 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1388 * With the second word, we can now look at the length.
1389 * Warn about a reply that's too short (except for IOC FACTS REPLY)
1391 if ((reply_len >> 1) != hdr->MsgLength &&
1392 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
1393 #if __FreeBSD_version >= 500000
/* Newer FreeBSD printf supports %zx for size_t; older branch uses %x. */
1394 mpt_prt(mpt, "reply length does not match message length: "
1395 "got %x; expected %zx for function %x\n",
1396 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1398 mpt_prt(mpt, "reply length does not match message length: "
1399 "got %x; expected %x for function %x\n",
1400 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1404 /* Get rest of the reply; but don't overflow the provided buffer */
/* `left`: 16-bit words still owed by the chip; `reply_left`: room left
 * in the caller's buffer.  Both exclude the two header words read above. */
1405 left = (hdr->MsgLength << 1) - 2;
1406 reply_left = reply_len - 2;
1410 if (mpt_wait_db_int(mpt) != MPT_OK) {
1411 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1414 datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);
/* Store only while the caller's buffer has room; always ack the word. */
1416 if (reply_left-- > 0)
1417 *data16++ = datum & MPT_DB_DATA_MASK;
1419 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1422 /* One more wait & clear at the end */
1423 if (mpt_wait_db_int(mpt) != MPT_OK) {
1424 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1427 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1429 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1430 if (mpt->verbose >= MPT_PRT_TRACE)
1431 mpt_print_reply(hdr);
1432 return (MPT_FAIL | hdr->IOCStatus);
/*
 * mpt_get_iocfacts:
 *	Issue an IOC FACTS request over the handshake interface and read
 *	the reply into *freplp.  Used during initialization to learn the
 *	controller's capabilities (credits, frame size, etc.).
 */
1439 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1441 MSG_IOC_FACTS f_req;
1444 memset(&f_req, 0, sizeof f_req);
1445 f_req.Function = MPI_FUNCTION_IOC_FACTS;
1446 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1447 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1450 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
/*
 * mpt_get_portfacts:
 *	Issue a PORT FACTS handshake request for port 0 and read the reply
 *	into *freplp (port type, protocol flags, SCSI ID, device limits).
 */
1455 mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp)
1457 MSG_PORT_FACTS f_req;
1460 /* XXX: Only getting PORT FACTS for Port 0 */
1461 memset(&f_req, 0, sizeof f_req);
1462 f_req.Function = MPI_FUNCTION_PORT_FACTS;
1463 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1464 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1467 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1472 * Send the initialization request. This is where we specify how many
1473 * SCSI busses and how many devices per bus we wish to emulate.
1474 * This is also the command that specifies the max size of the reply
1475 * frames from the IOC that we will be allocating.
/*
 * mpt_send_ioc_init:
 *	Handshake an IOC INIT message: `who` identifies the initiator
 *	(e.g. the host driver), MaxDevices depends on transport type
 *	(255 for one class, controller-reported for SAS, 16 otherwise —
 *	the FC branch head is outside this excerpt), and ReplyFrameSize
 *	fixes the reply-frame allocation size.
 */
1478 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1482 MSG_IOC_INIT_REPLY reply;
1484 memset(&init, 0, sizeof init);
1486 init.Function = MPI_FUNCTION_IOC_INIT;
1488 init.MaxDevices = 255;
1489 } else if (mpt->is_sas) {
1490 init.MaxDevices = mpt->mpt_max_devices;
1492 init.MaxDevices = 16;
1496 init.MsgVersion = htole16(MPI_VERSION);
1497 init.HeaderVersion = htole16(MPI_HEADER_VERSION);
1498 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
1499 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1501 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1505 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
1511 * Utiltity routine to read configuration headers and pages
/*
 * mpt_issue_cfg_req:
 *	Build a CONFIG request in req's frame (action, page header fields,
 *	page address) with a single SIMPLE32 SGE describing the page buffer
 *	at `addr`/`len`, send it, and wait for completion.  SGE direction
 *	is HOST_TO_IOC for page writes, IOC_TO_HOST otherwise.  Returns the
 *	mpt_wait_req() result (0 on completion, non-0 on timeout).
 */
1514 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
1515 u_int PageVersion, u_int PageLength, u_int PageNumber,
1516 u_int PageType, uint32_t PageAddress, bus_addr_t addr,
1517 bus_size_t len, int sleep_ok, int timeout_ms)
1522 cfgp = req->req_vbuf;
1523 memset(cfgp, 0, sizeof *cfgp);
1524 cfgp->Action = Action;
1525 cfgp->Function = MPI_FUNCTION_CONFIG;
1526 cfgp->Header.PageVersion = PageVersion;
1527 cfgp->Header.PageLength = PageLength;
1528 cfgp->Header.PageNumber = PageNumber;
1529 cfgp->Header.PageType = PageType;
1530 cfgp->PageAddress = PageAddress;
1531 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
1533 MPI_pSGE_SET_LENGTH(se, len);
/* Single-element SGL: last element, end of buffer, end of list. */
1534 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1535 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1536 MPI_SGE_FLAGS_END_OF_LIST |
1537 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1538 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
1539 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
/* Route the reply to the CONFIG handler, tagged with this request. */
1540 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1542 mpt_check_doorbell(mpt);
1543 mpt_send_cmd(mpt, req);
1544 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1545 sleep_ok, timeout_ms));
/*
 * mpt_read_cfg_header:
 *	Fetch the CONFIG_PAGE_HEADER for (PageType, PageNumber, PageAddress)
 *	into *rslt via a PAGE_HEADER config action (no data buffer).
 *	On IOC timeout the request is freed and an error returned; an
 *	INVALID_PAGE status is logged at debug level only (page simply
 *	not supported).
 */
1550 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1551 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
1552 int sleep_ok, int timeout_ms)
1558 req = mpt_get_request(mpt, sleep_ok);
1560 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
/* Header-only query: zero version/length, no SGE buffer (addr/len 0). */
1564 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
1565 /*PageVersion*/0, /*PageLength*/0, PageNumber,
1566 PageType, PageAddress, /*addr*/0, /*len*/0,
1567 sleep_ok, timeout_ms);
1569 mpt_free_request(mpt, req);
1570 mpt_prt(mpt, "read_cfg_header timed out\n");
1574 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1575 case MPI_IOCSTATUS_SUCCESS:
/* Copy the header the IOC wrote into the request frame back out. */
1576 cfgp = req->req_vbuf;
1577 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
1580 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1581 mpt_lprt(mpt, MPT_PRT_DEBUG,
1582 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1583 PageType, PageNumber, PageAddress);
1587 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1592 mpt_free_request(mpt, req);
/*
 * mpt_read_cfg_page:
 *	Read a full config page into the caller's buffer at *hdr (which
 *	must start with the page header from mpt_read_cfg_header).  The
 *	IOC DMAs the page into the second half of the request area
 *	(req_pbuf + MPT_RQSL), which is then synced and memcpy'd back.
 */
1597 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1598 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1604 req = mpt_get_request(mpt, sleep_ok);
1606 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
/* Page type is masked to strip attribute bits before re-issue. */
1610 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1611 hdr->PageLength, hdr->PageNumber,
1612 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1613 PageAddress, req->req_pbuf + MPT_RQSL(mpt),
1614 len, sleep_ok, timeout_ms);
1616 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1620 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1621 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1623 mpt_free_request(mpt, req);
/* Device wrote the page via DMA; sync before the CPU reads it. */
1626 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1627 BUS_DMASYNC_POSTREAD);
1628 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1629 mpt_free_request(mpt, req);
/*
 * mpt_write_cfg_page:
 *	Write a config page from the caller's buffer at *hdr.  Refuses
 *	pages whose attribute bits mark them neither CHANGEABLE nor
 *	PERSISTENT.  Attribute bits are stripped for the transfer and
 *	restored in the caller's copy afterwards.  The page data is staged
 *	in the second half of the request area for the IOC to DMA from.
 */
1634 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1635 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1642 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
1643 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
1644 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
1645 mpt_prt(mpt, "page type 0x%x not changeable\n",
1646 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
/* NOTE(review): comma after the &= expression below — the surrounding
 * statement continues on a line outside this excerpt; verify in full src. */
1649 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK,
1651 req = mpt_get_request(mpt, sleep_ok);
/* Stage the caller's page image where the SGE will point. */
1655 memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len);
1656 /* Restore stripped out attributes */
1657 hdr->PageType |= hdr_attr;
1659 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1660 hdr->PageLength, hdr->PageNumber,
1661 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1662 PageAddress, req->req_pbuf + MPT_RQSL(mpt),
1663 len, sleep_ok, timeout_ms);
1665 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1669 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1670 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1672 mpt_free_request(mpt, req);
1675 mpt_free_request(mpt, req);
1680 * Read IOC configuration information
/*
 * mpt_read_config_info_ioc:
 *	Read IOC config pages 2 and 3 to discover RAID capabilities and
 *	size the driver's RAID bookkeeping:
 *	  - IOC page 2: capability flags, active/max volumes and disks;
 *	    allocates mpt->ioc_page2, mpt->raid_volumes (with one page-0
 *	    buffer per possible volume) and mpt->raid_disks.
 *	  - IOC page 3: allocated into mpt->ioc_page3.
 *	An invalid-page result is not an error (RAID simply unsupported).
 *	On any allocation or read failure all RAID memory is released via
 *	mpt_raid_free_mem().
 */
1683 mpt_read_config_info_ioc(struct mpt_softc *mpt)
1685 CONFIG_PAGE_HEADER hdr;
1686 struct mpt_raid_volume *mpt_raid;
1691 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1692 2, 0, &hdr, FALSE, 5000);
1694 * If it's an invalid page, so what? Not a supported function....
1703 #if __FreeBSD_version >= 500000
1704 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, "
1705 "num %x, type %x\n", hdr.PageVersion,
1706 hdr.PageLength * sizeof(uint32_t),
1707 hdr.PageNumber, hdr.PageType);
/* NOTE(review): "%z" below (old-FreeBSD branch) looks like a broken
 * format specifier — confirm against the full source tree. */
1709 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %z, "
1710 "num %x, type %x\n", hdr.PageVersion,
1711 hdr.PageLength * sizeof(uint32_t),
1712 hdr.PageNumber, hdr.PageType);
/* PageLength is in dwords; convert to bytes for the allocation. */
1715 len = hdr.PageLength * sizeof(uint32_t);
1716 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1717 if (mpt->ioc_page2 == NULL) {
1718 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
1719 mpt_raid_free_mem(mpt);
1722 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1723 rv = mpt_read_cur_cfg_page(mpt, 0,
1724 &mpt->ioc_page2->Header, len, FALSE, 5000);
1726 mpt_prt(mpt, "failed to read IOC Page 2\n");
1727 mpt_raid_free_mem(mpt);
/* Walk each set capability bit and print a human-readable name. */
1731 if (mpt->ioc_page2->CapabilitiesFlags != 0) {
1734 mpt_prt(mpt, "Capabilities: (");
1735 for (mask = 1; mask != 0; mask <<= 1) {
1736 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
1740 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
1741 mpt_prtc(mpt, " RAID-0");
1743 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
1744 mpt_prtc(mpt, " RAID-1E");
1746 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
1747 mpt_prtc(mpt, " RAID-1");
1749 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
1750 mpt_prtc(mpt, " SES");
1752 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
1753 mpt_prtc(mpt, " SAFTE");
1755 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
1756 mpt_prtc(mpt, " Multi-Channel-Arrays");
1761 mpt_prtc(mpt, " )\n");
/* If any RAID level is supported, report current volume/disk counts. */
1762 if ((mpt->ioc_page2->CapabilitiesFlags
1763 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1764 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1765 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1766 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1767 mpt->ioc_page2->NumActiveVolumes,
1768 mpt->ioc_page2->NumActiveVolumes != 1
1770 mpt->ioc_page2->MaxVolumes);
1771 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1772 mpt->ioc_page2->NumActivePhysDisks,
1773 mpt->ioc_page2->NumActivePhysDisks != 1
1775 mpt->ioc_page2->MaxPhysDisks);
1779 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1780 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1781 if (mpt->raid_volumes == NULL) {
1782 mpt_prt(mpt, "Could not allocate RAID volume data\n");
1783 mpt_raid_free_mem(mpt);
1788 * Copy critical data out of ioc_page2 so that we can
1789 * safely refresh the page without windows of unreliable
1792 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
/* RAID vol page 0 has one PHYS_DISK embedded; add space for the rest. */
1794 len = sizeof(*mpt->raid_volumes->config_page) +
1795 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
1796 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1797 mpt_raid = &mpt->raid_volumes[i];
1798 mpt_raid->config_page =
1799 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1800 if (mpt_raid->config_page == NULL) {
1801 mpt_prt(mpt, "Could not allocate RAID page data\n");
1802 mpt_raid_free_mem(mpt);
1806 mpt->raid_page0_len = len;
1808 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1809 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1810 if (mpt->raid_disks == NULL) {
1811 mpt_prt(mpt, "Could not allocate RAID disk data\n");
1812 mpt_raid_free_mem(mpt);
1815 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
/* Now IOC page 3 (physical disk mapping info). */
1820 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1821 3, 0, &hdr, FALSE, 5000);
1823 mpt_raid_free_mem(mpt);
1827 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
1828 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
1830 len = hdr.PageLength * sizeof(uint32_t);
1831 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1832 if (mpt->ioc_page3 == NULL) {
1833 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
1834 mpt_raid_free_mem(mpt);
1837 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
1838 rv = mpt_read_cur_cfg_page(mpt, 0,
1839 &mpt->ioc_page3->Header, len, FALSE, 5000);
1841 mpt_raid_free_mem(mpt);
/* Kick the RAID support thread now that its data is populated. */
1844 mpt_raid_wakeup(mpt);
/*
 * mpt_send_port_enable:
 *	Send a PORT_ENABLE message for `port` and wait (polling, since
 *	sleep_ok is FALSE) for completion.  SAS/FC parts get a 30s wait;
 *	parallel SCSI gets 3s.  Returns via mpt_wait_req's result handling;
 *	logs on timeout.
 */
1852 mpt_send_port_enable(struct mpt_softc *mpt, int port)
1855 MSG_PORT_ENABLE *enable_req;
1858 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1862 enable_req = req->req_vbuf;
1863 memset(enable_req, 0, MPT_RQSL(mpt));
1865 enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
/* Reply routed through the CONFIG handler, tagged with this request. */
1866 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1867 enable_req->PortNumber = port;
1869 mpt_check_doorbell(mpt);
1870 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
1872 mpt_send_cmd(mpt, req);
1873 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1874 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
1876 mpt_prt(mpt, "port %d enable timed out\n", port);
1879 mpt_free_request(mpt, req);
1880 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
1885 * Enable/Disable asynchronous event reporting.
/*
 * mpt_send_event_request:
 *	Post an EVENT_NOTIFICATION message with Switch = onoff (1 enables,
 *	0 disables async event reporting).  Fire-and-forget: the command
 *	is sent without waiting; its reply arrives via the EVENTS handler.
 */
1888 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
1891 MSG_EVENT_NOTIFY *enable_req;
1893 req = mpt_get_request(mpt, FALSE);
1897 enable_req = req->req_vbuf;
1898 memset(enable_req, 0, sizeof *enable_req);
1900 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
1901 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
1902 enable_req->Switch = onoff;
1904 mpt_check_doorbell(mpt);
1905 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
1906 onoff ? "en" : "dis");
1908 * Send the command off, but don't wait for it.
1910 mpt_send_cmd(mpt, req);
1915 * Un-mask the interupts on the chip.
/* mpt_enable_ints: unmask all chip interrupts except the doorbell. */
1918 mpt_enable_ints(struct mpt_softc *mpt)
1920 /* Unmask every thing except door bell int */
1921 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
1925 * Mask the interupts on the chip.
/* mpt_disable_ints: mask both reply and doorbell interrupts. */
1928 mpt_disable_ints(struct mpt_softc *mpt)
1930 /* Mask all interrupts */
1931 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1932 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
/*
 * mpt_sysctl_attach:
 *	On FreeBSD 5+ expose a per-device "debug" sysctl (RW int) bound to
 *	mpt->verbose; a no-op on older releases.
 */
1936 mpt_sysctl_attach(struct mpt_softc *mpt)
1938 #if __FreeBSD_version >= 500000
1939 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1940 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1942 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1943 "debug", CTLFLAG_RW, &mpt->verbose, 0,
1944 "Debugging/Verbose level");
/*
 * mpt_attach:
 *	Two-pass personality attach.  Pass 1: probe each registered
 *	personality and call its attach() on a match, recording it in
 *	mpt_pers_mask.  Pass 2: call enable() on every attached
 *	personality, so all attaches complete before any enable runs.
 */
1949 mpt_attach(struct mpt_softc *mpt)
1951 struct mpt_personality *pers;
1955 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
1956 pers = mpt_personalities[i];
/* probe() == 0 means this personality claims the device. */
1960 if (pers->probe(mpt) == 0) {
1961 error = pers->attach(mpt);
1966 mpt->mpt_pers_mask |= (0x1 << pers->id);
1972 * Now that we've attached everything, do the enable function
1973 * for all of the personalities. This allows the personalities
1974 * to do setups that are appropriate for them prior to enabling
1977 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
1978 pers = mpt_personalities[i];
1979 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
1980 error = pers->enable(mpt);
1982 mpt_prt(mpt, "personality %s attached but would"
1983 " not enable (%d)\n", pers->name, error);
/*
 * mpt_shutdown:
 *	Run each attached personality's shutdown hook, in reverse of
 *	attach order.
 */
1993 mpt_shutdown(struct mpt_softc *mpt)
1995 struct mpt_personality *pers;
1997 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
1998 pers->shutdown(mpt);
/*
 * mpt_detach:
 *	Detach personalities in reverse attach order, clearing each one's
 *	bit from mpt_pers_mask.  (The detach() call itself is on a line
 *	outside this excerpt.)
 */
2004 mpt_detach(struct mpt_softc *mpt)
2006 struct mpt_personality *pers;
2008 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2010 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
/*
 * mpt_core_load:
 *	Core-personality load hook: fill the reply-handler dispatch table
 *	with the default handler, then install the event, config, and
 *	handshake handlers in their reserved slots.
 */
2018 mpt_core_load(struct mpt_personality *pers)
2023 * Setup core handlers and insert the default handler
2024 * into all "empty slots".
2026 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2027 mpt_reply_handlers[i] = mpt_default_reply_handler;
2030 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2031 mpt_event_reply_handler;
2032 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2033 mpt_config_reply_handler;
2034 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2035 mpt_handshake_reply_handler;
2040 * Initialize per-instance driver data and perform
2041 * initial controller configuration.
/*
 * mpt_core_attach:
 *	Initialize per-softc lists (ack frames; pending/free/timeout
 *	request queues; per-LUN target-mode queues), seed the free list
 *	with every request in the pool, attach the debug sysctl, then run
 *	mpt_configure_ioc() for the initial controller bring-up.
 */
2044 mpt_core_attach(struct mpt_softc *mpt)
2050 LIST_INIT(&mpt->ack_frames);
2052 /* Put all request buffers on the free list */
2053 TAILQ_INIT(&mpt->request_pending_list);
2054 TAILQ_INIT(&mpt->request_free_list);
2055 TAILQ_INIT(&mpt->request_timeout_list);
/* Mark ALLOCATED first so mpt_free_request's state transition is legal. */
2056 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2057 request_t *req = &mpt->request_pool[val];
2058 req->state = REQ_STATE_ALLOCATED;
2059 mpt_free_request(mpt, req);
2062 for (val = 0; val < MPT_MAX_LUNS; val++) {
2063 STAILQ_INIT(&mpt->trt[val].atios);
2064 STAILQ_INIT(&mpt->trt[val].inots);
2066 STAILQ_INIT(&mpt->trt_wildcard.atios);
2067 STAILQ_INIT(&mpt->trt_wildcard.inots);
2069 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2071 mpt_sysctl_attach(mpt);
2073 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2074 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2076 error = mpt_configure_ioc(mpt);
/*
 * mpt_core_enable:
 *	Final enable stage, entered with the IOC configured but events,
 *	ports and interrupts off.  Turns on async events, unmasks
 *	interrupts, drains any pending interrupt state (twice — per the
 *	comments, the port enable times out otherwise), and enables port 0.
 */
2082 mpt_core_enable(struct mpt_softc *mpt)
2085 * We enter with the IOC enabled, but async events
2086 * not enabled, ports not enabled and interrupts
2091 * Enable asynchronous event reporting- all personalities
2092 * have attached so that they should be able to now field
2095 mpt_send_event_request(mpt, 1);
2098 * Catch any pending interrupts
2100 * This seems to be crucial- otherwise
2101 * the portenable below times out.
2108 mpt_enable_ints(mpt);
2111 * Catch any pending interrupts
2113 * This seems to be crucial- otherwise
2114 * the portenable below times out.
2121 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2122 mpt_prt(mpt, "failed to enable port 0\n");
/* mpt_core_shutdown: core personality shutdown — mask chip interrupts. */
2129 mpt_core_shutdown(struct mpt_softc *mpt)
2131 mpt_disable_ints(mpt);
/* mpt_core_detach: core personality detach — mask chip interrupts. */
2135 mpt_core_detach(struct mpt_softc *mpt)
2137 mpt_disable_ints(mpt);
/* mpt_core_unload: module unload hook; nothing to tear down. */
2141 mpt_core_unload(struct mpt_personality *pers)
2143 /* Unload is always successfull. */
/*
 * Size of an FW_UPLOAD request frame: the base message with its embedded
 * SGE union replaced by one transaction-context SGE plus one SIMPLE32 SGE.
 */
2147 #define FW_UPLOAD_REQ_SIZE \
2148 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \
2149 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
/*
 * mpt_upload_fw:
 *	Retrieve the IOC's firmware image into the host buffer at
 *	mpt->fw_phys (size mpt->fw_image_size) via a handshake FW_UPLOAD
 *	request: a TCSGE describing the image followed by a single 32-bit
 *	SIMPLE SGE pointing at the DMA buffer, direction IOC-to-host.
 */
2152 mpt_upload_fw(struct mpt_softc *mpt)
2154 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2155 MSG_FW_UPLOAD_REPLY fw_reply;
2156 MSG_FW_UPLOAD *fw_req;
2157 FW_UPLOAD_TCSGE *tsge;
2162 memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2163 fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2164 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2165 fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2166 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
/* Transaction-context SGE describing the upload (12 detail bytes). */
2167 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
2168 tsge->DetailsLength = 12;
2169 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2170 tsge->ImageSize = htole32(mpt->fw_image_size);
/* Simple SGE immediately follows the TCSGE in the frame. */
2171 sge = (SGE_SIMPLE32 *)(tsge + 1);
2172 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2173 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2174 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2175 flags <<= MPI_SGE_FLAGS_SHIFT;
2176 sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2177 sge->Address = htole32(mpt->fw_phys);
2178 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2181 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
/*
 * mpt_diag_outsl:
 *	Stream `len` bytes (rounded up to whole dwords) from `data` into
 *	chip memory at `addr` through the PIO diagnostic address/data
 *	registers.  PCI I/O-port access is enabled for the duration and
 *	disabled afterwards.
 */
2186 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2187 uint32_t *data, bus_size_t len)
2191 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
2192 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
/* Set the target address once; writes auto-advance through the window. */
2193 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2194 while (data != data_end) {
2195 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2198 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
/*
 * mpt_download_fw:
 *	Push the previously-uploaded firmware image (mpt->fw_image) back
 *	into the IOC after a hard reset: enter diagnostic mode with the
 *	ARM processor halted, stream the main image and any extended
 *	images to their load addresses, program the IOP reset vector,
 *	clear the "flash bad" status bit, then re-enable the processor and
 *	leave diagnostic mode.
 */
2202 mpt_download_fw(struct mpt_softc *mpt)
2204 MpiFwHeader_t *fw_hdr;
2206 uint32_t ext_offset;
2209 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2210 mpt->fw_image_size);
2212 error = mpt_enable_diag_mode(mpt);
2214 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
/* Halt the on-chip processor so we can write its memory. */
2218 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2219 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2221 fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2222 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
/* Walk the chain of extended image headers (offset 0 terminates). */
2225 ext_offset = fw_hdr->NextImageHeaderOffset;
2226 while (ext_offset != 0) {
2227 MpiExtImageHeader_t *ext;
2229 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2230 ext_offset = ext->NextImageHeaderOffset;
2232 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2236 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2237 /* Setup the address to jump to on reset. */
2238 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2239 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2242 * The controller sets the "flash bad" status after attempting
2243 * to auto-boot from flash. Clear the status so that the controller
2244 * will continue the boot process with our newly installed firmware.
2246 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2247 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2248 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2249 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2251 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2254 * Re-enable the processor and clear the boot halt flag.
2256 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2257 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2258 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2260 mpt_disable_diag_mode(mpt);
2265 * Allocate/Initialize data structures for the controller. Called
2266 * once at instance startup.
/*
 * mpt_configure_ioc:
 *	Bring the IOC from power-on/reset to a usable state, retrying up
 *	to MPT_MAX_TRYS times:
 *	  1. Reset the chip if it isn't READY (or a prior pass failed).
 *	  2. IOC FACTS: record credits, request frame size, flags; derive
 *	     max_seg_cnt from request-area count, chain frames per request
 *	     area, the card's MaxChainDepth, and SGEs per chain segment.
 *	  3. If FW_DOWNLOAD_BOOT is flagged, allocate a DMA buffer and
 *	     upload the firmware image so it can be re-downloaded after a
 *	     hard reset.
 *	  4. PORT FACTS: record port type/protocol flags; reject anything
 *	     other than SCSI, SAS or FC; derive the driver role from the
 *	     initiator/target protocol flags.
 *	  5. Enable the IOC (mpt_enable_ioc) and read IOC config pages.
 */
2269 mpt_configure_ioc(struct mpt_softc *mpt)
2271 MSG_PORT_FACTS_REPLY pfp;
2272 MSG_IOC_FACTS_REPLY facts;
2275 uint32_t max_chain_depth;
2278 for (try = 0; try < MPT_MAX_TRYS; try++) {
2281 * No need to reset if the IOC is already in the READY state.
2283 * Force reset if initialization failed previously.
2284 * Note that a hard_reset of the second channel of a '929
2285 * will stop operation of the first channel. Hopefully, if the
2286 * first channel is ok, the second will not require a hard
2289 if (needreset || MPT_STATE(mpt_rd_db(mpt)) !=
2290 MPT_DB_STATE_READY) {
2291 if (mpt_reset(mpt, FALSE) != MPT_OK) {
2297 if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
2298 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2303 mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
2304 mpt->request_frame_size = le16toh(facts.RequestFrameSize);
2305 mpt->ioc_facts_flags = facts.Flags;
2306 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2307 le16toh(facts.MsgVersion) >> 8,
2308 le16toh(facts.MsgVersion) & 0xFF,
2309 le16toh(facts.HeaderVersion) >> 8,
2310 le16toh(facts.HeaderVersion) & 0xFF);
2313 * Now that we know request frame size, we can calculate
2314 * the actual (reasonable) segment limit for read/write I/O.
2316 * This limit is constrained by:
2318 * + The size of each area we allocate per command (and how
2319 * many chain segments we can fit into it).
2320 * + The total number of areas we've set up.
2321 * + The actual chain depth the card will allow.
2323 * The first area's segment count is limited by the I/O request
2324 * at the head of it. We cannot allocate realistically more
2325 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2326 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2329 max_chain_depth = facts.MaxChainDepth;
2331 /* total number of request areas we (can) allocate */
2332 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2334 /* converted to the number of chain areas possible */
2335 mpt->max_seg_cnt *= MPT_NRFM(mpt);
2337 /* limited by the number of chain areas the card will support */
2338 if (mpt->max_seg_cnt > max_chain_depth) {
2339 mpt_lprt(mpt, MPT_PRT_DEBUG,
2340 "chain depth limited to %u (from %u)\n",
2341 max_chain_depth, mpt->max_seg_cnt);
2342 mpt->max_seg_cnt = max_chain_depth;
2345 /* converted to the number of simple sges in chain segments. */
2346 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
2348 mpt_lprt(mpt, MPT_PRT_DEBUG,
2349 "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
2350 mpt_lprt(mpt, MPT_PRT_DEBUG,
2351 "MsgLength=%u IOCNumber = %d\n",
2352 facts.MsgLength, facts.IOCNumber);
2353 mpt_lprt(mpt, MPT_PRT_DEBUG,
2354 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2355 "Request Frame Size %u bytes Max Chain Depth %u\n",
2356 mpt->mpt_global_credits, facts.BlockSize,
2357 mpt->request_frame_size << 2, max_chain_depth);
2358 mpt_lprt(mpt, MPT_PRT_DEBUG,
2359 "IOCFACTS: Num Ports %d, FWImageSize %d, "
2360 "Flags=%#x\n", facts.NumberOfPorts,
2361 le32toh(facts.FWImageSize), facts.Flags);
2364 if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
2365 struct mpt_map_info mi;
2369 * In some configurations, the IOC's firmware is
2370 * stored in a shared piece of system NVRAM that
2371 * is only accessable via the BIOS. In this
2372 * case, the firmware keeps a copy of firmware in
2373 * RAM until the OS driver retrieves it. Once
2374 * retrieved, we are responsible for re-downloading
2375 * the firmware after any hard-reset.
2377 mpt->fw_image_size = le32toh(facts.FWImageSize);
/* Single-segment, 32-bit-addressable DMA buffer for the image. */
2378 error = mpt_dma_tag_create(mpt, mpt->parent_dmat,
2379 /*alignment*/1, /*boundary*/0,
2380 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2381 /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
2382 /*filterarg*/NULL, mpt->fw_image_size,
2383 /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size,
2384 /*flags*/0, &mpt->fw_dmat);
2386 mpt_prt(mpt, "cannot create fw dma tag\n");
2389 error = bus_dmamem_alloc(mpt->fw_dmat,
2390 (void **)&mpt->fw_image, BUS_DMA_NOWAIT,
2393 mpt_prt(mpt, "cannot allocate fw mem.\n");
2394 bus_dma_tag_destroy(mpt->fw_dmat);
2399 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2400 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest,
2402 mpt->fw_phys = mi.phys;
2404 error = mpt_upload_fw(mpt);
/* Upload failure: unwind map, memory, and tag in reverse order. */
2406 mpt_prt(mpt, "fw upload failed.\n");
2407 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2408 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2410 bus_dma_tag_destroy(mpt->fw_dmat);
2411 mpt->fw_image = NULL;
2416 if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) {
2417 mpt_prt(mpt, "mpt_get_portfacts failed\n");
2422 mpt_lprt(mpt, MPT_PRT_DEBUG,
2423 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n",
2424 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID,
2427 mpt->mpt_port_type = pfp.PortType;
2428 mpt->mpt_proto_flags = pfp.ProtocolFlags;
2429 if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
2430 pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
2431 pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
2432 mpt_prt(mpt, "Unsupported Port Type (%x)\n",
2436 mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers);
2438 if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2442 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2451 mpt->mpt_ini_id = pfp.PortSCSIID;
2452 mpt->mpt_max_devices = pfp.MaxDevices;
2455 * Set our expected role with what this port supports.
2458 mpt->role = MPT_ROLE_NONE;
2459 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2460 mpt->role |= MPT_ROLE_INITIATOR;
2462 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2463 mpt->role |= MPT_ROLE_TARGET;
2465 if (mpt->role == MPT_ROLE_NONE) {
2466 mpt_prt(mpt, "port does not support either target or "
2467 "initiator role\n");
2471 if (mpt_enable_ioc(mpt, 0) != MPT_OK) {
2472 mpt_prt(mpt, "unable to initialize IOC\n");
2477 * Read IOC configuration information.
2479 * We need this to determine whether or not we have certain
2480 * settings for Integrated Mirroring (e.g.).
2482 mpt_read_config_info_ioc(mpt);
2484 /* Everything worked */
2488 if (try >= MPT_MAX_TRYS) {
2489 mpt_prt(mpt, "failed to initialize IOC");
2497 mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2502 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2503 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2507 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2509 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2510 mpt_prt(mpt, "IOC failed to go to run state\n");
2513 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2516 * Give it reply buffers
2518 * Do *not* exceed global credits.
2520 for (val = 0, pptr = mpt->reply_phys;
2521 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2522 pptr += MPT_REPLY_SIZE) {
2523 mpt_free_reply(mpt, pptr);
2524 if (++val == mpt->mpt_global_credits - 1)
2530 * Enable the port if asked. This is only done if we're resetting
2531 * the IOC after initial startup.
2535 * Enable asynchronous event reporting
2537 mpt_send_event_request(mpt, 1);
2539 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2540 mpt_prt(mpt, "failed to enable port 0\n");