2 * Generic routines for LSI Fusion adapters.
5 * Copyright (c) 2000, 2001 by Greg Ansley
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
62 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
63 * Copyright (c) 2005, WHEEL Sp. z o.o.
64 * Copyright (c) 2004, 2005 Justin T. Gibbs
65 * All rights reserved.
67 * Redistribution and use in source and binary forms, with or without
68 * modification, are permitted provided that the following conditions are
70 * 1. Redistributions of source code must retain the above copyright
71 * notice, this list of conditions and the following disclaimer.
72 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
73 * substantially similar to the "NO WARRANTY" disclaimer below
74 * ("Disclaimer") and any redistribution must be conditioned upon including
75 * a substantially similar Disclaimer requirement for further binary
77 * 3. Neither the names of the above listed copyright holders nor the names
78 * of any contributors may be used to endorse or promote products derived
79 * from this software without specific prior written permission.
81 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
82 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
83 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
84 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
85 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
86 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
91 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
94 #include <sys/cdefs.h>
95 __FBSDID("$FreeBSD$");
97 #include <dev/mpt/mpt.h>
98 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
99 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
101 #include <dev/mpt/mpilib/mpi.h>
102 #include <dev/mpt/mpilib/mpi_ioc.h>
104 #include <sys/sysctl.h>
106 #define MPT_MAX_TRYS 3
107 #define MPT_MAX_WAIT 300000
109 static int maxwait_ack = 0;
110 static int maxwait_int = 0;
111 static int maxwait_state = 0;
113 TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
114 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
116 static mpt_reply_handler_t mpt_default_reply_handler;
117 static mpt_reply_handler_t mpt_config_reply_handler;
118 static mpt_reply_handler_t mpt_handshake_reply_handler;
119 static mpt_reply_handler_t mpt_event_reply_handler;
120 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
121 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
122 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
123 static int mpt_soft_reset(struct mpt_softc *mpt);
124 static void mpt_hard_reset(struct mpt_softc *mpt);
125 static int mpt_configure_ioc(struct mpt_softc *mpt);
126 static int mpt_enable_ioc(struct mpt_softc *mpt);
128 /************************* Personality Module Support *************************/
130 * We include one extra entry that is guaranteed to be NULL
131 * to simplify our iterator.
133 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
134 static __inline struct mpt_personality*
135 mpt_pers_find(struct mpt_softc *, u_int);
136 static __inline struct mpt_personality*
137 mpt_pers_find_reverse(struct mpt_softc *, u_int);
139 static __inline struct mpt_personality *
140 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
142 KASSERT(start_at <= MPT_MAX_PERSONALITIES,
143 ("mpt_pers_find: starting position out of range\n"));
145 while (start_at < MPT_MAX_PERSONALITIES
146 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
149 return (mpt_personalities[start_at]);
153 * Used infrequently, so no need to optimize like the forward
154 * traversal, where the guaranteed-NULL entry at MAX+1 terminates the scan.
157 static __inline struct mpt_personality *
158 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
160 while (start_at < MPT_MAX_PERSONALITIES
161 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
164 if (start_at < MPT_MAX_PERSONALITIES)
165 return (mpt_personalities[start_at]);
169 #define MPT_PERS_FOREACH(mpt, pers) \
170 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
172 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
174 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
175 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
177 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
179 static mpt_load_handler_t mpt_stdload;
180 static mpt_probe_handler_t mpt_stdprobe;
181 static mpt_attach_handler_t mpt_stdattach;
182 static mpt_event_handler_t mpt_stdevent;
183 static mpt_reset_handler_t mpt_stdreset;
184 static mpt_shutdown_handler_t mpt_stdshutdown;
185 static mpt_detach_handler_t mpt_stddetach;
186 static mpt_unload_handler_t mpt_stdunload;
187 static struct mpt_personality mpt_default_personality =
190 .probe = mpt_stdprobe,
191 .attach = mpt_stdattach,
192 .event = mpt_stdevent,
193 .reset = mpt_stdreset,
194 .shutdown = mpt_stdshutdown,
195 .detach = mpt_stddetach,
196 .unload = mpt_stdunload
199 static mpt_load_handler_t mpt_core_load;
200 static mpt_attach_handler_t mpt_core_attach;
201 static mpt_reset_handler_t mpt_core_ioc_reset;
202 static mpt_event_handler_t mpt_core_event;
203 static mpt_shutdown_handler_t mpt_core_shutdown;
204 static mpt_shutdown_handler_t mpt_core_detach;
205 static mpt_unload_handler_t mpt_core_unload;
206 static struct mpt_personality mpt_core_personality =
209 .load = mpt_core_load,
210 .attach = mpt_core_attach,
211 .event = mpt_core_event,
212 .reset = mpt_core_ioc_reset,
213 .shutdown = mpt_core_shutdown,
214 .detach = mpt_core_detach,
215 .unload = mpt_core_unload,
219 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
220 * ordering information. We want the core to always register FIRST.
221 * other modules are set to SI_ORDER_SECOND.
223 static moduledata_t mpt_core_mod = {
224 "mpt_core", mpt_modevent, &mpt_core_personality
226 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
227 MODULE_VERSION(mpt_core, 1);
229 #define MPT_PERS_ATACHED(pers, mpt) \
230 ((mpt)->pers_mask & (0x1 << pers->id))
234 mpt_modevent(module_t mod, int type, void *data)
236 struct mpt_personality *pers;
239 pers = (struct mpt_personality *)data;
245 mpt_load_handler_t **def_handler;
246 mpt_load_handler_t **pers_handler;
249 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
250 if (mpt_personalities[i] == NULL)
253 if (i >= MPT_MAX_PERSONALITIES) {
258 mpt_personalities[i] = pers;
260 /* Install standard/noop handlers for any NULL entries. */
261 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
262 pers_handler = MPT_PERS_FIRST_HANDLER(pers);
263 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
264 if (*pers_handler == NULL)
265 *pers_handler = *def_handler;
270 error = (pers->load(pers));
272 mpt_personalities[i] = NULL;
280 error = pers->unload(pers);
281 mpt_personalities[pers->id] = NULL;
291 mpt_stdload(struct mpt_personality *pers)
293 /* Load is always successfull. */
298 mpt_stdprobe(struct mpt_softc *mpt)
300 /* Probe is always successfull. */
305 mpt_stdattach(struct mpt_softc *mpt)
307 /* Attach is always successfull. */
312 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
314 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
315 /* Event was not for us. */
320 mpt_stdreset(struct mpt_softc *mpt, int type)
325 mpt_stdshutdown(struct mpt_softc *mpt)
330 mpt_stddetach(struct mpt_softc *mpt)
335 mpt_stdunload(struct mpt_personality *pers)
337 /* Unload is always successfull. */
341 /******************************* Bus DMA Support ******************************/
343 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
345 struct mpt_map_info *map_info;
347 map_info = (struct mpt_map_info *)arg;
348 map_info->error = error;
349 map_info->phys = segs->ds_addr;
352 /**************************** Reply/Event Handling ****************************/
354 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
355 mpt_handler_t handler, uint32_t *phandler_id)
359 case MPT_HANDLER_REPLY:
364 if (phandler_id == NULL)
367 free_cbi = MPT_HANDLER_ID_NONE;
368 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
370 * If the same handler is registered multiple
371 * times, don't error out. Just return the
372 * index of the original registration.
374 if (mpt_reply_handlers[cbi] == handler.reply_handler) {
375 *phandler_id = MPT_CBI_TO_HID(cbi);
380 * Fill from the front in the hope that
381 * all registered handlers consume only a
384 * We don't break on the first empty slot so
385 * that the full table is checked to see if
386 * this handler was previously registered.
388 if (free_cbi == MPT_HANDLER_ID_NONE
389 && (mpt_reply_handlers[cbi]
390 == mpt_default_reply_handler))
393 if (free_cbi == MPT_HANDLER_ID_NONE)
395 mpt_reply_handlers[free_cbi] = handler.reply_handler;
396 *phandler_id = MPT_CBI_TO_HID(free_cbi);
400 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
407 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
408 mpt_handler_t handler, uint32_t handler_id)
412 case MPT_HANDLER_REPLY:
416 cbi = MPT_CBI(handler_id);
417 if (cbi >= MPT_NUM_REPLY_HANDLERS
418 || mpt_reply_handlers[cbi] != handler.reply_handler)
420 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
424 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
431 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
432 MSG_DEFAULT_REPLY *reply_frame)
434 mpt_prt(mpt, "XXXX Default Handler Called. Req %p, Frame %p\n",
437 if (reply_frame != NULL)
438 mpt_dump_reply_frame(mpt, reply_frame);
440 mpt_prt(mpt, "XXXX Reply Frame Ignored\n");
442 return (/*free_reply*/TRUE);
446 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
447 MSG_DEFAULT_REPLY *reply_frame)
451 if (reply_frame != NULL) {
453 MSG_CONFIG_REPLY *reply;
455 cfgp = (MSG_CONFIG *)req->req_vbuf;
456 reply = (MSG_CONFIG_REPLY *)reply_frame;
457 req->IOCStatus = le16toh(reply_frame->IOCStatus);
458 bcopy(&reply->Header, &cfgp->Header,
459 sizeof(cfgp->Header));
461 req->state &= ~REQ_STATE_QUEUED;
462 req->state |= REQ_STATE_DONE;
463 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
465 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0)
469 return (/*free_reply*/TRUE);
473 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
474 MSG_DEFAULT_REPLY *reply_frame)
476 /* Nothing to be done. */
477 return (/*free_reply*/TRUE);
/*
 * Reply handler for asynchronous event notifications.  Each attached
 * personality is offered the event; if the IOC requires an ACK we send
 * one immediately, or - when no request buffer is available - stash the
 * reply frame on mpt->ack_frames so mpt_free_request() can send the ACK
 * later.  Returns TRUE when the reply frame may be freed.
 */
481 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
482 MSG_DEFAULT_REPLY *reply_frame)
486 if (reply_frame == NULL) {
/*
 * Fix: the original call supplied a "%p" conversion with no matching
 * argument (undefined behavior); pass 'req' to match the format.
 */
487 mpt_prt(mpt, "Event Handler: req %p - Unexpected NULL reply\n", req);
488 return (/*free_reply*/TRUE);
492 switch (reply_frame->Function) {
493 case MPI_FUNCTION_EVENT_NOTIFICATION:
495 MSG_EVENT_NOTIFY_REPLY *msg;
496 struct mpt_personality *pers;
/* Offer the event to every attached personality in turn. */
500 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
501 MPT_PERS_FOREACH(mpt, pers)
502 handled += pers->event(mpt, req, msg);
504 if (handled == 0 && mpt->mpt_pers_mask == 0) {
505 mpt_lprt(mpt, MPT_PRT_INFO,
506 "No Handlers For Any Event Notify Frames. "
507 "Event %#x (ACK %sequired).\n",
508 msg->Event, msg->AckRequired? "r" : "not r");
509 } else if (handled == 0) {
510 mpt_lprt(mpt, MPT_PRT_WARN,
511 "Unhandled Event Notify Frame. Event %#x "
512 "(ACK %sequired).\n",
513 msg->Event, msg->AckRequired? "r" : "not r");
516 if (msg->AckRequired) {
520 context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS);
521 ack_req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
522 if (ack_req == NULL) {
523 struct mpt_evtf_record *evtf;
/*
 * No request buffer free: defer the ACK by queueing the
 * reply frame itself; it is released when the ACK goes out.
 */
525 evtf = (struct mpt_evtf_record *)reply_frame;
526 evtf->context = context;
527 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
531 mpt_send_event_ack(mpt, ack_req, msg, context);
535 case MPI_FUNCTION_PORT_ENABLE:
536 mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
538 case MPI_FUNCTION_EVENT_ACK:
541 mpt_prt(mpt, "Unknown Event Function: %x\n",
542 reply_frame->Function);
/*
 * Complete the originating request unless this is a continuation
 * reply (more frames for the same request will follow).
 */
547 && (reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
549 req->state &= ~REQ_STATE_QUEUED;
550 req->state |= REQ_STATE_DONE;
551 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
553 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0)
556 mpt_free_request(mpt, req);
562 * Process an asynchronous event from the IOC.
565 mpt_core_event(struct mpt_softc *mpt, request_t *req,
566 MSG_EVENT_NOTIFY_REPLY *msg)
568 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
570 switch(msg->Event & 0xFF) {
573 case MPI_EVENT_LOG_DATA:
577 /* Some error occured that LSI wants logged */
578 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
580 mpt_prt(mpt, "\tEvtLogData: Event Data:");
581 for (i = 0; i < msg->EventDataLength; i++)
582 mpt_prtc(mpt, " %08x", msg->Data[i]);
586 case MPI_EVENT_EVENT_CHANGE:
588 * This is just an acknowledgement
589 * of our mpt_send_event_request.
592 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
595 return (/*handled*/0);
598 return (/*handled*/1);
602 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
603 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
607 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
608 bzero(ackp, sizeof *ackp);
609 ackp->Function = MPI_FUNCTION_EVENT_ACK;
610 ackp->Event = msg->Event;
611 ackp->EventContext = msg->EventContext;
612 ackp->MsgContext = context;
613 mpt_check_doorbell(mpt);
614 mpt_send_cmd(mpt, ack_req);
617 /***************************** Interrupt Handling *****************************/
621 struct mpt_softc *mpt;
624 mpt = (struct mpt_softc *)arg;
625 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
627 MSG_DEFAULT_REPLY *reply_frame;
628 uint32_t reply_baddr;
636 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
640 * Insure that the reply frame is coherent.
642 reply_baddr = (reply_desc << 1);
643 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
644 bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap,
645 offset, MPT_REPLY_SIZE,
646 BUS_DMASYNC_POSTREAD);
647 reply_frame = MPT_REPLY_OTOV(mpt, offset);
648 reply_desc = le32toh(reply_frame->MsgContext);
650 cb_index = MPT_CONTEXT_TO_CBI(reply_desc);
651 req_index = MPT_CONTEXT_TO_REQI(reply_desc);
652 if (req_index < MPT_MAX_REQUESTS(mpt))
653 req = &mpt->request_pool[req_index];
655 free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_frame);
657 if (reply_frame != NULL && free_rf)
658 mpt_free_reply(mpt, reply_baddr);
662 /******************************* Error Recovery *******************************/
664 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
667 MSG_DEFAULT_REPLY ioc_status_frame;
670 bzero(&ioc_status_frame, sizeof(ioc_status_frame));
671 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
672 ioc_status_frame.IOCStatus = iocstatus;
673 while((req = TAILQ_FIRST(chain)) != NULL) {
674 MSG_REQUEST_HEADER *msg_hdr;
677 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
678 ioc_status_frame.Function = msg_hdr->Function;
679 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
680 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
681 mpt_reply_handlers[cb_index](mpt, req, &ioc_status_frame);
685 /********************************* Diagnostics ********************************/
687 * Perform a diagnostic dump of a reply frame.
690 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
693 mpt_prt(mpt, "Address Reply:\n");
694 mpt_print_reply(reply_frame);
697 /******************************* Doorbell Access ******************************/
698 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
699 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
701 static __inline uint32_t
702 mpt_rd_db(struct mpt_softc *mpt)
704 return mpt_read(mpt, MPT_OFFSET_DOORBELL);
707 static __inline uint32_t
708 mpt_rd_intr(struct mpt_softc *mpt)
710 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
713 /* Busy wait for a door bell to be read by IOC */
715 mpt_wait_db_ack(struct mpt_softc *mpt)
718 for (i=0; i < MPT_MAX_WAIT; i++) {
719 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
720 maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
729 /* Busy wait for a door bell interrupt */
731 mpt_wait_db_int(struct mpt_softc *mpt)
734 for (i=0; i < MPT_MAX_WAIT; i++) {
735 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
736 maxwait_int = i > maxwait_int ? i : maxwait_int;
744 /* Check that the IOC is in the RUNNING state */
746 mpt_check_doorbell(struct mpt_softc *mpt)
748 uint32_t db = mpt_rd_db(mpt);
749 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
750 mpt_prt(mpt, "Device not running\n");
755 /* Wait for IOC to transition to a given state */
757 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
761 for (i = 0; i < MPT_MAX_WAIT; i++) {
762 uint32_t db = mpt_rd_db(mpt);
763 if (MPT_STATE(db) == state) {
764 maxwait_state = i > maxwait_state ? i : maxwait_state;
773 /************************ Initialization/Configuration ************************/
774 static int mpt_download_fw(struct mpt_softc *mpt);
776 /* Issue the reset COMMAND to the IOC */
778 mpt_soft_reset(struct mpt_softc *mpt)
780 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
782 /* Have to use hard reset if we are not in Running state */
783 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
784 mpt_prt(mpt, "soft reset failed: device not running\n");
788 /* If door bell is in use we don't have a chance of getting
789 * a word in since the IOC probably crashed in message
790 * processing. So don't waste our time.
792 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
793 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
797 /* Send the reset request to the IOC */
798 mpt_write(mpt, MPT_OFFSET_DOORBELL,
799 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
800 if (mpt_wait_db_ack(mpt) != MPT_OK) {
801 mpt_prt(mpt, "soft reset failed: ack timeout\n");
805 /* Wait for the IOC to reload and come out of reset state */
806 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
807 mpt_prt(mpt, "soft reset failed: device did not restart\n");
815 mpt_enable_diag_mode(struct mpt_softc *mpt)
822 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
825 /* Enable diagnostic registers */
826 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
827 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
828 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
829 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
830 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
831 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
841 mpt_disable_diag_mode(struct mpt_softc *mpt)
843 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
846 /* This is a magic diagnostic reset that resets all the ARM
847 * processors in the chip.
850 mpt_hard_reset(struct mpt_softc *mpt)
856 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
858 error = mpt_enable_diag_mode(mpt);
860 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
861 mpt_prt(mpt, "Trying to reset anyway.\n");
864 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
867 * This appears to be a workaround required for some
868 * firmware or hardware revs.
870 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
873 /* Diag. port is now active so we can now hit the reset bit */
874 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
877 * Ensure that the reset has finished. We delay 1ms
878 * prior to reading the register to make sure the chip
879 * has sufficiently completed its reset to handle register
885 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
886 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
889 mpt_prt(mpt, "WARNING - Failed hard reset! "
890 "Trying to initialize anyway.\n");
894 * If we have firmware to download, it must be loaded before
895 * the controller will become operational. Do so now.
897 if (mpt->fw_image != NULL) {
899 error = mpt_download_fw(mpt);
902 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
903 mpt_prt(mpt, "Trying to initialize anyway.\n");
908 * Reseting the controller should have disabled write
909 * access to the diagnostic registers, but disable
910 * manually to be sure.
912 mpt_disable_diag_mode(mpt);
916 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
919 * Complete all pending requests with a status
920 * appropriate for an IOC reset.
922 mpt_complete_request_chain(mpt, &mpt->request_pending_list,
923 MPI_IOCSTATUS_INVALID_STATE);
928 * Reset the IOC when needed. Try software command first then if needed
929 * poke at the magic diagnostic reset. Note that a hard reset resets
930 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
931 * fouls up the PCI configuration registers.
934 mpt_reset(struct mpt_softc *mpt, int reinit)
936 struct mpt_personality *pers;
941 * Try a soft reset. If that fails, get out the big hammer.
944 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
946 for (cnt = 0; cnt < 5; cnt++) {
947 /* Failed; do a hard reset */
951 * Wait for the IOC to reload
952 * and come out of reset state
954 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
959 * Okay- try to check again...
961 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
965 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
970 if (retry_cnt == 0) {
972 * Invoke reset handlers. We bump the reset count so
973 * that mpt_wait_req() understands that regardless of
974 * the specified wait condition, it should stop its wait.
977 MPT_PERS_FOREACH(mpt, pers)
978 pers->reset(mpt, ret);
982 ret = mpt_enable_ioc(mpt);
984 mpt_enable_ints(mpt);
987 if (ret != MPT_OK && retry_cnt++ < 2) {
993 /* Return a command buffer to the free queue */
995 mpt_free_request(struct mpt_softc *mpt, request_t *req)
998 struct mpt_evtf_record *record;
999 uint32_t reply_baddr;
1001 if (req == NULL || req != &mpt->request_pool[req->index]) {
1002 panic("mpt_free_request bad req ptr\n");
1005 if ((nxt = req->chain) != NULL) {
1007 mpt_free_request(mpt, nxt); /* NB: recursion */
1010 req->state = REQ_STATE_FREE;
1011 if (LIST_EMPTY(&mpt->ack_frames)) {
1012 TAILQ_INSERT_HEAD(&mpt->request_free_list, req, links);
1013 if (mpt->getreqwaiter != 0) {
1014 mpt->getreqwaiter = 0;
1015 wakeup(&mpt->request_free_list);
1021 * Process an ack frame deferred due to resource shortage.
1023 record = LIST_FIRST(&mpt->ack_frames);
1024 LIST_REMOVE(record, links);
1025 mpt_send_event_ack(mpt, req, &record->reply, record->context);
1026 reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
1027 + (mpt->reply_phys & 0xFFFFFFFF);
1028 mpt_free_reply(mpt, reply_baddr);
1031 /* Get a command buffer from the free queue */
1033 mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
1038 req = TAILQ_FIRST(&mpt->request_free_list);
1040 KASSERT(req == &mpt->request_pool[req->index],
1041 ("mpt_get_request: corrupted request free list\n"));
1042 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1043 req->state = REQ_STATE_ALLOCATED;
1045 } else if (sleep_ok != 0) {
1046 mpt->getreqwaiter = 1;
1047 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1053 /* Pass the command to the IOC */
1055 mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1059 pReq = req->req_vbuf;
1060 if (mpt->verbose > MPT_PRT_TRACE) {
1062 mpt_prt(mpt, "Send Request %d (0x%x):",
1063 req->index, req->req_pbuf);
1064 for (offset = 0; offset < mpt->request_frame_size; offset++) {
1065 if ((offset & 0x7) == 0) {
1066 mpt_prtc(mpt, "\n");
1069 mpt_prtc(mpt, " %08x", pReq[offset]);
1071 mpt_prtc(mpt, "\n");
1073 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1074 BUS_DMASYNC_PREWRITE);
1075 req->state |= REQ_STATE_QUEUED;
1076 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
1077 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
1081 * Wait for a request to complete.
1084 * mpt softc of controller executing request
1085 * req request to wait for
1086 * sleep_ok nonzero implies may sleep in this context
1087 * time_ms timeout in ms. 0 implies no timeout.
1090 * 0 Request completed
1091 * non-0 Timeout fired before request completion.
1094 mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1095 mpt_req_state_t state, mpt_req_state_t mask,
1096 int sleep_ok, int time_ms)
1103 * timeout is in ms. 0 indicates infinite wait.
1104 * Convert to ticks or 500us units depending on
1108 timeout = (time_ms * hz) / 1000;
1110 timeout = time_ms * 2;
1111 req->state |= REQ_STATE_NEED_WAKEUP;
1112 mask &= ~REQ_STATE_NEED_WAKEUP;
1113 saved_cnt = mpt->reset_cnt;
1114 while ((req->state & mask) != state
1115 && mpt->reset_cnt == saved_cnt) {
1117 if (sleep_ok != 0) {
1118 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1119 if (error == EWOULDBLOCK) {
1124 if (time_ms != 0 && --timeout == 0) {
1125 mpt_prt(mpt, "mpt_wait_req timed out\n");
1132 req->state &= ~REQ_STATE_NEED_WAKEUP;
1133 if (mpt->reset_cnt != saved_cnt)
1135 if (time_ms && timeout <= 0)
1141 * Send a command to the IOC via the handshake register.
1143 * Only done at initialization time and for certain unusual
1144 * commands such as device/bus reset as specified by LSI.
1147 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1150 uint32_t data, *data32;
1152 /* Check condition of the IOC */
1153 data = mpt_rd_db(mpt);
1154 if ((MPT_STATE(data) != MPT_DB_STATE_READY
1155 && MPT_STATE(data) != MPT_DB_STATE_RUNNING
1156 && MPT_STATE(data) != MPT_DB_STATE_FAULT)
1157 || MPT_DB_IS_IN_USE(data)) {
1158 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1163 /* We move things in 32 bit chunks */
1164 len = (len + 3) >> 2;
1167 /* Clear any left over pending doorbell interupts */
1168 if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1169 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1172 * Tell the handshake reg. we are going to send a command
1173 * and how long it is going to be.
1175 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
1176 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
1177 mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1179 /* Wait for the chip to notice */
1180 if (mpt_wait_db_int(mpt) != MPT_OK) {
1181 mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n");
1185 /* Clear the interrupt */
1186 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1188 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1189 mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n");
1193 /* Send the command */
1194 for (i = 0; i < len; i++) {
1195 mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++);
1196 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1198 "mpt_send_handshake_cmd timeout! index = %d\n",
1206 /* Get the response from the handshake register */
1208 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1210 int left, reply_left;
1212 MSG_DEFAULT_REPLY *hdr;
1214 /* We move things out in 16 bit chunks */
1216 data16 = (u_int16_t *)reply;
1218 hdr = (MSG_DEFAULT_REPLY *)reply;
1220 /* Get first word */
1221 if (mpt_wait_db_int(mpt) != MPT_OK) {
1222 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1225 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
1226 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1228 /* Get Second Word */
1229 if (mpt_wait_db_int(mpt) != MPT_OK) {
1230 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1233 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
1234 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1236 /* With the second word, we can now look at the length */
1237 if (((reply_len >> 1) != hdr->MsgLength)) {
1238 mpt_prt(mpt, "reply length does not match message length: "
1239 "got 0x%02x, expected 0x%02x\n",
1240 hdr->MsgLength << 2, reply_len << 1);
1243 /* Get rest of the reply; but don't overflow the provided buffer */
1244 left = (hdr->MsgLength << 1) - 2;
1245 reply_left = reply_len - 2;
1249 if (mpt_wait_db_int(mpt) != MPT_OK) {
1250 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1253 datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1255 if (reply_left-- > 0)
1256 *data16++ = datum & MPT_DB_DATA_MASK;
1258 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1261 /* One more wait & clear at the end */
1262 if (mpt_wait_db_int(mpt) != MPT_OK) {
1263 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1266 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1268 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1269 if (mpt->verbose >= MPT_PRT_TRACE)
1270 mpt_print_reply(hdr);
1271 return (MPT_FAIL | hdr->IOCStatus);
/*
 * Fetch the IOC FACTS reply (controller capabilities, credits, frame
 * sizes) via the doorbell handshake, into *freplp.
 */
1278 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1280 MSG_IOC_FACTS f_req;
1283 bzero(&f_req, sizeof f_req);
1284 f_req.Function = MPI_FUNCTION_IOC_FACTS;
1285 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1286 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
/* (error check between send and receive elided in this excerpt) */
1289 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
/*
 * Fetch the PORT FACTS reply (port type, protocol flags, initiator ID)
 * via the doorbell handshake, into *freplp.
 */
1294 mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp)
1296 MSG_PORT_FACTS f_req;
1299 /* XXX: Only getting PORT FACTS for Port 0 */
1300 memset(&f_req, 0, sizeof f_req);
1301 f_req.Function = MPI_FUNCTION_PORT_FACTS;
1302 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1303 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
/* (error check between send and receive elided in this excerpt) */
1306 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1311 * Send the initialization request. This is where we specify how many
1312 * SCSI busses and how many devices per bus we wish to emulate.
1313 * This is also the command that specifies the max size of the reply
1314 * frames from the IOC that we will be allocating.
1317 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1321 MSG_IOC_INIT_REPLY reply;
1323 bzero(&init, sizeof init);
1325 init.Function = MPI_FUNCTION_IOC_INIT;
/* MaxDevices depends on transport; the branch condition preceding
 * this assignment is elided in this excerpt (presumably is_fc). */
1327 init.MaxDevices = 255;
1328 } else if (mpt->is_sas) {
1329 init.MaxDevices = mpt->mpt_max_devices;
1331 init.MaxDevices = 16;
1335 init.MsgVersion = htole16(MPI_VERSION);
1336 init.HeaderVersion = htole16(MPI_HEADER_VERSION);
/* Tell the IOC how big each reply frame we allocate will be. */
1337 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
1338 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1340 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1344 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
1350 * Utility routine to read configuration headers and pages
/*
 * Build a CONFIG request in 'req', attach a single SIMPLE32 SGE
 * pointing at 'addr'/'len', submit it through the normal shared-memory
 * request path, and wait for completion.  The SGE direction is chosen
 * from the Action (host->IOC for writes, IOC->host otherwise).
 */
1353 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
1354 u_int PageVersion, u_int PageLength, u_int PageNumber,
1355 u_int PageType, uint32_t PageAddress, bus_addr_t addr,
1356 bus_size_t len, int sleep_ok, int timeout_ms)
1361 cfgp = req->req_vbuf;
1362 memset(cfgp, 0, sizeof *cfgp);
1363 cfgp->Action = Action;
1364 cfgp->Function = MPI_FUNCTION_CONFIG;
1365 cfgp->Header.PageVersion = PageVersion;
1366 cfgp->Header.PageLength = PageLength;
1367 cfgp->Header.PageNumber = PageNumber;
1368 cfgp->Header.PageType = PageType;
1369 cfgp->PageAddress = PageAddress;
1370 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
1372 MPI_pSGE_SET_LENGTH(se, len);
1373 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1374 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1375 MPI_SGE_FLAGS_END_OF_LIST |
1376 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1377 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
1378 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
/* Route the completion to the CONFIG reply handler for this request. */
1379 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1381 mpt_check_doorbell(mpt);
1382 mpt_send_cmd(mpt, req);
1383 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1384 sleep_ok, timeout_ms));
/*
 * Read a configuration page HEADER (version/length/number/type) for
 * the given page type/number/address into *rslt.  An invalid-page
 * status is common for optional pages and is only logged at debug
 * level; other failures are reported.
 */
1389 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1390 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
1391 int sleep_ok, int timeout_ms)
1397 req = mpt_get_request(mpt, sleep_ok);
1399 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
/* PAGE_HEADER action needs no data buffer, hence addr/len of 0. */
1403 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
1404 /*PageVersion*/0, /*PageLength*/0, PageNumber,
1405 PageType, PageAddress, /*addr*/0, /*len*/0,
1406 sleep_ok, timeout_ms);
1408 mpt_free_request(mpt, req);
1409 mpt_prt(mpt, "read_cfg_header timed out\n");
1413 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1414 case MPI_IOCSTATUS_SUCCESS:
/* Header comes back embedded in the request's reply copy. */
1415 cfgp = req->req_vbuf;
1416 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
1419 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1420 mpt_lprt(mpt, MPT_PRT_DEBUG,
1421 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1422 PageType, PageNumber, PageAddress);
1426 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1431 mpt_free_request(mpt, req);
/* Byte offset into the request buffer where page data is staged,
 * safely past the CONFIG request message itself. */
1435 #define CFG_DATA_OFF 128
/*
 * Read a full configuration page (current or NVRAM, per 'Action')
 * described by *hdr into *hdr (header + data, 'len' bytes total).
 * The page is DMA'd into the request's own buffer at CFG_DATA_OFF
 * and copied out after a POSTREAD sync.
 */
1438 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1439 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1445 req = mpt_get_request(mpt, sleep_ok);
1447 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
/* Attribute bits are stripped from PageType for the request. */
1451 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1452 hdr->PageLength, hdr->PageNumber,
1453 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1454 PageAddress, req->req_pbuf + CFG_DATA_OFF,
1455 len, sleep_ok, timeout_ms);
1457 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1461 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1462 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1464 mpt_free_request(mpt, req);
/* Make the DMA'd page data visible to the CPU before copying out. */
1467 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1468 BUS_DMASYNC_POSTREAD);
1469 memcpy(hdr, ((uint8_t *)req->req_vbuf)+CFG_DATA_OFF, len);
1470 mpt_free_request(mpt, req);
/*
 * Write a full configuration page from *hdr to the IOC.  Only pages
 * whose attribute marks them CHANGEABLE or PERSISTENT may be written.
 * The attribute bits are stripped for the transfer and restored into
 * the caller's copy afterward.
 */
1475 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1476 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1483 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
1484 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
1485 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
1486 mpt_prt(mpt, "page type 0x%x not changeable\n",
1487 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
/* NOTE(review): this line ends with ',' in this excerpt; the next
 * source line is elided -- verify it is ';' in the full file. */
1490 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK,
1492 req = mpt_get_request(mpt, sleep_ok);
/* Stage the page data (with attributes stripped) for the DMA write. */
1496 memcpy(((caddr_t)req->req_vbuf)+CFG_DATA_OFF, hdr, len);
1497 /* Restore stripped out attributes */
1498 hdr->PageType |= hdr_attr;
1500 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1501 hdr->PageLength, hdr->PageNumber,
1502 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1503 PageAddress, req->req_pbuf + CFG_DATA_OFF,
1504 len, sleep_ok, timeout_ms);
1506 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1510 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1511 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1513 mpt_free_request(mpt, req);
1516 mpt_free_request(mpt, req);
1521 * Read IOC configuration information
/*
 * Read IOC Pages 2 and 3 (RAID capabilities/volumes/disks), allocate
 * and populate the driver's RAID bookkeeping structures from them,
 * and wake the RAID support thread.  Pages are optional: an invalid
 * page is not an error.
 */
1524 mpt_read_config_info_ioc(struct mpt_softc *mpt)
1526 CONFIG_PAGE_HEADER hdr;
1527 struct mpt_raid_volume *mpt_raid;
1532 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1533 /*PageNumber*/2, /*PageAddress*/0, &hdr,
1534 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1536 * If it's an invalid page, so what? Not a supported function....
1543 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %x, "
1544 "num %x, type %x\n", hdr.PageVersion,
1545 hdr.PageLength * sizeof(uint32_t),
1546 hdr.PageNumber, hdr.PageType);
/* PageLength is in 32-bit words; convert to bytes for allocation. */
1548 len = hdr.PageLength * sizeof(uint32_t);
1549 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1550 if (mpt->ioc_page2 == NULL)
/* Seed the header so mpt_read_cur_cfg_page knows what to fetch. */
1552 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1553 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1554 &mpt->ioc_page2->Header, len,
1555 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1557 mpt_prt(mpt, "failed to read IOC Page 2\n");
1558 } else if (mpt->ioc_page2->CapabilitiesFlags != 0) {
/* Walk every bit of CapabilitiesFlags and print the known ones. */
1561 mpt_prt(mpt, "Capabilities: (");
1562 for (mask = 1; mask != 0; mask <<= 1) {
1563 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0)
1567 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
1568 mpt_prtc(mpt, " RAID-0");
1570 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
1571 mpt_prtc(mpt, " RAID-1E");
1573 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
1574 mpt_prtc(mpt, " RAID-1");
1576 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
1577 mpt_prtc(mpt, " SES");
1579 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
1580 mpt_prtc(mpt, " SAFTE");
1582 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
1583 mpt_prtc(mpt, " Multi-Channel-Arrays");
1588 mpt_prtc(mpt, " )\n");
1589 if ((mpt->ioc_page2->CapabilitiesFlags
1590 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1591 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1592 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1593 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1594 mpt->ioc_page2->NumActiveVolumes,
1595 mpt->ioc_page2->NumActiveVolumes != 1
1597 mpt->ioc_page2->MaxVolumes);
1598 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1599 mpt->ioc_page2->NumActivePhysDisks,
1600 mpt->ioc_page2->NumActivePhysDisks != 1
1602 mpt->ioc_page2->MaxPhysDisks);
/* Allocate per-volume tracking for the maximum the IOC supports. */
1606 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1607 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT);
1608 if (mpt->raid_volumes == NULL) {
1609 mpt_prt(mpt, "Could not allocate RAID volume data\n");
1611 memset(mpt->raid_volumes, 0, len);
1615 * Copy critical data out of ioc_page2 so that we can
1616 * safely refresh the page without windows of unreliable
1619 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
/* RAID volume page 0 has one PhysDisk element built in, hence
 * the "MaxPhysDisks - 1" additional elements. */
1621 len = sizeof(*mpt->raid_volumes->config_page)
1622 + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1));
1623 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1624 mpt_raid = &mpt->raid_volumes[i];
1625 mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT);
1626 if (mpt_raid->config_page == NULL) {
1627 mpt_prt(mpt, "Could not allocate RAID page data\n");
1630 memset(mpt_raid->config_page, 0, len);
1632 mpt->raid_page0_len = len;
1634 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1635 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT);
1636 if (mpt->raid_disks == NULL) {
1637 mpt_prt(mpt, "Could not allocate RAID disk data\n");
1639 memset(mpt->raid_disks, 0, len);
1642 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
/* IOC Page 3: physical disk list. */
1644 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1645 /*PageNumber*/3, /*PageAddress*/0, &hdr,
1646 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1650 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
1651 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
/* Page 3 may be re-read; drop any previous copy first. */
1653 if (mpt->ioc_page3 != NULL)
1654 free(mpt->ioc_page3, M_DEVBUF);
1655 len = hdr.PageLength * sizeof(uint32_t);
1656 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1657 if (mpt->ioc_page3 == NULL)
1659 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
1660 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1661 &mpt->ioc_page3->Header, len,
1662 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1664 mpt_prt(mpt, "failed to read IOC Page 3\n");
1667 mpt_raid_wakeup(mpt);
1673 * Read SCSI configuration information
/*
 * Read the SPI transport configuration: SCSI Port Pages 0-2 and, for
 * each of the 16 possible targets, SCSI Device Pages 0 and 1.  Headers
 * are fetched first; the page bodies are then read best-effort (a
 * failed body read is logged but is not fatal as long as the headers
 * are valid).
 *
 * FIX: the "SPI Port Page 2 Header" debug print below used to print
 * the mpt_port_page1 header fields (copy-paste from the Page 1 print)
 * even though the header was just read into mpt_port_page2.Header;
 * it now prints mpt_port_page2's fields.
 */
1676 mpt_read_config_info_spi(struct mpt_softc *mpt)
1680 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0,
1681 0, &mpt->mpt_port_page0.Header,
1682 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1685 mpt_lprt(mpt, MPT_PRT_DEBUG,
1686 "SPI Port Page 0 Header: %x %x %x %x\n",
1687 mpt->mpt_port_page0.Header.PageVersion,
1688 mpt->mpt_port_page0.Header.PageLength,
1689 mpt->mpt_port_page0.Header.PageNumber,
1690 mpt->mpt_port_page0.Header.PageType);
1692 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1,
1693 0, &mpt->mpt_port_page1.Header,
1694 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1698 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
1699 mpt->mpt_port_page1.Header.PageVersion,
1700 mpt->mpt_port_page1.Header.PageLength,
1701 mpt->mpt_port_page1.Header.PageNumber,
1702 mpt->mpt_port_page1.Header.PageType);
1704 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2,
1705 /*PageAddress*/0, &mpt->mpt_port_page2.Header,
1706 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
/* Print the Page 2 header we just read (was mistakenly page 1). */
1710 mpt_lprt(mpt, MPT_PRT_DEBUG,
1711 "SPI Port Page 2 Header: %x %x %x %x\n",
1712 mpt->mpt_port_page2.Header.PageVersion,
1713 mpt->mpt_port_page2.Header.PageLength,
1714 mpt->mpt_port_page2.Header.PageNumber,
1715 mpt->mpt_port_page2.Header.PageType);
/* Device page headers for all 16 possible SPI targets. */
1717 for (i = 0; i < 16; i++) {
1718 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
1719 0, i, &mpt->mpt_dev_page0[i].Header,
1720 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1724 mpt_lprt(mpt, MPT_PRT_DEBUG,
1725 "SPI Target %d Device Page 0 Header: %x %x %x %x\n",
1726 i, mpt->mpt_dev_page0[i].Header.PageVersion,
1727 mpt->mpt_dev_page0[i].Header.PageLength,
1728 mpt->mpt_dev_page0[i].Header.PageNumber,
1729 mpt->mpt_dev_page0[i].Header.PageType);
1731 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
1732 1, i, &mpt->mpt_dev_page1[i].Header,
1733 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1737 mpt_lprt(mpt, MPT_PRT_DEBUG,
1738 "SPI Target %d Device Page 1 Header: %x %x %x %x\n",
1739 i, mpt->mpt_dev_page1[i].Header.PageVersion,
1740 mpt->mpt_dev_page1[i].Header.PageLength,
1741 mpt->mpt_dev_page1[i].Header.PageNumber,
1742 mpt->mpt_dev_page1[i].Header.PageType);
1746 * At this point, we don't *have* to fail. As long as we have
1747 * valid config header information, we can (barely) lurch
/* Read the page bodies; failures from here on are logged only. */
1751 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1752 &mpt->mpt_port_page0.Header,
1753 sizeof(mpt->mpt_port_page0),
1754 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1756 mpt_prt(mpt, "failed to read SPI Port Page 0\n");
1758 mpt_lprt(mpt, MPT_PRT_DEBUG,
1759 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
1760 mpt->mpt_port_page0.Capabilities,
1761 mpt->mpt_port_page0.PhysicalInterface);
1764 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1765 &mpt->mpt_port_page1.Header,
1766 sizeof(mpt->mpt_port_page1),
1767 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1769 mpt_prt(mpt, "failed to read SPI Port Page 1\n");
1771 mpt_lprt(mpt, MPT_PRT_DEBUG,
1772 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
1773 mpt->mpt_port_page1.Configuration,
1774 mpt->mpt_port_page1.OnBusTimerValue);
1777 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1778 &mpt->mpt_port_page2.Header,
1779 sizeof(mpt->mpt_port_page2),
1780 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1782 mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1784 mpt_lprt(mpt, MPT_PRT_DEBUG,
1785 "SPI Port Page 2: Flags %x Settings %x\n",
1786 mpt->mpt_port_page2.PortFlags,
1787 mpt->mpt_port_page2.PortSettings);
1788 for (i = 0; i < 16; i++) {
1789 mpt_lprt(mpt, MPT_PRT_DEBUG,
1790 "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1791 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1792 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1793 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
/* Per-target device page bodies (negotiated and requested parms). */
1797 for (i = 0; i < 16; i++) {
1798 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
1799 &mpt->mpt_dev_page0[i].Header,
1800 sizeof(*mpt->mpt_dev_page0),
1802 /*timeout_ms*/5000);
1805 "cannot read SPI Tgt %d Device Page 0\n", i);
1808 mpt_lprt(mpt, MPT_PRT_DEBUG,
1809 "SPI Tgt %d Page 0: NParms %x Information %x",
1810 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1811 mpt->mpt_dev_page0[i].Information);
1813 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
1814 &mpt->mpt_dev_page1[i].Header,
1815 sizeof(*mpt->mpt_dev_page1),
1817 /*timeout_ms*/5000);
1820 "cannot read SPI Tgt %d Device Page 1\n", i);
1823 mpt_lprt(mpt, MPT_PRT_DEBUG,
1824 "SPI Tgt %d Page 1: RParms %x Configuration %x\n",
1825 i, mpt->mpt_dev_page1[i].RequestedParameters,
1826 mpt->mpt_dev_page1[i].Configuration);
1832 * Validate SPI configuration information.
1834 * In particular, validate SPI Port Page 1.
/*
 * Sanity-check and, if needed, repair the SPI settings: force Port
 * Page 1's Configuration to the expected value derived from our
 * initiator ID, and reset each target's Device Page 1 negotiation
 * parameters to zero (async/narrow) as the starting point.
 */
1837 mpt_set_initial_config_spi(struct mpt_softc *mpt)
/* Expected Port Page 1 Configuration: our ID plus its bitmask
 * in the upper half, both derived from mpt_ini_id. */
1839 int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
1842 mpt->mpt_disc_enable = 0xff;
1843 mpt->mpt_tag_enable = 0;
1845 if (mpt->mpt_port_page1.Configuration != pp1val) {
1846 CONFIG_PAGE_SCSI_PORT_1 tmp;
1849 "SPI Port Page 1 Config value bad (%x)- should be %x\n",
1850 mpt->mpt_port_page1.Configuration, pp1val);
/* Write the corrected page, then read it back to verify it stuck. */
1851 tmp = mpt->mpt_port_page1;
1852 tmp.Configuration = pp1val;
1853 error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/0,
1854 &tmp.Header, sizeof(tmp),
1856 /*timeout_ms*/5000);
1859 error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1860 &tmp.Header, sizeof(tmp),
1862 /*timeout_ms*/5000);
1865 if (tmp.Configuration != pp1val) {
1867 "failed to reset SPI Port Page 1 Config value\n");
1870 mpt->mpt_port_page1 = tmp;
1873 for (i = 0; i < 16; i++) {
1874 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
1875 tmp = mpt->mpt_dev_page1[i];
/* Zero parameters = async, narrow transfers until renegotiated. */
1876 tmp.RequestedParameters = 0;
1877 tmp.Configuration = 0;
1878 mpt_lprt(mpt, MPT_PRT_DEBUG,
1879 "Set Tgt %d SPI DevicePage 1 values to %x 0 %x\n",
1880 i, tmp.RequestedParameters, tmp.Configuration);
1881 error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/i,
1882 &tmp.Header, sizeof(tmp),
1884 /*timeout_ms*/5000);
1887 error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
1888 &tmp.Header, sizeof(tmp),
1890 /*timeout_ms*/5000);
1893 mpt->mpt_dev_page1[i] = tmp;
1894 mpt_lprt(mpt, MPT_PRT_DEBUG,
1895 "SPI Tgt %d Page 1: RParm %x Configuration %x\n", i,
1896 mpt->mpt_dev_page1[i].RequestedParameters,
1897 mpt->mpt_dev_page1[i].Configuration);
/*
 * Send a PORT_ENABLE for 'port' and wait for it to complete.  FC and
 * SAS parts get a much longer timeout (30s vs 3s) because port enable
 * can involve fabric login / device discovery.
 */
1906 mpt_send_port_enable(struct mpt_softc *mpt, int port)
1909 MSG_PORT_ENABLE *enable_req;
1912 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1916 enable_req = req->req_vbuf;
1917 bzero(enable_req, sizeof *enable_req);
1919 enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
1920 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1921 enable_req->PortNumber = port;
1923 mpt_check_doorbell(mpt);
1924 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
1926 mpt_send_cmd(mpt, req);
1927 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
/* Discovery-capable transports need far longer to enable. */
1929 /*time_ms*/(mpt->is_sas || mpt->is_fc)? 30000 : 3000);
1931 mpt_prt(mpt, "port enable timed out\n");
1934 mpt_free_request(mpt, req);
1939 * Enable/Disable asynchronous event reporting.
1941 * NB: this is the first command we send via shared memory
1942 * instead of the handshake register.
/*
 * Fire-and-forget: the request is sent but (in the visible code)
 * not waited on; completion arrives via the EVENTS reply handler.
 */
1945 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
1948 MSG_EVENT_NOTIFY *enable_req;
1950 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1952 enable_req = req->req_vbuf;
1953 bzero(enable_req, sizeof *enable_req);
1955 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
1956 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
/* Switch: nonzero enables event notification, zero disables it. */
1957 enable_req->Switch = onoff;
1959 mpt_check_doorbell(mpt);
1960 mpt_lprt(mpt, MPT_PRT_DEBUG,
1961 "%sabling async events\n", onoff ? "en" : "dis");
1962 mpt_send_cmd(mpt, req);
1968 * Un-mask the interrupts on the chip.
1971 mpt_enable_ints(struct mpt_softc *mpt)
1973 /* Unmask everything except the doorbell interrupt. */
1974 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK)
1978 * Mask the interrupts on the chip.
1981 mpt_disable_ints(struct mpt_softc *mpt)
1983 /* Mask all interrupts */
1984 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1985 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
/* Hook the per-device "debug" sysctl up to mpt->verbose. */
1989 mpt_sysctl_attach(struct mpt_softc *mpt)
1991 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1992 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1994 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1995 "debug", CTLFLAG_RW, &mpt->verbose, 0,
1996 "Debugging/Verbose level");
/*
 * Offer this controller to every registered personality; record the
 * ones that probe and attach successfully in mpt_pers_mask.
 */
2000 mpt_attach(struct mpt_softc *mpt)
2004 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2005 struct mpt_personality *pers;
2008 pers = mpt_personalities[i];
/* probe() returning 0 means this personality claims the device. */
2012 if (pers->probe(mpt) == 0) {
2013 error = pers->attach(mpt);
2018 mpt->mpt_pers_mask |= (0x1 << pers->id);
/*
 * Shut down attached personalities in reverse attach order, then
 * reset the IOC without re-initializing it.
 */
2027 mpt_shutdown(struct mpt_softc *mpt)
2029 struct mpt_personality *pers;
2031 MPT_PERS_FOREACH_REVERSE(mpt, pers)
2032 pers->shutdown(mpt);
2034 mpt_reset(mpt, /*reinit*/FALSE);
/*
 * Detach personalities in reverse attach order, clearing each one's
 * bit from mpt_pers_mask as it goes.
 */
2039 mpt_detach(struct mpt_softc *mpt)
2041 struct mpt_personality *pers;
2043 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2045 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
/*
 * One-time core personality load: fill the reply-handler table with
 * the default handler, then install the specific handlers for the
 * events, config, and handshake callback indices.
 */
2053 mpt_core_load(struct mpt_personality *pers)
2058 * Setup core handlers and insert the default handler
2059 * into all "empty slots".
2061 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++)
2062 mpt_reply_handlers[i] = mpt_default_reply_handler;
2064 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2065 mpt_event_reply_handler;
2066 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2067 mpt_config_reply_handler;
2068 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2069 mpt_handshake_reply_handler;
2075 * Initialize per-instance driver data and perform
2076 * initial controller configuration.
2079 mpt_core_attach(struct mpt_softc *mpt)
2084 LIST_INIT(&mpt->ack_frames);
2086 /* Put all request buffers on the free list */
2087 TAILQ_INIT(&mpt->request_pending_list);
2088 TAILQ_INIT(&mpt->request_free_list);
2089 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++)
2090 mpt_free_request(mpt, &mpt->request_pool[val]);
2092 mpt_sysctl_attach(mpt);
2094 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2095 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
/* Bring the IOC to an operational state (reset, facts, init, ...). */
2097 error = mpt_configure_ioc(mpt);
/* Core personality shutdown: nothing to do (stub). */
2103 mpt_core_shutdown(struct mpt_softc *mpt)
/* Core personality detach: nothing to do (stub). */
2108 mpt_core_detach(struct mpt_softc *mpt)
2113 mpt_core_unload(struct mpt_personality *pers)
2115 /* Unload is always successful. */
/* Size of a firmware-upload request: the base FW_UPLOAD message with
 * its generic SGE union replaced by a transaction-context element
 * plus one SIMPLE32 SGE. */
2119 #define FW_UPLOAD_REQ_SIZE \
2120 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \
2121 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
/*
 * Retrieve a copy of the IOC's firmware image into the fw DMA buffer
 * (mpt->fw_image / mpt->fw_phys) using a handshake FW_UPLOAD request
 * with a transaction-context SGE followed by a single SIMPLE32 SGE.
 */
2124 mpt_upload_fw(struct mpt_softc *mpt)
2126 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2127 MSG_FW_UPLOAD_REPLY fw_reply;
2128 MSG_FW_UPLOAD *fw_req;
2129 FW_UPLOAD_TCSGE *tsge;
2134 memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2135 fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2136 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2137 fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2138 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
/* Transaction-context element describing the upload, then the SGE. */
2139 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
2140 tsge->DetailsLength = 12;
2141 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2142 tsge->ImageSize = htole32(mpt->fw_image_size);
2143 sge = (SGE_SIMPLE32 *)(tsge + 1);
2144 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2145 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2146 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST)
2147 flags <<= MPI_SGE_FLAGS_SHIFT;
2148 sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2149 sge->Address = htole32(mpt->fw_phys);
2150 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
/* (error check between send and receive elided in this excerpt) */
2153 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
/*
 * Write 'len' bytes (rounded up to whole 32-bit words) from 'data'
 * to IOC memory at 'addr' through the diagnostic PIO window.  The
 * diag address register auto-advances; I/O space is enabled around
 * the burst.
 */
2158 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2159 uint32_t *data, bus_size_t len)
2163 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
2164 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2165 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2166 while (data != data_end) {
2167 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2170 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
/*
 * Push the previously uploaded firmware image back down to the IOC
 * via the diagnostic window (main image plus any extended images),
 * set the reset vector, clear the "flash bad" flag, and release the
 * IOC processor to boot the new image.
 */
2174 mpt_download_fw(struct mpt_softc *mpt)
2176 MpiFwHeader_t *fw_hdr;
2178 uint32_t ext_offset;
2181 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2182 mpt->fw_image_size);
2184 error = mpt_enable_diag_mode(mpt);
2186 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
/* Halt the IOC's ARM processor while we rewrite its memory. */
2190 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2191 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2193 fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2194 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
/* Walk the chain of extended images; a zero offset terminates it. */
2197 ext_offset = fw_hdr->NextImageHeaderOffset;
2198 while (ext_offset != 0) {
2199 MpiExtImageHeader_t *ext;
2201 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2202 ext_offset = ext->NextImageHeaderOffset;
2204 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2208 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2209 /* Setup the address to jump to on reset. */
2210 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2211 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2214 * The controller sets the "flash bad" status after attempting
2215 * to auto-boot from flash. Clear the status so that the controller
2216 * will continue the boot process with our newly installed firmware.
/* Read-modify-write: address register must be re-set before the
 * write because the data access auto-advances it. */
2218 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2219 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2220 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2221 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2223 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2226 * Re-enable the processor and clear the boot halt flag.
2228 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2229 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2230 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2232 mpt_disable_diag_mode(mpt);
2237 * Allocate/Initialize data structures for the controller. Called
2238 * once at instance startup.
/*
 * Full bring-up sequence, retried up to MPT_MAX_TRYS times:
 *  reset (if needed) -> IOC FACTS -> segment-limit computation ->
 *  optional firmware upload (download-boot parts) -> PORT FACTS ->
 *  transport classification -> IOC enable -> config page reads.
 * Interrupts are enabled only after everything succeeds.
 */
2241 mpt_configure_ioc(struct mpt_softc *mpt)
2243 MSG_PORT_FACTS_REPLY pfp;
2244 MSG_IOC_FACTS_REPLY facts;
2247 uint32_t max_chain_depth;
2250 for (try = 0; try < MPT_MAX_TRYS; try++) {
2253 * No need to reset if the IOC is already in the READY state.
2255 * Force reset if initialization failed previously.
2256 * Note that a hard_reset of the second channel of a '929
2257 * will stop operation of the first channel. Hopefully, if the
2258 * first channel is ok, the second will not require a hard
2261 if (needreset || (mpt_rd_db(mpt) & MPT_DB_STATE_MASK) !=
2262 MPT_DB_STATE_READY) {
2263 if (mpt_reset(mpt, /*reinit*/FALSE) != MPT_OK)
2268 if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
2269 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
/* Cache the facts we need; the reply fields are little-endian. */
2274 mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
2275 mpt->request_frame_size = le16toh(facts.RequestFrameSize);
2276 mpt->ioc_facts_flags = facts.Flags;
2277 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2278 le16toh(facts.MsgVersion) >> 8,
2279 le16toh(facts.MsgVersion) & 0xFF,
2280 le16toh(facts.HeaderVersion) >> 8,
2281 le16toh(facts.HeaderVersion) & 0xFF);
2284 * Now that we know request frame size, we can calculate
2285 * the actual (reasonable) segment limit for read/write I/O.
2287 * This limit is constrained by:
2289 * + The size of each area we allocate per command (and how
2290 * many chain segments we can fit into it).
2291 * + The total number of areas we've set up.
2292 * + The actual chain depth the card will allow.
2294 * The first area's segment count is limited by the I/O request
2295 * at the head of it. We cannot allocate realistically more
2296 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2297 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2300 max_chain_depth = facts.MaxChainDepth;
2302 /* total number of request areas we (can) allocate */
2303 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2305 /* converted to the number of chain areas possible */
2306 mpt->max_seg_cnt *= MPT_NRFM(mpt);
2308 /* limited by the number of chain areas the card will support */
2309 if (mpt->max_seg_cnt > max_chain_depth) {
2310 mpt_lprt(mpt, MPT_PRT_DEBUG,
2311 "chain depth limited to %u (from %u)\n",
2312 max_chain_depth, mpt->max_seg_cnt);
2313 mpt->max_seg_cnt = max_chain_depth;
2316 /* converted to the number of simple sges in chain segments. */
2317 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
2319 mpt_lprt(mpt, MPT_PRT_DEBUG,
2320 "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
2321 mpt_lprt(mpt, MPT_PRT_DEBUG,
2322 "MsgLength=%u IOCNumber = %d\n",
2323 facts.MsgLength, facts.IOCNumber);
2324 mpt_lprt(mpt, MPT_PRT_DEBUG,
2325 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2326 "Request Frame Size %u bytes Max Chain Depth %u\n",
2327 mpt->mpt_global_credits, facts.BlockSize,
2328 mpt->request_frame_size << 2, max_chain_depth);
2329 mpt_lprt(mpt, MPT_PRT_DEBUG,
2330 "IOCFACTS: Num Ports %d, FWImageSize %d, "
2331 "Flags=%#x\n", facts.NumberOfPorts,
2332 le32toh(facts.FWImageSize), facts.Flags);
2335 if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
2336 struct mpt_map_info mi;
2340 * In some configurations, the IOC's firmware is
2341 * stored in a shared piece of system NVRAM that
2342 * is only accessable via the BIOS. In this
2343 * case, the firmware keeps a copy of firmware in
2344 * RAM until the OS driver retrieves it. Once
2345 * retrieved, we are responsible for re-downloading
2346 * the firmware after any hard-reset.
2348 mpt->fw_image_size = le32toh(facts.FWImageSize);
2349 error = mpt_dma_tag_create(mpt, mpt->parent_dmat,
2350 /*alignment*/1, /*boundary*/0,
2351 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2352 /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
2353 /*filterarg*/NULL, mpt->fw_image_size,
2354 /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size,
2355 /*flags*/0, &mpt->fw_dmat);
2357 mpt_prt(mpt, "cannot create fw dma tag\n");
2360 error = bus_dmamem_alloc(mpt->fw_dmat,
2361 (void **)&mpt->fw_image, BUS_DMA_NOWAIT,
2364 mpt_prt(mpt, "cannot allocate fw mem.\n");
2365 bus_dma_tag_destroy(mpt->fw_dmat);
2370 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2371 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest,
2373 mpt->fw_phys = mi.phys;
2375 error = mpt_upload_fw(mpt);
/* Upload failure: tear the fw DMA resources back down. */
2377 mpt_prt(mpt, "fw upload failed.\n");
2378 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2379 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2381 bus_dma_tag_destroy(mpt->fw_dmat);
2382 mpt->fw_image = NULL;
2387 if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) {
2388 mpt_prt(mpt, "mpt_get_portfacts failed\n");
2393 mpt_lprt(mpt, MPT_PRT_DEBUG,
2394 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n",
2395 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID,
2398 mpt->mpt_port_type = pfp.PortType;
2399 mpt->mpt_proto_flags = pfp.ProtocolFlags;
/* Only parallel SCSI, SAS, and FC ports are supported. */
2400 if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
2401 pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
2402 pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
2403 mpt_prt(mpt, "Unsupported Port Type (%x)\n",
2407 if (!(pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) {
2408 mpt_prt(mpt, "initiator role unsupported\n");
/* Record transport type (flag assignments elided in excerpt). */
2411 if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2414 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2421 mpt->mpt_ini_id = pfp.PortSCSIID;
2422 mpt->mpt_max_devices = pfp.MaxDevices;
2424 if (mpt_enable_ioc(mpt) != 0) {
2425 mpt_prt(mpt, "Unable to initialize IOC\n");
2430 * Read and set up initial configuration information
2431 * (IOC and SPI only for now)
2433 * XXX Should figure out what "personalities" are
2434 * available and defer all initialization junk to
2437 mpt_read_config_info_ioc(mpt);
2439 if (mpt->is_fc == 0 && mpt->is_sas == 0) {
2440 if (mpt_read_config_info_spi(mpt)) {
2443 if (mpt_set_initial_config_spi(mpt)) {
2448 /* Everything worked */
2452 if (try >= MPT_MAX_TRYS) {
2453 mpt_prt(mpt, "failed to initialize IOC");
2457 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling interrupts\n");
2459 mpt_enable_ints(mpt);
2464 mpt_enable_ioc(struct mpt_softc *mpt)
2469 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2470 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2474 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2476 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2477 mpt_prt(mpt, "IOC failed to go to run state\n");
2480 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2483 * Give it reply buffers
2485 * Do *not* exceed global credits.
2487 for (val = 0, pptr = mpt->reply_phys;
2488 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2489 pptr += MPT_REPLY_SIZE) {
2490 mpt_free_reply(mpt, pptr);
2491 if (++val == mpt->mpt_global_credits - 1)
2496 * Enable asynchronous event reporting
2498 mpt_send_event_request(mpt, 1);
2503 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2504 mpt_prt(mpt, "failed to enable port 0\n");
2507 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port 0\n");