2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * SPDX-License-Identifier: BSD-3-Clause
6 * Copyright (c) 2005, WHEEL Sp. z o.o.
7 * Copyright (c) 2005 Justin T. Gibbs.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 * substantially similar to the "NO WARRANTY" disclaimer below
17 * ("Disclaimer") and any redistribution must be conditioned upon including
18 * a substantially similar Disclaimer requirement for further binary
20 * 3. Neither the names of the above listed copyright holders nor the names
21 * of any contributors may be used to endorse or promote products derived
22 * from this software without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
34 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 * Some Breakage and Bug Fixing added later.
38 * Copyright (c) 2006, by Matthew Jacob
41 * Support from LSI-Logic has also gone a great deal toward making this a
42 * workable subsystem and is gratefully acknowledged.
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
48 #include <dev/mpt/mpt.h>
49 #include <dev/mpt/mpt_raid.h>
51 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
52 #include "dev/mpt/mpilib/mpi_raid.h"
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
60 #include <sys/callout.h>
61 #include <sys/kthread.h>
62 #include <sys/sysctl.h>
64 #include <machine/stdarg.h>
66 struct mpt_raid_action_result
69 MPI_RAID_VOL_INDICATOR indicator_struct;
70 uint32_t new_settings;
71 uint8_t phys_disk_num;
73 uint16_t action_status;
/*
 * REQ_TO_RAID_ACTION_RESULT: the RAID action result is stored directly
 * after the MSG_RAID_ACTION_REQUEST message in the request's virtual
 * buffer (pointer arithmetic on the request type skips the message).
 * REQ_IOCSTATUS: a request's IOC status with the status bits masked.
 * NOTE(review): the leading decimal token on each line below appears
 * to be a line-number artifact from extraction of this file.
 */
76 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
77 (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
79 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
/*
 * mpt(4) personality entry points implemented by this RAID module.
 * The handler typedefs come from the core mpt(4) driver.
 */
81 static mpt_probe_handler_t mpt_raid_probe;
82 static mpt_attach_handler_t mpt_raid_attach;
83 static mpt_enable_handler_t mpt_raid_enable;
84 static mpt_event_handler_t mpt_raid_event;
85 static mpt_shutdown_handler_t mpt_raid_shutdown;
86 static mpt_reset_handler_t mpt_raid_ioc_reset;
87 static mpt_detach_handler_t mpt_raid_detach;
89 static struct mpt_personality mpt_raid_personality =
92 .probe = mpt_raid_probe,
93 .attach = mpt_raid_attach,
94 .enable = mpt_raid_enable,
95 .event = mpt_raid_event,
96 .reset = mpt_raid_ioc_reset,
97 .shutdown = mpt_raid_shutdown,
98 .detach = mpt_raid_detach,
101 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
102 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
/* Reply-frame handling for RAID ACTION requests. */
104 static mpt_reply_handler_t mpt_raid_reply_handler;
105 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
106 MSG_DEFAULT_REPLY *reply_frame);
/* Background RAID-status monitoring thread lifecycle. */
107 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
108 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
109 static void mpt_raid_thread(void *arg);
110 static timeout_t mpt_raid_timer;
/* Volume configuration helpers. */
112 static void mpt_enable_vol(struct mpt_softc *mpt,
113 struct mpt_raid_volume *mpt_vol, int enable);
115 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
116 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
118 static void mpt_raid_sysctl_attach(struct mpt_softc *);
/* Human-readable state strings and prefixed console output. */
120 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
121 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
122 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
123 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
124 const char *fmt, ...);
125 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
126 const char *fmt, ...);
/* Issue a RAID ACTION firmware command, optionally waiting for it. */
128 static int mpt_issue_raid_req(struct mpt_softc *mpt,
129 struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
130 u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
131 int write, int wait);
/* Refresh cached RAID pages and schedule the next refresh. */
133 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
134 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
/* Reply-handler id assigned by mpt_register_handler() at attach. */
136 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
139 mpt_vol_type(struct mpt_raid_volume *vol)
141 switch (vol->config_page->VolumeType) {
142 case MPI_RAID_VOL_TYPE_IS:
144 case MPI_RAID_VOL_TYPE_IME:
146 case MPI_RAID_VOL_TYPE_IM:
154 mpt_vol_state(struct mpt_raid_volume *vol)
156 switch (vol->config_page->VolumeStatus.State) {
157 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
159 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
161 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
169 mpt_disk_state(struct mpt_raid_disk *disk)
171 switch (disk->config_page.PhysDiskStatus.State) {
172 case MPI_PHYSDISK0_STATUS_ONLINE:
174 case MPI_PHYSDISK0_STATUS_MISSING:
176 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
177 return ("Incompatible");
178 case MPI_PHYSDISK0_STATUS_FAILED:
180 case MPI_PHYSDISK0_STATUS_INITIALIZING:
181 return ("Initializing");
182 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
183 return ("Offline Requested");
184 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
185 return ("Failed per Host Request");
186 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
194 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
195 const char *fmt, ...)
199 printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
200 (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
201 vol->config_page->VolumeBus, vol->config_page->VolumeID);
208 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
209 const char *fmt, ...)
213 if (disk->volume != NULL) {
214 printf("(%s:vol%d:%d): ",
215 device_get_nameunit(mpt->dev),
216 disk->volume->config_page->VolumeID,
217 disk->member_number);
219 printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
220 disk->config_page.PhysDiskBus,
221 disk->config_page.PhysDiskID);
229 mpt_raid_async(void *callback_arg, u_int32_t code,
230 struct cam_path *path, void *arg)
232 struct mpt_softc *mpt;
234 mpt = (struct mpt_softc*)callback_arg;
236 case AC_FOUND_DEVICE:
238 struct ccb_getdev *cgd;
239 struct mpt_raid_volume *mpt_vol;
241 cgd = (struct ccb_getdev *)arg;
246 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
247 cgd->ccb_h.target_id);
249 RAID_VOL_FOREACH(mpt, mpt_vol) {
250 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
253 if (mpt_vol->config_page->VolumeID
254 == cgd->ccb_h.target_id) {
255 mpt_adjust_queue_depth(mpt, mpt_vol, path);
266 mpt_raid_probe(struct mpt_softc *mpt)
269 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
276 mpt_raid_attach(struct mpt_softc *mpt)
278 struct ccb_setasync csa;
279 mpt_handler_t handler;
282 mpt_callout_init(mpt, &mpt->raid_timer);
284 error = mpt_spawn_raid_thread(mpt);
286 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
291 handler.reply_handler = mpt_raid_reply_handler;
292 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
295 mpt_prt(mpt, "Unable to register RAID haandler!\n");
299 xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
300 csa.ccb_h.func_code = XPT_SASYNC_CB;
301 csa.event_enable = AC_FOUND_DEVICE;
302 csa.callback = mpt_raid_async;
303 csa.callback_arg = mpt;
304 xpt_action((union ccb *)&csa);
305 if (csa.ccb_h.status != CAM_REQ_CMP) {
306 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
307 "CAM async handler.\n");
311 mpt_raid_sysctl_attach(mpt);
315 mpt_raid_detach(mpt);
/*
 * Personality enable hook: no additional work is needed beyond what
 * attach performed, so always report success.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
327 mpt_raid_detach(struct mpt_softc *mpt)
329 struct ccb_setasync csa;
330 mpt_handler_t handler;
332 mpt_callout_drain(mpt, &mpt->raid_timer);
335 mpt_terminate_raid_thread(mpt);
336 handler.reply_handler = mpt_raid_reply_handler;
337 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
339 xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
340 csa.ccb_h.func_code = XPT_SASYNC_CB;
341 csa.event_enable = 0;
342 csa.callback = mpt_raid_async;
343 csa.callback_arg = mpt;
344 xpt_action((union ccb *)&csa);
/*
 * Personality hook invoked after an IOC reset of the given type.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
/*
 * Text for MPI_EVENT_INTEGRATED_RAID reason codes, indexed directly
 * by EVENT_DATA_RAID.ReasonCode (bounds-checked with NUM_ELEMENTS()
 * by mpt_raid_event() before use).
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications: locate the affected
 * volume and/or physical disk, mark their cached config pages stale so
 * the RAID thread refreshes them, log the event, and wake the thread.
 * NOTE(review): this excerpt is missing lines (braces, break/continue
 * statements, return paths); the leading decimal tokens look like
 * line-number artifacts from extraction — verify against upstream.
 */
372 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
373 MSG_EVENT_NOTIFY_REPLY *msg)
375 EVENT_DATA_RAID *raid_event;
376 struct mpt_raid_volume *mpt_vol;
377 struct mpt_raid_disk *mpt_disk;
378 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
/* Only Integrated RAID events are handled here. */
382 if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
386 raid_event = (EVENT_DATA_RAID *)&msg->Data;
/* Find the active volume matching the event's bus/ID, if any. */
390 if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
391 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
392 mpt_vol = &mpt->raid_volumes[i];
393 vol_pg = mpt_vol->config_page;
395 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
398 if (vol_pg->VolumeID == raid_event->VolumeID
399 && vol_pg->VolumeBus == raid_event->VolumeBus)
402 if (i >= mpt->ioc_page2->MaxVolumes) {
/* 0xFF means "no physical disk associated with this event". */
409 if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
410 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
411 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
/* Invalidate cached state per reason code. */
417 switch(raid_event->ReasonCode) {
418 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
419 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
421 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
422 if (mpt_vol != NULL) {
423 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
424 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
427 * Coalesce status messages into one
428 * per background run of our RAID thread.
429 * This removes "spurious" status messages
436 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
437 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
439 if (mpt_vol != NULL) {
440 mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
443 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
444 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
447 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
448 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
450 if (mpt_disk != NULL) {
451 mpt_disk->flags &= ~MPT_RDF_UP2DATE;
454 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
457 case MPI_EVENT_RAID_RC_SMART_DATA:
458 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
/* Log the event with the most specific prefix available. */
463 if (mpt_disk != NULL) {
464 mpt_disk_prt(mpt, mpt_disk, "");
465 } else if (mpt_vol != NULL) {
466 mpt_vol_prt(mpt, mpt_vol, "");
468 mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
469 raid_event->VolumeID);
471 if (raid_event->PhysDiskNum != 0xFF)
472 mpt_prtc(mpt, ":%d): ",
473 raid_event->PhysDiskNum);
475 mpt_prtc(mpt, "): ");
478 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
479 mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
480 raid_event->ReasonCode);
482 mpt_prtc(mpt, "%s\n",
483 raid_event_txt[raid_event->ReasonCode]);
486 if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
487 /* XXX Use CAM's print sense for this... */
488 if (mpt_disk != NULL)
489 mpt_disk_prt(mpt, mpt_disk, "");
491 mpt_prt(mpt, "Volume(%d:%d:%d: ",
492 raid_event->VolumeBus, raid_event->VolumeID,
493 raid_event->PhysDiskNum);
494 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
495 raid_event->ASC, raid_event->ASCQ);
/* Wake the RAID thread to refresh invalidated state. */
498 mpt_raid_wakeup(mpt);
503 mpt_raid_shutdown(struct mpt_softc *mpt)
505 struct mpt_raid_volume *mpt_vol;
507 if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
511 mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
512 RAID_VOL_FOREACH(mpt, mpt_vol) {
513 mpt_verify_mwce(mpt, mpt_vol);
/*
 * Reply handler registered for RAID ACTION requests.  With a reply
 * frame, delegate parsing to mpt_raid_reply_frame_handler(); then mark
 * the request done, remove it from the pending list, and either wake a
 * sleeping waiter or free the request.
 * NOTE(review): lines are missing from this excerpt (declaration and
 * initialization of free_req, wakeup call, return); leading decimal
 * tokens look like extraction artifacts — verify against upstream.
 */
518 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
519 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
527 if (reply_frame != NULL)
528 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
530 else if (req->ccb != NULL) {
531 /* Complete Quiesce CCB with error... */
535 req->state &= ~REQ_STATE_QUEUED;
536 req->state |= REQ_STATE_DONE;
537 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
/* A waiter owns the request; otherwise it is freed here. */
539 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
541 } else if (free_req) {
542 mpt_free_request(mpt, req);
549 * Parse additional completion information in the reply
550 * frame for RAID I/O requests.
553 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
554 MSG_DEFAULT_REPLY *reply_frame)
556 MSG_RAID_ACTION_REPLY *reply;
557 struct mpt_raid_action_result *action_result;
558 MSG_RAID_ACTION_REQUEST *rap;
560 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
561 req->IOCStatus = le16toh(reply->IOCStatus);
562 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
564 switch (rap->Action) {
565 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
566 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
568 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
569 mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
574 action_result = REQ_TO_RAID_ACTION_RESULT(req);
575 memcpy(&action_result->action_data, &reply->ActionData,
576 sizeof(action_result->action_data));
577 action_result->action_status = le16toh(reply->ActionStatus);
/*
 * Utility routine to perform a RAID action command: build the
 * MSG_RAID_ACTION_REQUEST in the request's virtual buffer (targeting
 * either a volume, or a single physical disk when 'disk' is non-NULL),
 * attach a single simple SGE describing the optional data buffer, send
 * it to the IOC, and optionally wait up to 2 seconds for completion.
 * NOTE(review): this excerpt is missing lines (local declarations,
 * rap initialization from req->req_vbuf, the non-wait return path);
 * leading decimal tokens look like extraction artifacts.
 */
585 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
586 struct mpt_raid_disk *disk, request_t *req, u_int Action,
587 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
590 MSG_RAID_ACTION_REQUEST *rap;
594 memset(rap, 0, sizeof *rap);
595 rap->Action = Action;
596 rap->ActionDataWord = htole32(ActionDataWord);
597 rap->Function = MPI_FUNCTION_RAID_ACTION;
598 rap->VolumeID = vol->config_page->VolumeID;
599 rap->VolumeBus = vol->config_page->VolumeBus;
601 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
/* 0xFF tells the firmware the action targets the whole volume. */
603 rap->PhysDiskNum = 0xFF;
604 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
605 se->Address = htole32(addr);
606 MPI_pSGE_SET_LENGTH(se, len);
607 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
608 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
609 MPI_SGE_FLAGS_END_OF_LIST |
610 (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
611 se->FlagsLength = htole32(se->FlagsLength);
/* Route the reply to this personality's handler. */
612 rap->MsgContext = htole32(req->index | raid_handler_id);
614 mpt_check_doorbell(mpt);
615 mpt_send_cmd(mpt, req);
618 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
619 /*sleep_ok*/FALSE, /*time_ms*/2000));
625 /*************************** RAID Status Monitoring ***************************/
627 mpt_spawn_raid_thread(struct mpt_softc *mpt)
632 * Freeze out any CAM transactions until our thread
633 * is able to run at least once. We need to update
634 * our RAID pages before acception I/O or we may
635 * reject I/O to an ID we later determine is for a
639 xpt_freeze_simq(mpt->phydisk_sim, 1);
641 error = kproc_create(mpt_raid_thread, mpt,
642 &mpt->raid_thread, /*flags*/0, /*altstack*/0,
643 "mpt_raid%d", mpt->unit);
646 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
653 mpt_terminate_raid_thread(struct mpt_softc *mpt)
656 if (mpt->raid_thread == NULL) {
659 mpt->shutdwn_raid = 1;
660 wakeup(&mpt->raid_volumes);
662 * Sleep on a slightly different location
663 * for this interlock just for added safety.
665 mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
/*
 * Body of the RAID monitoring kernel process: sleep until woken, then
 * refresh cached RAID data, release the phydisk SIM queue after the
 * first successful snapshot, and rescan the passthrough bus when a
 * rescan was requested.  Clears mpt->raid_thread and does a final
 * wakeup so mpt_terminate_raid_thread() can observe the exit.
 * NOTE(review): lines are missing from this excerpt (locking,
 * firstrun handling, the xpt_rescan call, kproc_exit); leading
 * decimal tokens look like extraction artifacts — verify upstream.
 */
669 mpt_raid_thread(void *arg)
671 struct mpt_softc *mpt;
674 mpt = (struct mpt_softc *)arg;
677 while (mpt->shutdwn_raid == 0) {
679 if (mpt->raid_wakeup == 0) {
680 mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
684 mpt->raid_wakeup = 0;
686 if (mpt_refresh_raid_data(mpt)) {
687 mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
692 * Now that we have our first snapshot of RAID data,
693 * allow CAM to access our physical disk bus.
697 xpt_release_simq(mpt->phydisk_sim, TRUE);
700 if (mpt->raid_rescan != 0) {
704 mpt->raid_rescan = 0;
707 ccb = xpt_alloc_ccb();
710 error = xpt_create_path(&ccb->ccb_h.path, NULL,
711 cam_sim_path(mpt->phydisk_sim),
712 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
713 if (error != CAM_REQ_CMP) {
715 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
/* Signal mpt_terminate_raid_thread() that we are gone. */
721 mpt->raid_thread = NULL;
722 wakeup(&mpt->raid_thread);
/*
 * Timeout handler armed by mpt_raid_quiesce_disk(); intended to fail
 * the quiesce CCB if the firmware does not respond in time.
 * NOTE(review): the body is missing from this excerpt beyond the
 * placeholder comment — verify against upstream.
 */
729 mpt_raid_quiesce_timeout(void *arg)
732 /* Complete the CCB with error */
736 static timeout_t mpt_raid_quiesce_timeout;
/*
 * Quiesce physical I/O to a member disk before pass-through access:
 * freeze the device queue, issue MPI_RAID_ACTION_QUIESCE_PHYS_IO, and
 * report CAM status — CAM_REQ_CMP if already quiesced, CAM_REQ_INPROG
 * while the quiesce is pending, CAM_REQ_CMP_ERR on failure/timeout.
 * NOTE(review): lines are missing from this excerpt (local
 * declarations, request allocation, brace/else structure); leading
 * decimal tokens look like extraction artifacts — verify upstream.
 */
738 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
744 if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
745 return (CAM_REQ_CMP)
746
747 if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
750 mpt_disk->flags |= MPT_RDF_QUIESCING;
751 xpt_freeze_devq(ccb->ccb_h.path, 1);
753 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
754 MPI_RAID_ACTION_QUIESCE_PHYS_IO,
755 /*ActionData*/0, /*addr*/0,
756 /*len*/0, /*write*/FALSE,
759 return (CAM_REQ_CMP_ERR);
/* Arm a 5 second timeout to fail the CCB if firmware stalls. */
761 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
763 if (rv == ETIMEDOUT) {
764 mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
765 "Quiece Timed-out\n");
766 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
767 return (CAM_REQ_CMP_ERR);
770 ar = REQ_TO_RAID_ACTION_RESULT(req);
772 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
773 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
774 mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
775 "%d:%x:%x\n", rv, req->IOCStatus,
777 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
778 return (CAM_REQ_CMP_ERR);
781 return (CAM_REQ_INPROG);
783 return (CAM_REQUEUE_REQ);
787 /* XXX Ignores that there may be multiple buses/IOCs involved. */
789 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
791 struct mpt_raid_disk *mpt_disk;
793 mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
794 if (ccb->ccb_h.target_id < mpt->raid_max_disks
795 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
796 *tgt = mpt_disk->config_page.PhysDiskID;
799 mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
800 ccb->ccb_h.target_id);
804 /* XXX Ignores that there may be multiple buses/IOCs involved. */
806 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
808 struct mpt_raid_disk *mpt_disk;
811 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
813 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
814 mpt_disk = &mpt->raid_disks[i];
815 if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
816 mpt_disk->config_page.PhysDiskID == tgt)
823 /* XXX Ignores that there may be multiple buses/IOCs involved. */
825 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
827 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
828 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
830 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
833 ioc_vol = mpt->ioc_page2->RaidVolume;
834 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
835 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
836 if (ioc_vol->VolumeID == tgt) {
/*
 * Enable or disable a RAID volume via a firmware RAID ACTION command,
 * skipping the request when the volume is already in the desired
 * state.  Logs (but does not propagate) timeout and status failures.
 * NOTE(review): lines are missing from this excerpt (local
 * declarations, early-return braces, NULL check after
 * mpt_get_request); leading decimal tokens look like extraction
 * artifacts — verify against upstream.
 */
845 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
849 struct mpt_raid_action_result *ar;
850 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
854 vol_pg = mpt_vol->config_page;
855 enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
858 * If the setting matches the configuration,
859 * there is nothing to do.
861 if ((enabled && enable)
862 || (!enabled && !enable))
865 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
867 mpt_vol_prt(mpt, mpt_vol,
868 "mpt_enable_vol: Get request failed!\n");
872 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
873 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
874 : MPI_RAID_ACTION_DISABLE_VOLUME,
875 /*data*/0, /*addr*/0, /*len*/0,
876 /*write*/FALSE, /*wait*/TRUE);
877 if (rv == ETIMEDOUT) {
878 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
879 "%s Volume Timed-out\n",
880 enable ? "Enable" : "Disable");
883 ar = REQ_TO_RAID_ACTION_RESULT(req);
885 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
886 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
887 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
888 enable ? "Enable" : "Disable",
889 rv, req->IOCStatus, ar->action_status);
892 mpt_free_request(mpt, req);
/*
 * Reconcile a volume's member-disk write-cache-enable (MWCE) setting
 * with the driver policy (on, off, rebuild-only, or "no change").  If
 * a change is required, toggle the WCE bit in a copy of the volume
 * settings and push it to firmware with CHANGE_VOLUME_SETTINGS; the
 * cached settings are toggled back on failure so they track firmware.
 * NOTE(review): lines are missing from this excerpt (local
 * declarations, the return/break statements inside the policy switch,
 * NULL check after mpt_get_request); leading decimal tokens look like
 * extraction artifacts — verify against upstream.
 */
897 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
900 struct mpt_raid_action_result *ar;
901 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
907 vol_pg = mpt_vol->config_page;
908 resyncing = vol_pg->VolumeStatus.Flags
909 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
910 mwce = vol_pg->VolumeSettings.Settings
911 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
914 * If the setting matches the configuration,
915 * there is nothing to do.
917 switch (mpt->raid_mwce_setting) {
918 case MPT_RAID_MWCE_REBUILD_ONLY:
919 if ((resyncing && mwce) || (!resyncing && !mwce)) {
922 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
923 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
925 * Wait one more status update to see if
926 * resyncing gets enabled.  It gets disabled
927 * temporarily when WCE is changed.
932 case MPT_RAID_MWCE_ON:
936 case MPT_RAID_MWCE_OFF:
940 case MPT_RAID_MWCE_NC:
944 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
946 mpt_vol_prt(mpt, mpt_vol,
947 "mpt_verify_mwce: Get request failed!\n");
/* Toggle WCE into a scratch copy, then restore the cached page. */
951 vol_pg->VolumeSettings.Settings ^=
952 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
953 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
954 vol_pg->VolumeSettings.Settings ^=
955 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
956 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
957 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
958 data, /*addr*/0, /*len*/0,
959 /*write*/FALSE, /*wait*/TRUE);
960 if (rv == ETIMEDOUT) {
961 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
962 "Write Cache Enable Timed-out\n");
965 ar = REQ_TO_RAID_ACTION_RESULT(req);
967 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
968 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
969 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
970 "%d:%x:%x\n", rv, req->IOCStatus,
/* On success, commit the toggled setting to the cached page. */
973 vol_pg->VolumeSettings.Settings ^=
974 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
976 mpt_free_request(mpt, req);
/*
 * Reconcile a volume's resync rate and resync priority with the
 * driver's configured rate.  Two firmware paths: SET_RESYNC_RATE when
 * the numeric rate differs, or CHANGE_VOLUME_SETTINGS to flip the
 * high-priority-resync bit when only the priority classification
 * (rate >= 128 means high priority) differs.  No-op when the policy
 * is MPT_RAID_RESYNC_RATE_NC ("no change").
 * NOTE(review): lines are missing from this excerpt (local
 * declarations, NULL checks after mpt_get_request, brace structure);
 * leading decimal tokens look like extraction artifacts.
 */
980 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
983 struct mpt_raid_action_result *ar;
984 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
988 vol_pg = mpt_vol->config_page;
990 if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
994 * If the current RAID resync rate does not
995 * match our configured rate, update it.
997 prio = vol_pg->VolumeSettings.Settings
998 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
999 if (vol_pg->ResyncRate != 0
1000 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
1002 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1004 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1005 "Get request failed!\n");
1009 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1010 MPI_RAID_ACTION_SET_RESYNC_RATE,
1011 mpt->raid_resync_rate, /*addr*/0,
1012 /*len*/0, /*write*/FALSE, /*wait*/TRUE);
1013 if (rv == ETIMEDOUT) {
1014 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1015 "Resync Rate Setting Timed-out\n");
1019 ar = REQ_TO_RAID_ACTION_RESULT(req);
1021 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1022 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1023 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1024 "%d:%x:%x\n", rv, req->IOCStatus,
/* Keep the cached page in sync with the firmware's new rate. */
1027 vol_pg->ResyncRate = mpt->raid_resync_rate;
1028 mpt_free_request(mpt, req);
1029 } else if ((prio && mpt->raid_resync_rate < 128)
1030 || (!prio && mpt->raid_resync_rate >= 128)) {
1033 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1035 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1036 "Get request failed!\n");
/* Toggle the priority bit into a scratch copy, then restore. */
1040 vol_pg->VolumeSettings.Settings ^=
1041 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1042 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1043 vol_pg->VolumeSettings.Settings ^=
1044 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1045 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1046 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1047 data, /*addr*/0, /*len*/0,
1048 /*write*/FALSE, /*wait*/TRUE);
1049 if (rv == ETIMEDOUT) {
1050 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1051 "Resync Rate Setting Timed-out\n");
1054 ar = REQ_TO_RAID_ACTION_RESULT(req);
1056 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1057 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1058 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1059 "%d:%x:%x\n", rv, req->IOCStatus,
1062 vol_pg->VolumeSettings.Settings ^=
1063 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1066 mpt_free_request(mpt, req);
1071 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1072 struct cam_path *path)
1074 struct ccb_relsim crs;
1076 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1077 crs.ccb_h.func_code = XPT_REL_SIMQ;
1078 crs.ccb_h.flags = CAM_DEV_QFREEZE;
1079 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1080 crs.openings = mpt->raid_queue_depth;
1081 xpt_action((union ccb *)&crs);
1082 if (crs.ccb_h.status != CAM_REQ_CMP)
1083 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1084 "with CAM status %#x\n", crs.ccb_h.status);
/*
 * Print a full description of a RAID volume to the console: decoded
 * volume settings bits, hot spare pool membership, and each member
 * disk with its role (Primary/Secondary for RAID-1, stripe position
 * otherwise) plus decoded status flags and state.
 * NOTE(review): lines are missing from this excerpt (break
 * statements, brace structure, the member-state switch header,
 * local declarations such as mask/f/s); leading decimal tokens look
 * like extraction artifacts — verify against upstream.
 */
1088 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1090 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1093 vol_pg = mpt_vol->config_page;
1094 mpt_vol_prt(mpt, mpt_vol, "Settings (");
/* Walk each settings bit and print a label for the known ones. */
1095 for (i = 1; i <= 0x8000; i <<= 1) {
1096 switch (vol_pg->VolumeSettings.Settings & i) {
1097 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1098 mpt_prtc(mpt, " Member-WCE");
1100 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1101 mpt_prtc(mpt, " Offline-On-SMART-Err");
1103 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1104 mpt_prtc(mpt, " Hot-Plug-Spares");
1106 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1107 mpt_prtc(mpt, " High-Priority-ReSync");
1113 mpt_prtc(mpt, " )\n");
1114 if (vol_pg->VolumeSettings.HotSparePool != 0) {
1115 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1116 powerof2(vol_pg->VolumeSettings.HotSparePool)
1118 for (i = 0; i < 8; i++) {
1122 if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1124 mpt_prtc(mpt, " %d", i);
1126 mpt_prtc(mpt, "\n");
1128 mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1129 for (i = 0; i < vol_pg->NumPhysDisks; i++){
1130 struct mpt_raid_disk *mpt_disk;
1131 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1132 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1135 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1136 disk_pg = &mpt_disk->config_page;
1138 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1139 pt_bus, disk_pg->PhysDiskID);
1140 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1141 mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1142 "Primary" : "Secondary");
1144 mpt_prtc(mpt, "Stripe Position %d",
1145 mpt_disk->member_number);
1147 f = disk_pg->PhysDiskStatus.Flags;
1148 s = disk_pg->PhysDiskStatus.State;
1149 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1150 mpt_prtc(mpt, " Out of Sync");
1152 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1153 mpt_prtc(mpt, " Quiesced");
1155 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1156 mpt_prtc(mpt, " Inactive");
1158 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1159 mpt_prtc(mpt, " Was Optimal");
1161 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1162 mpt_prtc(mpt, " Was Non-Optimal");
/* Member disk state labels. */
1165 case MPI_PHYSDISK0_STATUS_ONLINE:
1166 mpt_prtc(mpt, " Online");
1168 case MPI_PHYSDISK0_STATUS_MISSING:
1169 mpt_prtc(mpt, " Missing");
1171 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1172 mpt_prtc(mpt, " Incompatible");
1174 case MPI_PHYSDISK0_STATUS_FAILED:
1175 mpt_prtc(mpt, " Failed");
1177 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1178 mpt_prtc(mpt, " Initializing");
1180 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1181 mpt_prtc(mpt, " Requested Offline");
1183 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1184 mpt_prtc(mpt, " Requested Failed");
1186 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1188 mpt_prtc(mpt, " Offline Other (%x)", s);
1191 mpt_prtc(mpt, "\n");
/*
 * Print a physical disk's location on both the real SCSI bus and the
 * pass-through bus, followed by its hot spare pool membership bits.
 * NOTE(review): lines are missing from this excerpt (local
 * declarations such as mask, an early return, brace structure);
 * leading decimal tokens look like extraction artifacts.
 */
1196 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1198 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1199 int rd_bus = cam_sim_bus(mpt->sim);
1200 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1203 disk_pg = &mpt_disk->config_page;
1204 mpt_disk_prt(mpt, mpt_disk,
1205 "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1206 device_get_nameunit(mpt->dev), rd_bus,
1207 disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1208 pt_bus, mpt_disk - mpt->raid_disks);
1209 if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1211 mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1212 powerof2(disk_pg->PhysDiskSettings.HotSparePool)
/* The hot spare pool field is an 8-bit membership mask. */
1214 for (i = 0; i < 8; i++) {
1218 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1220 mpt_prtc(mpt, " %d", i);
1222 mpt_prtc(mpt, "\n");
/*
 * Refresh the cached RAID Physical Disk Page 0 for one disk: read the
 * config page header, then the current page contents, and byte-swap
 * them to host order.  Errors are logged and the stale cache is kept.
 * NOTE(review): lines are missing from this excerpt (the rv error
 * checks/returns between the reads, closing braces); leading decimal
 * tokens look like extraction artifacts — verify against upstream.
 */
1226 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1227 IOC_3_PHYS_DISK *ioc_disk)
1231 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1232 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1233 &mpt_disk->config_page.Header,
1234 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1236 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1237 "Failed to read RAID Disk Hdr(%d)\n",
1238 ioc_disk->PhysDiskNum);
1241 rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1242 &mpt_disk->config_page.Header,
1243 sizeof(mpt_disk->config_page),
1244 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1246 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1247 "Failed to read RAID Disk Page(%d)\n",
1248 ioc_disk->PhysDiskNum);
/* Convert the freshly read page to host byte order. */
1249 mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
/*
 * Refresh the cached RAID Volume Page 0 for one volume: read the page
 * header and contents, byte-swap to host order, mark the volume
 * active, rebind its member-disk entries, and — when a resync is in
 * progress — fetch the resync progress indicator via a RAID ACTION
 * INDICATOR_STRUCT request.
 * NOTE(review): lines are missing from this excerpt (local
 * declarations, rv error returns, NULL check after mpt_get_request,
 * brace structure); leading decimal tokens look like extraction
 * artifacts — verify against upstream.
 */
1253 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1254 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1256 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1257 struct mpt_raid_action_result *ar;
1262 vol_pg = mpt_vol->config_page;
1263 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1265 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1266 ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1268 mpt_vol_prt(mpt, mpt_vol,
1269 "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1270 ioc_vol->VolumePageNumber);
1274 rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1275 &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1277 mpt_vol_prt(mpt, mpt_vol,
1278 "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1279 ioc_vol->VolumePageNumber);
1282 mpt2host_config_page_raid_vol_0(vol_pg);
1284 mpt_vol->flags |= MPT_RVF_ACTIVE;
1286 /* Update disk entry array data. */
1287 for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1288 struct mpt_raid_disk *mpt_disk;
1289 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1290 mpt_disk->volume = mpt_vol;
1291 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1292 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1293 mpt_disk->member_number--;
/* Only fetch progress when a resync is actually running. */
1297 if ((vol_pg->VolumeStatus.Flags
1298 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1301 req = mpt_get_request(mpt, TRUE);
1303 mpt_vol_prt(mpt, mpt_vol,
1304 "mpt_refresh_raid_vol: Get request failed!\n");
1307 rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1308 MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1309 if (rv == ETIMEDOUT) {
1310 mpt_vol_prt(mpt, mpt_vol,
1311 "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1312 mpt_free_request(mpt, req);
1316 ar = REQ_TO_RAID_ACTION_RESULT(req);
1318 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1319 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1320 memcpy(&mpt_vol->sync_progress,
1321 &ar->action_data.indicator_struct,
1322 sizeof(mpt_vol->sync_progress));
1323 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1325 mpt_vol_prt(mpt, mpt_vol,
1326 "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1328 mpt_free_request(mpt, req);
1332 * Update in-core information about RAID support. We update any entries
1333 * that didn't previously exist or have been marked as needing to
1334 * be updated by our event handler. Interesting changes are displayed
1338 mpt_refresh_raid_data(struct mpt_softc *mpt)
1340 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1341 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1342 IOC_3_PHYS_DISK *ioc_disk;
1343 IOC_3_PHYS_DISK *ioc_last_disk;
1344 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1348 u_int nonopt_volumes;
/* RAID support requires both IOC Page 2 (volumes) and Page 3 (disks). */
1350 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1355 * Mark all items as unreferenced by the configuration.
1356 * This allows us to find, report, and discard stale
1359 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1360 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1362 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1363 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1367 * Get Physical Disk information.
/* PageLength is in 32-bit words; convert to bytes for the read. */
1369 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1370 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1371 &mpt->ioc_page3->Header, len,
1372 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1375 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
/* Byte-swap page 3 from MPI to host order before walking it. */
1378 mpt2host_config_page_ioc3(mpt->ioc_page3);
/* Walk the physical-disk list; refresh entries not active+up-to-date. */
1380 ioc_disk = mpt->ioc_page3->PhysDisk;
1381 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1382 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1383 struct mpt_raid_disk *mpt_disk;
1385 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1386 mpt_disk->flags |= MPT_RDF_REFERENCED;
1387 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1388 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1390 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1393 mpt_disk->flags |= MPT_RDF_ACTIVE;
1398 * Refresh volume data.
1400 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1401 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1402 &mpt->ioc_page2->Header, len,
1403 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1405 mpt_prt(mpt, "mpt_refresh_raid_data: "
1406 "Failed to read IOC Page 2\n");
1409 mpt2host_config_page_ioc2(mpt->ioc_page2);
/*
 * Walk the active-volume list; re-read any volume that is not both
 * active and up-to-date, or that is mid-resync (progress changes).
 */
1411 ioc_vol = mpt->ioc_page2->RaidVolume;
1412 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1413 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1414 struct mpt_raid_volume *mpt_vol;
1416 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1417 mpt_vol->flags |= MPT_RVF_REFERENCED;
1418 vol_pg = mpt_vol->config_page;
1421 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1422 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1423 || (vol_pg->VolumeStatus.Flags
1424 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1426 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1428 mpt_vol->flags |= MPT_RVF_ACTIVE;
/* Announce/report each volume; note ones that disappeared. */
1432 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1433 struct mpt_raid_volume *mpt_vol;
1439 mpt_vol = &mpt->raid_volumes[i];
1441 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1445 vol_pg = mpt_vol->config_page;
/* Announced earlier but no longer referenced => volume was removed. */
1446 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1447 == MPT_RVF_ANNOUNCED) {
1448 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1453 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1454 mpt_announce_vol(mpt, mpt_vol);
1455 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1458 if (vol_pg->VolumeStatus.State !=
1459 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
/* Skip reporting for volumes already reported this cycle. */
1462 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1465 mpt_vol->flags |= MPT_RVF_UP2DATE;
1466 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1467 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1468 mpt_verify_mwce(mpt, mpt_vol);
1470 if (vol_pg->VolumeStatus.Flags == 0) {
/* Decode each set status bit by masking one bit at a time. */
1474 mpt_vol_prt(mpt, mpt_vol, "Status (");
1475 for (m = 1; m <= 0x80; m <<= 1) {
1476 switch (vol_pg->VolumeStatus.Flags & m) {
1477 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1478 mpt_prtc(mpt, " Enabled");
1480 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1481 mpt_prtc(mpt, " Quiesced");
1483 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1484 mpt_prtc(mpt, " Re-Syncing");
1486 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1487 mpt_prtc(mpt, " Inactive");
1493 mpt_prtc(mpt, " )\n");
1495 if ((vol_pg->VolumeStatus.Flags
1496 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1499 mpt_verify_resync_rate(mpt, mpt_vol);
/* Report resync progress cached by mpt_refresh_raid_vol(). */
1501 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1502 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1503 if (vol_pg->ResyncRate != 0) {
/* Scale 0..0xFF rate to thousandths of a percent for display. */
1505 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1506 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1507 prio / 1000, prio % 1000);
1509 prio = vol_pg->VolumeSettings.Settings
1510 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1511 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1512 prio ? "High" : "Low");
1514 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1515 "blocks remaining\n", (uintmax_t)left,
1518 /* Periodically report on sync progress. */
1519 mpt_schedule_raid_refresh(mpt);
/* Same announce/report pass for the physical disks. */
1522 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1523 struct mpt_raid_disk *mpt_disk;
1524 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1527 mpt_disk = &mpt->raid_disks[i];
1528 disk_pg = &mpt_disk->config_page;
1530 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1533 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1534 == MPT_RDF_ANNOUNCED) {
1535 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1536 mpt_disk->flags = 0;
1541 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1543 mpt_announce_disk(mpt, mpt_disk);
/*
 * NOTE(review): MPT_RVF_ANNOUNCED (a volume flag) is OR'ed into a
 * *disk* flags field here; this looks like a typo for
 * MPT_RDF_ANNOUNCED and only works if the two flag values happen
 * to coincide — confirm against the flag definitions.
 */
1544 mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1547 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1550 mpt_disk->flags |= MPT_RDF_UP2DATE;
1551 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1552 if (disk_pg->PhysDiskStatus.Flags == 0)
/* Decode per-disk status bits, one mask bit at a time. */
1555 mpt_disk_prt(mpt, mpt_disk, "Status (");
1556 for (m = 1; m <= 0x80; m <<= 1) {
1557 switch (disk_pg->PhysDiskStatus.Flags & m) {
1558 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1559 mpt_prtc(mpt, " Out-Of-Sync");
1561 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1562 mpt_prtc(mpt, " Quiesced");
1568 mpt_prtc(mpt, " )\n");
/* Publish the non-optimal volume count (exported via sysctl). */
1571 mpt->raid_nonopt_volumes = nonopt_volumes;
/*
 * Callout handler armed by mpt_schedule_raid_refresh(): wakes the
 * RAID support code so it can re-poll RAID state.  The softc lock
 * must already be held when the callout fires.
 */
1576 mpt_raid_timer(void *arg)
1578 struct mpt_softc *mpt;
1580 mpt = (struct mpt_softc *)arg;
1581 MPT_LOCK_ASSERT(mpt);
1582 mpt_raid_wakeup(mpt);
/*
 * Arm (or re-arm) the periodic RAID refresh callout so mpt_raid_timer()
 * fires after MPT_RAID_SYNC_REPORT_INTERVAL ticks.
 */
1586 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1589 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1590 mpt_raid_timer, mpt);
/*
 * Release all RAID-related allocations hanging off the softc: each
 * volume's cached config page, the volume and disk arrays, and the
 * cached IOC pages 2 and 3.  Pointers are NULLed after free so the
 * routine is safe to call more than once.
 */
1594 mpt_raid_free_mem(struct mpt_softc *mpt)
1597 if (mpt->raid_volumes) {
1598 struct mpt_raid_volume *mpt_raid;
/* Free each volume's separately-allocated config page first. */
1600 for (i = 0; i < mpt->raid_max_volumes; i++) {
1601 mpt_raid = &mpt->raid_volumes[i];
1602 if (mpt_raid->config_page) {
1603 free(mpt_raid->config_page, M_DEVBUF);
1604 mpt_raid->config_page = NULL;
1607 free(mpt->raid_volumes, M_DEVBUF);
1608 mpt->raid_volumes = NULL;
1610 if (mpt->raid_disks) {
1611 free(mpt->raid_disks, M_DEVBUF);
1612 mpt->raid_disks = NULL;
1614 if (mpt->ioc_page2) {
1615 free(mpt->ioc_page2, M_DEVBUF);
1616 mpt->ioc_page2 = NULL;
1618 if (mpt->ioc_page3) {
1619 free(mpt->ioc_page3, M_DEVBUF);
1620 mpt->ioc_page3 = NULL;
/* Reset capacity counters so stale limits are not reused. */
1622 mpt->raid_max_volumes = 0;
1623 mpt->raid_max_disks = 0;
/*
 * Set the global resync rate and push it to every active volume.
 * Accepts rates in [MPT_RAID_RESYNC_RATE_MIN, MPT_RAID_RESYNC_RATE_MAX]
 * or the special MPT_RAID_RESYNC_RATE_NC ("no change") value; anything
 * else is rejected.
 */
1627 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1629 struct mpt_raid_volume *mpt_vol;
1631 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1632 || rate < MPT_RAID_RESYNC_RATE_MIN)
1633 && rate != MPT_RAID_RESYNC_RATE_NC)
1637 mpt->raid_resync_rate = rate;
/* Apply the new rate to each active volume. */
1638 RAID_VOL_FOREACH(mpt, mpt_vol) {
1639 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1642 mpt_verify_resync_rate(mpt, mpt_vol);
/*
 * Set the default volume queue depth (valid range 1..255) and apply it
 * to every active volume by building a CAM path to the volume's target
 * and adjusting its queue depth.
 */
1649 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1651 struct mpt_raid_volume *mpt_vol;
1653 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1657 mpt->raid_queue_depth = vol_queue_depth;
1658 RAID_VOL_FOREACH(mpt, mpt_vol) {
1659 struct cam_path *path;
1662 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1665 mpt->raid_rescan = 0;
/* Path targets the volume's VolumeID on this controller's SIM. */
1667 error = xpt_create_path(&path, NULL,
1668 cam_sim_path(mpt->sim),
1669 mpt_vol->config_page->VolumeID,
1671 if (error != CAM_REQ_CMP) {
1672 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1675 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1676 xpt_free_path(path);
/*
 * Set the member-disk write-cache-enable (MWCE) policy and apply it to
 * every active volume.  The very first setting after boot may force a
 * warning about a full resync (see comment below).
 */
1683 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1685 struct mpt_raid_volume *mpt_vol;
1686 int force_full_resync;
/* No-op if the policy is unchanged. */
1689 if (mwce == mpt->raid_mwce_setting) {
1695 * Catch MWCE being left on due to a failed shutdown. Since
1696 * sysctls cannot be set by the loader, we treat the first
1697 * setting of this variable specially and force a full volume
1698 * resync if MWCE is enabled and a resync is in progress.
1700 force_full_resync = 0;
1701 if (mpt->raid_mwce_set == 0
1702 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1703 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1704 force_full_resync = 1;
1706 mpt->raid_mwce_setting = mwce;
1707 RAID_VOL_FOREACH(mpt, mpt_vol) {
1708 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1712 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1715 vol_pg = mpt_vol->config_page;
1716 resyncing = vol_pg->VolumeStatus.Flags
1717 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
/* Note: mwce is reused here to hold the volume's current setting. */
1718 mwce = vol_pg->VolumeSettings.Settings
1719 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1720 if (force_full_resync && resyncing && mwce) {
1723 * XXX disable/enable volume should force a resync,
1724 * but we'll need to quiesce, drain, and restart
1727 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1728 "detected. Suggest full resync.\n");
1730 mpt_verify_mwce(mpt, mpt_vol);
/* Remember that the policy has been set at least once since boot. */
1732 mpt->raid_mwce_set = 1;
/*
 * Human-readable names for the MWCE policy values; indexed by the
 * raid_mwce setting and matched by name in the vol_member_wce sysctl
 * handler below.
 */
1737 static const char *mpt_vol_mwce_strs[] =
1741 "On-During-Rebuild",
/*
 * sysctl handler for "vol_member_wce": reports the current MWCE policy
 * as a string and, on write, matches the supplied string against
 * mpt_vol_mwce_strs[] to select the new policy.
 */
1746 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1749 struct mpt_softc *mpt;
1757 mpt = (struct mpt_softc *)arg1;
/* Export the current setting, including its NUL terminator. */
1758 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1759 error = SYSCTL_OUT(req, str, strlen(str) + 1);
/* Done unless this is a write request. */
1760 if (error || !req->newptr) {
1764 size = req->newlen - req->newidx;
/* Reject input that cannot fit in the local buffer. */
1765 if (size >= sizeof(inbuf)) {
1769 error = SYSCTL_IN(req, inbuf, size);
/* Translate the string to its index, which is the policy value. */
1774 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1775 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1776 return (mpt_raid_set_vol_mwce(mpt, i));
/*
 * sysctl handler for "vol_resync_rate": reads/writes the global resync
 * rate via mpt_raid_set_vol_resync_rate(), which validates the value.
 */
1783 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1785 struct mpt_softc *mpt;
1786 u_int raid_resync_rate;
1791 mpt = (struct mpt_softc *)arg1;
/* Copy to a local so a failed/read-only request leaves state intact. */
1792 raid_resync_rate = mpt->raid_resync_rate;
1794 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1795 if (error || !req->newptr) {
1799 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
/*
 * sysctl handler for "vol_queue_depth": reads/writes the default volume
 * queue depth via mpt_raid_set_vol_queue_depth(), which validates it.
 */
1803 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1805 struct mpt_softc *mpt;
1806 u_int raid_queue_depth;
1811 mpt = (struct mpt_softc *)arg1;
/* Work on a local copy; commit only after a successful write. */
1812 raid_queue_depth = mpt->raid_queue_depth;
1814 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1815 if (error || !req->newptr) {
1819 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
/*
 * Register the per-device RAID sysctl nodes under the device's sysctl
 * tree: vol_member_wce (string), vol_queue_depth (int),
 * vol_resync_rate (int), and the read-only nonoptimal_volumes count.
 */
1823 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1825 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1826 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1828 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1829 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1830 mpt_raid_sysctl_vol_member_wce, "A",
1831 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1833 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1834 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1835 mpt_raid_sysctl_vol_queue_depth, "I",
1836 "default volume queue depth");
1838 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1839 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1840 mpt_raid_sysctl_vol_resync_rate, "I",
1841 "volume resync priority (0 == NC, 1 - 255)");
1842 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1843 "nonoptimal_volumes", CTLFLAG_RD,
1844 &mpt->raid_nonopt_volumes, 0,
1845 "number of nonoptimal volumes");